--- old/.hgignore	2016-10-31 17:46:07.000000000 -0700
+++ new/.hgignore	2016-10-31 17:46:07.000000000 -0700
@@ -24,3 +24,19 @@
 ^test/compiler/jvmci/\w[\w\.]*/.*\.iml
 ^test/compiler/jvmci/\w[\w\.]*/nbproject
 ^test/compiler/jvmci/\w[\w\.]*/\..*
+^test/compiler/aot/\w[\w\.]*/.*\.xml
+^test/compiler/aot/\w[\w\.]*/.*\.iml
+^test/compiler/aot/\w[\w\.]*/nbproject
+^test/compiler/aot/\w[\w\.]*/\..*
+^src/jdk.vm.compiler/\.mx.graal/env
+^src/jdk.vm.compiler/\.mx.graal/.*\.pyc
+^src/jdk.vm.compiler/\.mx.graal/eclipse-launches/.*
+^src/jdk.aot/share/classes/\w[\w\.]*/.*\.xml
+^src/jdk.aot/share/classes/\w[\w\.]*/.*\.iml
+^src/jdk.aot/share/classes/\w[\w\.]*/nbproject
+^src/jdk.aot/share/classes/\w[\w\.]*/\..*
+^src/jdk.vm.compiler/share/classes/\w[\w\.]*/.*\.xml
+^src/jdk.vm.compiler/share/classes/\w[\w\.]*/.*\.iml
+^src/jdk.vm.compiler/share/classes/\w[\w\.]*/nbproject
+^src/jdk.vm.compiler/share/classes/\w[\w\.]*/\..*
+
--- old/make/lib/JvmFeatures.gmk	2016-10-31 17:46:08.000000000 -0700
+++ new/make/lib/JvmFeatures.gmk	2016-10-31 17:46:08.000000000 -0700
@@ -146,3 +146,11 @@
     memBaseline.cpp memReporter.cpp mallocTracker.cpp virtualMemoryTracker.cpp nmtCommon.cpp \
     memTracker.cpp nmtDCmd.cpp mallocSiteTable.cpp
 endif
+
+ifeq ($(call check-jvm-feature, aot), true)
+  JVM_CFLAGS_FEATURES += -DINCLUDE_AOT
+else
+  JVM_EXCLUDE_FILES += \
+      compiledIC_aot_x86_64.cpp \
+      aotCodeHeap.cpp aotCompiledMethod.cpp aotLoader.cpp compiledIC_aot.cpp
+endif
--- old/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	2016-10-31 17:46:08.000000000 -0700
+++ new/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp	2016-10-31 17:46:08.000000000 -0700
@@ -375,7 +375,7 @@
   __ nop();

   // generate code for exception handler
-  address handler_base = __ start_a_stub(exception_handler_size);
+  address handler_base = __ start_a_stub(exception_handler_size());
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
@@ -393,7 +393,7 @@
   // search an exception handler (r0: exception oop, r3: throwing pc)
   __ far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
   __ should_not_reach_here();
-  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
+  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -467,7 +467,7 @@
   __ nop();

   // generate code for exception handler
-  address handler_base = __ start_a_stub(deopt_handler_size);
+  address handler_base = __ start_a_stub(deopt_handler_size());
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
@@ -478,7 +478,7 @@
   __ adr(lr, pc());
   __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
+  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -2001,7 +2001,7 @@

 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
-  address stub = __ start_a_stub(call_stub_size);
+  address stub = __ start_a_stub(call_stub_size());
   if (stub == NULL) {
     bailout("static call stub overflow");
     return;
@@ -2014,7 +2014,7 @@
   __ movptr(rscratch1, 0);
   __ br(rscratch1);

-  assert(__ offset() - start <= call_stub_size, "stub too big");
+  assert(__ offset() - start <= call_stub_size(), "stub too big");
   __ end_a_stub();
 }
--- old/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp	2016-10-31 17:46:09.000000000 -0700
+++ new/src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.hpp	2016-10-31 17:46:09.000000000 -0700
@@ -68,14 +68,17 @@

   void deoptimize_trap(CodeEmitInfo *info);

+  enum {
+    _call_stub_size = 12 * NativeInstruction::instruction_size,
+    _call_aot_stub_size = 0,
+    _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
+    _deopt_handler_size = 7 * NativeInstruction::instruction_size
+  };
+
 public:

   void store_parameter(Register r, int offset_from_esp_in_words);
   void store_parameter(jint c,     int offset_from_esp_in_words);
   void store_parameter(jobject c,  int offset_from_esp_in_words);

-enum { call_stub_size = 12 * NativeInstruction::instruction_size,
-       exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
-       deopt_handler_size = 7 * NativeInstruction::instruction_size };
-
 #endif // CPU_AARCH64_VM_C1_LIRASSEMBLER_AARCH64_HPP
--- old/src/cpu/aarch64/vm/compiledIC_aarch64.cpp	2016-10-31 17:46:10.000000000 -0700
+++ new/src/cpu/aarch64/vm/compiledIC_aarch64.cpp	2016-10-31 17:46:10.000000000 -0700
@@ -76,13 +76,13 @@
   return 4; // 3 in emit_to_interp_stub + 1 in emit_call
 }

-void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
-  address stub = find_stub();
+void CompiledDirectStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  address stub = find_stub(false /* is_aot */);
   guarantee(stub != NULL, "stub not found");

   if (TraceICs) {
     ResourceMark rm;
-    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+    tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                   p2i(instruction_address()),
                   callee->name_and_sig_as_C_string());
   }
@@ -107,7 +107,7 @@
   set_destination_mt_safe(stub);
 }

-void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   // Reset stub.
   address stub = static_stub->addr();
@@ -121,15 +121,15 @@
 // Non-product mode code
 #ifndef PRODUCT

-void CompiledStaticCall::verify() {
+void CompiledDirectStaticCall::verify() {
   // Verify call.
-  NativeCall::verify();
+  _call->verify();
   if (os::is_MP()) {
-    verify_alignment();
+    _call->verify_alignment();
   }

   // Verify stub.
-  address stub = find_stub();
+  address stub = find_stub(false /* is_aot */);
   assert(stub != NULL, "no stub found for static call");
   // Creation also verifies the object.
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
--- old/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp	2016-10-31 17:46:10.000000000 -0700
+++ new/src/cpu/ppc/vm/c1_LIRAssembler_ppc.cpp	2016-10-31 17:46:10.000000000 -0700
@@ -153,7 +153,7 @@
   __ nop();

   // Generate code for the exception handler.
-  address handler_base = __ start_a_stub(exception_handler_size);
+  address handler_base = __ start_a_stub(exception_handler_size());

   if (handler_base == NULL) {
     // Not enough space left for the handler.
@@ -168,7 +168,7 @@
   __ mtctr(R0);
   __ bctr();

-  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
+  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -233,7 +233,7 @@
   __ nop();

   // Generate code for deopt handler.
-  address handler_base = __ start_a_stub(deopt_handler_size);
+  address handler_base = __ start_a_stub(deopt_handler_size());

   if (handler_base == NULL) {
     // Not enough space left for the handler.
@@ -244,7 +244,7 @@
   int offset = code_offset();
   __ bl64_patchable(SharedRuntime::deopt_blob()->unpack(), relocInfo::runtime_call_type);
-  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
+  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -1307,7 +1307,7 @@

 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
-  address stub = __ start_a_stub(max_static_call_stub_size);
+  address stub = __ start_a_stub(static_call_stub_size());
   if (stub == NULL) {
     bailout("static call stub overflow");
     return;
@@ -1346,7 +1346,7 @@
     return;
   }

-  assert(__ offset() - start <= max_static_call_stub_size, "stub too big");
+  assert(__ offset() - start <= static_call_stub_size(), "stub too big");
   __ end_a_stub();
 }
--- old/src/cpu/ppc/vm/c1_LIRAssembler_ppc.hpp	2016-10-31 17:46:11.000000000 -0700
+++ new/src/cpu/ppc/vm/c1_LIRAssembler_ppc.hpp	2016-10-31 17:46:11.000000000 -0700
@@ -60,10 +60,21 @@
   bool emit_trampoline_stub_for_call(address target, Register Rtoc = noreg);

   enum {
-    max_static_call_stub_size = 4 * BytesPerInstWord + MacroAssembler::b64_patchable_size,
-    call_stub_size = max_static_call_stub_size + MacroAssembler::trampoline_stub_size, // or smaller
-    exception_handler_size = MacroAssembler::b64_patchable_size, // or smaller
-    deopt_handler_size = MacroAssembler::bl64_patchable_size
+    _static_call_stub_size = 4 * BytesPerInstWord + MacroAssembler::b64_patchable_size, // or smaller
+    _call_stub_size = _static_call_stub_size + MacroAssembler::trampoline_stub_size, // or smaller
+    _call_aot_stub_size = 0,
+    _exception_handler_size = MacroAssembler::b64_patchable_size, // or smaller
+    _deopt_handler_size = MacroAssembler::bl64_patchable_size
   };

+  // '_static_call_stub_size' is only used on ppc (see LIR_Assembler::emit_static_call_stub()
+  // in c1_LIRAssembler_ppc.cpp). The other, shared getters are defined in c1_LIRAssembler.hpp.
+  static int static_call_stub_size() {
+    if (UseAOT) {
+      return _static_call_stub_size + _call_aot_stub_size;
+    } else {
+      return _static_call_stub_size;
+    }
+  }
+
 #endif // CPU_PPC_VM_C1_LIRASSEMBLER_PPC_HPP
--- old/src/cpu/ppc/vm/compiledIC_ppc.cpp	2016-10-31 17:46:12.000000000 -0700
+++ new/src/cpu/ppc/vm/compiledIC_ppc.cpp	2016-10-31 17:46:12.000000000 -0700
@@ -37,7 +37,7 @@

 // ----------------------------------------------------------------------------

-// A PPC CompiledStaticCall looks like this:
+// A PPC CompiledDirectStaticCall looks like this:
 //
 // >>>> consts
 //
@@ -163,13 +163,13 @@
   return 5;
 }

-void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
-  address stub = find_stub();
+void CompiledDirectStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  address stub = find_stub(/*is_aot*/ false);
   guarantee(stub != NULL, "stub not found");

   if (TraceICs) {
     ResourceMark rm;
-    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+    tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                   p2i(instruction_address()),
                   callee->name_and_sig_as_C_string());
   }
@@ -196,7 +196,7 @@
   set_destination_mt_safe(stub);
 }

-void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   // Reset stub.
   address stub = static_stub->addr();
@@ -212,15 +212,15 @@
 // Non-product mode code
 #ifndef PRODUCT

-void CompiledStaticCall::verify() {
+void CompiledDirectStaticCall::verify() {
   // Verify call.
-  NativeCall::verify();
+  _call->verify();
   if (os::is_MP()) {
-    verify_alignment();
+    _call->verify_alignment();
   }

   // Verify stub.
-  address stub = find_stub();
+  address stub = find_stub(/*is_aot*/ false);
   assert(stub != NULL, "no stub found for static call");
   // Creation also verifies the object.
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
--- old/src/cpu/s390/vm/c1_LIRAssembler_s390.cpp	2016-10-31 17:46:12.000000000 -0700
+++ new/src/cpu/s390/vm/c1_LIRAssembler_s390.cpp	2016-10-31 17:46:12.000000000 -0700
@@ -153,7 +153,7 @@
   __ nop();

   // Generate code for exception handler.
-  address handler_base = __ start_a_stub(exception_handler_size);
+  address handler_base = __ start_a_stub(exception_handler_size());
   if (handler_base == NULL) {
     // Not enough space left for the handler.
     bailout("exception handler overflow");
@@ -166,7 +166,7 @@
   address call_addr = emit_call_c(a);
   CHECK_BAILOUT_(-1);
   __ should_not_reach_here();
-  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
+  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -251,7 +251,7 @@
   __ nop();

   // Generate code for exception handler.
-  address handler_base = __ start_a_stub(deopt_handler_size);
+  address handler_base = __ start_a_stub(deopt_handler_size());
   if (handler_base == NULL) {
     // Not enough space left for the handler.
     bailout("deopt handler overflow");
@@ -260,7 +260,7 @@
   // Size must be constant (see HandlerImpl::emit_deopt_handler).
   __ load_const(Z_R1_scratch, SharedRuntime::deopt_blob()->unpack());
   __ call(Z_R1_scratch);
-  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
+  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -1158,7 +1158,7 @@
   // compiled code to calling interpreted code.
   address call_pc = __ pc();
-  address stub = __ start_a_stub(call_stub_size);
+  address stub = __ start_a_stub(call_stub_size());
   if (stub == NULL) {
     bailout("static call stub overflow");
     return;
@@ -1181,7 +1181,7 @@
   }
   __ z_br(Z_R1);

-  assert(__ offset() - start <= call_stub_size, "stub too big");
+  assert(__ offset() - start <= call_stub_size(), "stub too big");
   __ end_a_stub(); // Update current stubs pointer and restore insts_end.
 }
--- old/src/cpu/s390/vm/c1_LIRAssembler_s390.hpp	2016-10-31 17:46:13.000000000 -0700
+++ new/src/cpu/s390/vm/c1_LIRAssembler_s390.hpp	2016-10-31 17:46:13.000000000 -0700
@@ -46,9 +46,10 @@
   }

   enum {
-    call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledStaticCall::emit_to_interp_stub.
-    exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
-    deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
+    _call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledStaticCall::emit_to_interp_stub.
+    _call_aot_stub_size = 0,
+    _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
+    _deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
   };

 #endif // CPU_S390_VM_C1_LIRASSEMBLER_S390_HPP
--- old/src/cpu/s390/vm/compiledIC_s390.cpp	2016-10-31 17:46:14.000000000 -0700
+++ new/src/cpu/s390/vm/compiledIC_s390.cpp	2016-10-31 17:46:13.000000000 -0700
@@ -90,19 +90,19 @@
   return 5; // 4 in emit_java_to_interp + 1 in Java_Static_Call
 }

-void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
-  address stub = find_stub();
+void CompiledDirectStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  address stub = find_stub(/*is_aot*/ false);
   guarantee(stub != NULL, "stub not found");

   if (TraceICs) {
     ResourceMark rm;
-    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+    tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                   p2i(instruction_address()),
                   callee->name_and_sig_as_C_string());
   }

   // Creation also verifies the object.
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + get_IC_pos_in_java_to_interp_stub());
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
   NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

   // A generated lambda form might be deleted from the Lambdaform
@@ -123,13 +123,13 @@
   set_destination_mt_safe(stub);
 }

-void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   // Reset stub.
   address stub = static_stub->addr();
   assert(stub != NULL, "stub not found");
   // Creation also verifies the object.
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + get_IC_pos_in_java_to_interp_stub());
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
   NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
   method_holder->set_data(0);
   jump->set_jump_destination((address)-1);
@@ -139,18 +139,18 @@

 #ifndef PRODUCT

-void CompiledStaticCall::verify() {
+void CompiledDirectStaticCall::verify() {
   // Verify call.
-  NativeCall::verify();
+  _call->verify();
   if (os::is_MP()) {
-    verify_alignment();
+    _call->verify_alignment();
   }

   // Verify stub.
-  address stub = find_stub();
+  address stub = find_stub(/*is_aot*/ false);
   assert(stub != NULL, "no stub found for static call");
   // Creation also verifies the object.
-  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + get_IC_pos_in_java_to_interp_stub());
+  NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
   NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());

   // Verify state.
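[Editor's sketch] Every port in this changeset applies the same pattern: the stub-size enum constants gain a leading underscore and callers switch from the raw constants to getters, so the extra AOT trampoline can be folded into the reserved stub space when UseAOT is on. The shared getters live in c1_LIRAssembler.hpp, which is not part of this webrev; the shape below is an assumption inferred from the ppc-specific static_call_stub_size() above, not quoted source:

  // Sketch only -- assumed shape of the shared getters in c1_LIRAssembler.hpp.
  static int call_stub_size() {
    if (UseAOT) {
      return _call_stub_size + _call_aot_stub_size; // reserve room for the AOT trampoline
    } else {
      return _call_stub_size;
    }
  }
  static int exception_handler_size() { return _exception_handler_size; }
  static int deopt_handler_size()     { return _deopt_handler_size; }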
--- old/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	2016-10-31 17:46:14.000000000 -0700
+++ new/src/cpu/sparc/vm/c1_LIRAssembler_sparc.cpp	2016-10-31 17:46:14.000000000 -0700
@@ -287,7 +287,7 @@

   // generate code for exception handler
   ciMethod* method = compilation()->method();
-  address handler_base = __ start_a_stub(exception_handler_size);
+  address handler_base = __ start_a_stub(exception_handler_size());

   if (handler_base == NULL) {
     // not enough space left for the handler
@@ -300,7 +300,7 @@
   __ call(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id), relocInfo::runtime_call_type);
   __ delayed()->nop();
   __ should_not_reach_here();
-  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
+  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -375,7 +375,7 @@

   // generate code for deopt handler
   ciMethod* method = compilation()->method();
-  address handler_base = __ start_a_stub(deopt_handler_size);
+  address handler_base = __ start_a_stub(deopt_handler_size());
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
@@ -386,7 +386,7 @@
   AddressLiteral deopt_blob(SharedRuntime::deopt_blob()->unpack());
   __ JUMP(deopt_blob, G3_scratch, 0); // sethi;jmp
   __ delayed()->nop();
-  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
+  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -1493,7 +1493,7 @@

 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
-  address stub = __ start_a_stub(call_stub_size);
+  address stub = __ start_a_stub(call_stub_size());
   if (stub == NULL) {
     bailout("static call stub overflow");
     return;
@@ -1508,7 +1508,7 @@
   __ jump_to(addrlit, G3);
   __ delayed()->nop();

-  assert(__ offset() - start <= call_stub_size, "stub too big");
+  assert(__ offset() - start <= call_stub_size(), "stub too big");
   __ end_a_stub();
 }
--- old/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	2016-10-31 17:46:15.000000000 -0700
+++ new/src/cpu/sparc/vm/c1_LIRAssembler_sparc.hpp	2016-10-31 17:46:15.000000000 -0700
@@ -59,17 +59,20 @@
   // Setup pointers to MDO, MDO slot, also compute offset bias to access the slot.
   void setup_md_access(ciMethod* method, int bci,
                        ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias);

- public:
-  void pack64(LIR_Opr src, LIR_Opr dst);
-  void unpack64(LIR_Opr src, LIR_Opr dst);
-
-enum {
+  enum {
 #ifdef _LP64
-  call_stub_size = 68,
+    _call_stub_size = 68,
 #else
-  call_stub_size = 20,
+    _call_stub_size = 20,
 #endif // _LP64
-  exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
-  deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64) };
+    _call_aot_stub_size = 0,
+    _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
+    _deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
+  };
+
+ public:
+  void pack64(LIR_Opr src, LIR_Opr dst);
+  void unpack64(LIR_Opr src, LIR_Opr dst);

 #endif // CPU_SPARC_VM_C1_LIRASSEMBLER_SPARC_HPP
--- old/src/cpu/sparc/vm/compiledIC_sparc.cpp	2016-10-31 17:46:15.000000000 -0700
+++ new/src/cpu/sparc/vm/compiledIC_sparc.cpp	2016-10-31 17:46:15.000000000 -0700
@@ -85,13 +85,13 @@
   return 10;  // 4 in emit_java_to_interp + 1 in Java_Static_Call
 }

-void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
-  address stub = find_stub();
+void CompiledDirectStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  address stub = find_stub(/*is_aot*/ false);
   guarantee(stub != NULL, "stub not found");

   if (TraceICs) {
     ResourceMark rm;
-    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+    tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                   p2i(instruction_address()),
                   callee->name_and_sig_as_C_string());
   }
@@ -118,7 +118,7 @@
   set_destination_mt_safe(stub);
 }

-void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   // Reset stub.
   address stub = static_stub->addr();
@@ -134,15 +134,15 @@
 // Non-product mode code
 #ifndef PRODUCT

-void CompiledStaticCall::verify() {
+void CompiledDirectStaticCall::verify() {
   // Verify call.
-  NativeCall::verify();
+  _call->verify();
   if (os::is_MP()) {
-    verify_alignment();
+    _call->verify_alignment();
   }

   // Verify stub.
-  address stub = find_stub();
+  address stub = find_stub(/*is_aot*/ false);
   assert(stub != NULL, "no stub found for static call");
   // Creation also verifies the object.
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
--- old/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	2016-10-31 17:46:16.000000000 -0700
+++ new/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp	2016-10-31 17:46:16.000000000 -0700
@@ -393,7 +393,7 @@
   __ nop();

   // generate code for exception handler
-  address handler_base = __ start_a_stub(exception_handler_size);
+  address handler_base = __ start_a_stub(exception_handler_size());
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("exception handler overflow");
@@ -412,7 +412,7 @@
   // search an exception handler (rax: exception oop, rdx: throwing pc)
   __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::handle_exception_from_callee_id)));
   __ should_not_reach_here();
-  guarantee(code_offset() - offset <= exception_handler_size, "overflow");
+  guarantee(code_offset() - offset <= exception_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -490,7 +490,7 @@
   __ nop();

   // generate code for exception handler
-  address handler_base = __ start_a_stub(deopt_handler_size);
+  address handler_base = __ start_a_stub(deopt_handler_size());
   if (handler_base == NULL) {
     // not enough space left for the handler
     bailout("deopt handler overflow");
@@ -502,7 +502,7 @@
   __ pushptr(here.addr());
   __ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
-  guarantee(code_offset() - offset <= deopt_handler_size, "overflow");
+  guarantee(code_offset() - offset <= deopt_handler_size(), "overflow");
   __ end_a_stub();

   return offset;
@@ -2805,7 +2805,7 @@

 void LIR_Assembler::emit_static_call_stub() {
   address call_pc = __ pc();
-  address stub = __ start_a_stub(call_stub_size);
+  address stub = __ start_a_stub(call_stub_size());
   if (stub == NULL) {
     bailout("static call stub overflow");
     return;
@@ -2816,14 +2816,24 @@
     // make sure that the displacement word of the call ends up word aligned
     __ align(BytesPerWord, __ offset() + NativeMovConstReg::instruction_size + NativeCall::displacement_offset);
   }
-  __ relocate(static_stub_Relocation::spec(call_pc));
+  __ relocate(static_stub_Relocation::spec(call_pc, false /* is_aot */));
   __ mov_metadata(rbx, (Metadata*)NULL); // must be set to -1 at code generation time
   assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
   // On 64bit this will die since it will take a movq & jmp, must be only a jmp
   __ jump(RuntimeAddress(__ pc()));

-  assert(__ offset() - start <= call_stub_size, "stub too big");
+  if (UseAOT) {
+    // Trampoline to aot code
+    __ relocate(static_stub_Relocation::spec(call_pc, true /* is_aot */));
+#ifdef _LP64
+    __ mov64(rax, CONST64(0));  // address is zapped till fixup time.
+#else
+    __ movl(rax, 0xdeadffff); // address is zapped till fixup time.
+#endif
+    __ jmp(rax);
+  }
+  assert(__ offset() - start <= call_stub_size(), "stub too big");
   __ end_a_stub();
 }
--- old/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	2016-10-31 17:46:17.000000000 -0700
+++ new/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp	2016-10-31 17:46:17.000000000 -0700
@@ -47,6 +47,14 @@

   void type_profile_helper(Register mdo, ciMethodData *md, ciProfileData *data,
                            Register recv, Label* update_done);
+
+  enum {
+    _call_stub_size = NOT_LP64(15) LP64_ONLY(28),
+    _call_aot_stub_size = NOT_LP64(7) LP64_ONLY(12),
+    _exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
+    _deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
+  };
+
 public:

   void store_parameter(Register r, int offset_from_esp_in_words);
@@ -54,9 +62,4 @@
   void store_parameter(jobject c,  int offset_from_esp_in_words);
   void store_parameter(Metadata* c, int offset_from_esp_in_words);

-  enum { call_stub_size = NOT_LP64(15) LP64_ONLY(28),
-         exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
-         deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
-       };
-
 #endif // CPU_X86_VM_C1_LIRASSEMBLER_X86_HPP
--- old/src/cpu/x86/vm/compiledIC_x86.cpp	2016-10-31 17:46:17.000000000 -0700
+++ new/src/cpu/x86/vm/compiledIC_x86.cpp	2016-10-31 17:46:17.000000000 -0700
@@ -24,6 +24,7 @@

 #include "precompiled.hpp"
 #include "asm/macroAssembler.inline.hpp"
+#include "code/codeCache.hpp"
 #include "code/compiledIC.hpp"
 #include "code/icBuffer.hpp"
 #include "code/nmethod.hpp"
@@ -53,7 +54,7 @@
     return NULL;  // CodeBuffer::expand failed.
   }
   // Static stub relocation stores the instruction address of the call.
-  __ relocate(static_stub_Relocation::spec(mark), Assembler::imm_operand);
+  __ relocate(static_stub_Relocation::spec(mark, false), Assembler::imm_operand);
   // Static stub relocation also tags the Method* in the code-stream.
   __ mov_metadata(rbx, (Metadata*) NULL);  // Method is zapped till fixup time.
   // This is recognized as unresolved by relocs/nativeinst/ic code.
@@ -77,13 +78,73 @@
   return 4; // 3 in emit_to_interp_stub + 1 in emit_call
 }

-void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
-  address stub = find_stub();
+#if INCLUDE_AOT
+#define __ _masm.
+void CompiledStaticCall::emit_to_aot_stub(CodeBuffer &cbuf, address mark) {
+  if (!UseAOT) {
+    return;
+  }
+  // Stub is fixed up when the corresponding call is converted from
+  // calling compiled code to calling aot code.
+  // movq rax, imm64_aot_code_address
+  // jmp  rax
+
+  if (mark == NULL) {
+    mark = cbuf.insts_mark();  // Get mark within main instrs section.
+  }
+
+  // Note that the code buffer's insts_mark is always relative to insts.
+  // That's why we must use the macroassembler to generate a stub.
+  MacroAssembler _masm(&cbuf);
+
+  address base = __ start_a_stub(to_aot_stub_size());
+  guarantee(base != NULL, "out of space");
+
+  // Static stub relocation stores the instruction address of the call.
+  __ relocate(static_stub_Relocation::spec(mark, true /* is_aot */), Assembler::imm_operand);
+  // Load destination AOT code address.
+#ifdef _LP64
+  __ mov64(rax, CONST64(0));  // address is zapped till fixup time.
+#else
+  __ movl(rax, 0);  // address is zapped till fixup time.
+#endif
+  // This is recognized as unresolved by relocs/nativeinst/ic code.
+  __ jmp(rax);
+
+  assert(__ pc() - base <= to_aot_stub_size(), "wrong stub size");
+
+  // Update current stubs pointer and restore insts_end.
+  __ end_a_stub();
+}
+#undef __
+
+int CompiledStaticCall::to_aot_stub_size() {
+  if (UseAOT) {
+    return NOT_LP64(7)    // movl; jmp
+           LP64_ONLY(12); // movq (1+1+8); jmp (2)
+  } else {
+    return 0;
+  }
+}
+
+// Relocation entries for call stub, compiled java to aot.
+int CompiledStaticCall::reloc_to_aot_stub() {
+  if (UseAOT) {
+    return 2; // 1 in emit_to_aot_stub + 1 in emit_call
+  } else {
+    return 0;
+  }
+}
+#endif // INCLUDE_AOT
+
+void CompiledDirectStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+  address stub = find_stub(false /* is_aot */);
   guarantee(stub != NULL, "stub not found");

   if (TraceICs) {
     ResourceMark rm;
-    tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
+    tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
                   p2i(instruction_address()),
                   callee->name_and_sig_as_C_string());
   }
@@ -110,7 +171,7 @@
   set_destination_mt_safe(stub);
 }

-void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
   assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call");
   // Reset stub.
   address stub = static_stub->addr();
@@ -118,8 +179,10 @@
   // Creation also verifies the object.
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
   method_holder->set_data(0);
-  NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
-  jump->set_jump_destination((address)-1);
+  if (!static_stub->is_aot()) {
+    NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
+    jump->set_jump_destination((address)-1);
+  }
 }

@@ -127,15 +190,20 @@
 // Non-product mode code
 #ifndef PRODUCT

-void CompiledStaticCall::verify() {
+void CompiledDirectStaticCall::verify() {
   // Verify call.
-  NativeCall::verify();
+  _call->verify();
   if (os::is_MP()) {
-    verify_alignment();
+    _call->verify_alignment();
   }
+
+#ifdef ASSERT
+  CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call);
+  assert(cb && !cb->is_aot(), "CompiledDirectStaticCall cannot be used on AOTCompiledMethod");
+#endif

   // Verify stub.
-  address stub = find_stub();
+  address stub = find_stub(false /* is_aot */);
   assert(stub != NULL, "no stub found for static call");
   // Creation also verifies the object.
   NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
--- old/src/cpu/x86/vm/icBuffer_x86.cpp	2016-10-31 17:46:18.000000000 -0700
+++ new/src/cpu/x86/vm/icBuffer_x86.cpp	2016-10-31 17:46:18.000000000 -0700
@@ -33,12 +33,18 @@
 #include "oops/oop.inline.hpp"

 int InlineCacheBuffer::ic_stub_code_size() {
-  return NativeMovConstReg::instruction_size +
-         NativeJump::instruction_size +
-         1;
-  // so that code_end can be set in CodeBuffer
-  // 64bit 16 = 5 + 10 bytes + 1 byte
-  // 32bit 11 = 10 bytes + 1 byte
+  // Worst case, if destination is not a near call:
+  // lea rax, lit1
+  // lea scratch, lit2
+  // jmp scratch
+
+  // Best case
+  // lea rax, lit1
+  // jmp lit2
+
+  int best = NativeMovConstReg::instruction_size + NativeJump::instruction_size;
+  int worst = 2 * NativeMovConstReg::instruction_size + 3;
+  return MAX2(best, worst);
 }

@@ -59,8 +65,16 @@

 address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
   NativeMovConstReg* move = nativeMovConstReg_at(code_begin);   // creation also verifies the object
-  NativeJump* jump = nativeJump_at(move->next_instruction_address());
-  return jump->jump_destination();
+  address jmp = move->next_instruction_address();
+  NativeInstruction* ni = nativeInstruction_at(jmp);
+  if (ni->is_jump()) {
+    NativeJump* jump = nativeJump_at(jmp);
+    return jump->jump_destination();
+  } else {
+    assert(ni->is_far_jump(), "unexpected instruction");
+    NativeFarJump* jump = nativeFarJump_at(jmp);
+    return jump->jump_destination();
+  }
 }

@@ -68,7 +82,14 @@
   // creation also verifies the object
   NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
   // Verifies the jump
-  NativeJump* jump = nativeJump_at(move->next_instruction_address());
+  address jmp = move->next_instruction_address();
+  NativeInstruction* ni = nativeInstruction_at(jmp);
+  if (ni->is_jump()) {
+    NativeJump* jump = nativeJump_at(jmp);
+  } else {
+    assert(ni->is_far_jump(), "unexpected instruction");
+    NativeFarJump* jump = nativeFarJump_at(jmp);
+  }
   void* o = (void*)move->data();
   return o;
 }
--- old/src/cpu/x86/vm/nativeInst_x86.cpp	2016-10-31 17:46:19.000000000 -0700
+++ new/src/cpu/x86/vm/nativeInst_x86.cpp	2016-10-31 17:46:18.000000000 -0700
@@ -39,6 +39,124 @@
   ICache::invalidate_word(addr_at(offset));
 }

+void NativeLoadGot::report_and_fail() const {
+  tty->print_cr("Addr: " INTPTR_FORMAT, p2i(instruction_address()));
+  fatal("not an indirect rip mov to rbx");
+}
+
+void NativeLoadGot::verify() const {
+  if (has_rex) {
+    int rex = ubyte_at(0);
+    if (rex != rex_prefix) {
+      report_and_fail();
+    }
+  }
+
+  int inst = ubyte_at(rex_size);
+  if (inst != instruction_code) {
+    report_and_fail();
+  }
+  int modrm = ubyte_at(rex_size + 1);
+  if (modrm != modrm_rbx_code && modrm != modrm_rax_code) {
+    report_and_fail();
+  }
+}
+
+intptr_t NativeLoadGot::data() const {
+  return *(intptr_t *) got_address();
+}
+
+address NativePltCall::destination() const {
+  NativeGotJump* jump = nativeGotJump_at(plt_jump());
+  return jump->destination();
+}
+
+address NativePltCall::plt_entry() const {
+  return return_address() + displacement();
+}
+
+address NativePltCall::plt_jump() const {
+  address entry = plt_entry();
+  // Virtual PLT code has move instruction first
+  if (((NativeGotJump*)entry)->is_GotJump()) {
+    return entry;
+  } else {
+    return nativeLoadGot_at(entry)->next_instruction_address();
+  }
+}
+
+address NativePltCall::plt_load_got() const {
+  address entry = plt_entry();
+  if (!((NativeGotJump*)entry)->is_GotJump()) {
+    // Virtual PLT code has move instruction first
+    return entry;
+  } else {
+    // Static PLT code has move instruction second (from c2i stub)
+    return nativeGotJump_at(entry)->next_instruction_address();
+  }
+}
+
+address NativePltCall::plt_c2i_stub() const {
+  address entry = plt_load_got();
+  // This method should be called only for static calls which have a C2I stub.
+  NativeLoadGot* load = nativeLoadGot_at(entry);
+  return entry;
+}
+
+address NativePltCall::plt_resolve_call() const {
+  NativeGotJump* jump = nativeGotJump_at(plt_jump());
+  address entry = jump->next_instruction_address();
+  if (((NativeGotJump*)entry)->is_GotJump()) {
+    return entry;
+  } else {
+    // c2i stub 2 instructions
+    entry = nativeLoadGot_at(entry)->next_instruction_address();
+    return nativeGotJump_at(entry)->next_instruction_address();
+  }
+}
+
+void NativePltCall::reset_to_plt_resolve_call() {
+  set_destination_mt_safe(plt_resolve_call());
+}
+
+void NativePltCall::set_destination_mt_safe(address dest) {
+  // rewriting the value in the GOT, it should always be aligned
+  NativeGotJump* jump = nativeGotJump_at(plt_jump());
+  address* got = (address *) jump->got_address();
+  *got = dest;
+}
+
+void NativePltCall::set_stub_to_clean() {
+  NativeLoadGot* method_loader = nativeLoadGot_at(plt_c2i_stub());
+  NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address());
+  method_loader->set_data(0);
+  jump->set_jump_destination((address)-1);
+}
+
+void NativePltCall::verify() const {
+  // Make sure code pattern is actually a call rip+off32 instruction.
+  int inst = ubyte_at(0);
+  if (inst != instruction_code) {
+    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
+                  inst);
+    fatal("not a call rip+off32");
+  }
+}
+
+address NativeGotJump::destination() const {
+  address *got_entry = (address *) got_address();
+  return *got_entry;
+}
+
+void NativeGotJump::verify() const {
+  int inst = ubyte_at(0);
+  if (inst != instruction_code) {
+    tty->print_cr("Addr: " INTPTR_FORMAT " Code: 0x%x", p2i(instruction_address()),
+                  inst);
+    fatal("not an indirect rip jump");
+  }
+}
+
 void NativeCall::verify() {
   // Make sure code pattern is actually a call imm32 instruction.
   int inst = ubyte_at(0);
@@ -422,7 +540,12 @@

 void NativeJump::verify() {
   if (*(u_char*)instruction_address() != instruction_code) {
-    fatal("not a jump instruction");
+    // far jump
+    NativeMovConstReg* mov = nativeMovConstReg_at(instruction_address());
+    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
+    if (!jmp->is_jump_reg()) {
+      fatal("not a jump instruction");
+    }
   }
 }

@@ -514,6 +637,20 @@
 }

+address NativeFarJump::jump_destination() const {
+  NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
+  return (address)mov->data();
+}
+
+void NativeFarJump::verify() {
+  if (is_far_jump()) {
+    NativeMovConstReg* mov = nativeMovConstReg_at(addr_at(0));
+    NativeInstruction* jmp = nativeInstruction_at(mov->next_instruction_address());
+    if (jmp->is_jump_reg()) return;
+  }
+  fatal("not a jump instruction");
+}
+
 void NativePopReg::insert(address code_pos, Register reg) {
   assert(reg->encoding() < 8, "no space for REX");
   assert(NativePopReg::instruction_size == sizeof(char), "right address unit for update");
--- old/src/cpu/x86/vm/nativeInst_x86.hpp	2016-10-31 17:46:19.000000000 -0700
+++ new/src/cpu/x86/vm/nativeInst_x86.hpp	2016-10-31 17:46:19.000000000 -0700
@@ -38,6 +38,7 @@
 // - - NativeMovRegMem
 // - - NativeMovRegMemPatching
 // - - NativeJump
+// - - NativeFarJump
 // - - NativeIllegalOpCode
 // - - NativeGeneralJump
 // - - NativeReturn
@@ -63,6 +64,8 @@
   inline bool is_illegal();
   inline bool is_return();
   inline bool is_jump();
+  inline bool is_jump_reg();
+  inline bool is_far_jump();
   inline bool is_cond_jump();
   inline bool is_safepoint_poll();
   inline bool is_mov_literal64();
@@ -105,6 +108,47 @@
   return inst;
 }

+class NativePltCall: public NativeInstruction {
+public:
+  enum Intel_specific_constants {
+    instruction_code      = 0xE8,
+    instruction_size      = 5,
+    instruction_offset    = 0,
+    displacement_offset   = 1,
+    return_address_offset = 5
+  };
+  address instruction_address() const { return addr_at(instruction_offset); }
+  address next_instruction_address() const { return addr_at(return_address_offset); }
+  address displacement_address() const { return addr_at(displacement_offset); }
+  int displacement() const { return (jint) int_at(displacement_offset); }
+  address return_address() const { return addr_at(return_address_offset); }
+  address destination() const;
+  address plt_entry() const;
+  address plt_jump() const;
+  address plt_load_got() const;
+  address plt_resolve_call() const;
+  address plt_c2i_stub() const;
+  void set_stub_to_clean();
+
+  void reset_to_plt_resolve_call();
+  void set_destination_mt_safe(address dest);
+
+  void verify() const;
+};
+
+inline NativePltCall* nativePltCall_at(address address) {
+  NativePltCall* call = (NativePltCall*) address;
+#ifdef ASSERT
+  call->verify();
+#endif
+  return call;
+}
+
+inline NativePltCall* nativePltCall_before(address addr) {
+  address at = addr - NativePltCall::instruction_size;
+  return nativePltCall_at(at);
+}
+
 inline NativeCall* nativeCall_at(address address);
 // The NativeCall is an abstraction for accessing/manipulating native call imm32/rel32off
 // instructions (used to manipulate inline caches, primitive & dll calls, etc.).
@@ -129,9 +173,8 @@
   address destination() const;
   void set_destination(address dest) {
 #ifdef AMD64
-    assert((labs((intptr_t) dest - (intptr_t) return_address()) &
-            0xFFFFFFFF00000000) == 0,
-           "must be 32bit offset");
+    intptr_t disp = dest - return_address();
+    guarantee(disp == (intptr_t)(jint)disp, "must be 32-bit offset");
 #endif // AMD64
     set_int_at(displacement_offset, dest - return_address());
   }
@@ -158,6 +201,13 @@
            nativeCall_at(instr)->destination() == target;
   }

+#if INCLUDE_AOT
+  static bool is_far_call(address instr, address target) {
+    intptr_t disp = target - (instr + sizeof(int32_t));
+    return !Assembler::is_simm32(disp);
+  }
+#endif
+
   // MT-safe patching of a call instruction.
   static void insert(address code_pos, address entry);

@@ -380,6 +430,51 @@
   }
 };

+// destination is rbx or rax
+// mov rbx, [rip + offset]
+class NativeLoadGot: public NativeInstruction {
+#ifdef AMD64
+  static const bool has_rex = true;
+  static const int rex_size = 1;
+#else
+  static const bool has_rex = false;
+  static const int rex_size = 0;
+#endif
+public:
+  enum Intel_specific_constants {
+    rex_prefix = 0x48,
+    instruction_code = 0x8b,
+    modrm_rbx_code = 0x1d,
+    modrm_rax_code = 0x05,
+    instruction_length = 6 + rex_size,
+    offset_offset = 2 + rex_size
+  };
+
+  address instruction_address() const { return addr_at(0); }
+  address rip_offset_address() const { return addr_at(offset_offset); }
+  int rip_offset() const { return int_at(offset_offset); }
+  address return_address() const { return addr_at(instruction_length); }
+  address got_address() const { return return_address() + rip_offset(); }
+  address next_instruction_address() const { return return_address(); }
+  intptr_t data() const;
+  void set_data(intptr_t data) {
+    intptr_t *addr = (intptr_t *) got_address();
+    *addr = data;
+  }
+
+  void verify() const;
+private:
+  void report_and_fail() const;
+};
+
+inline NativeLoadGot* nativeLoadGot_at(address addr) {
+  NativeLoadGot* load = (NativeLoadGot*) addr;
+#ifdef ASSERT
+  load->verify();
+#endif
+  return load;
+}
+
 // jump rel32off

 class NativeJump: public NativeInstruction {
@@ -440,6 +535,29 @@
   return jump;
 }

+// far jump reg
+class NativeFarJump: public NativeInstruction {
+ public:
+  address jump_destination() const;
+
+  // Creation
+  inline friend NativeFarJump* nativeFarJump_at(address address);
+
+  void verify();
+
+  // Unit testing stuff
+  static void test() {}
+
+};
+
+inline NativeFarJump* nativeFarJump_at(address address) {
+  NativeFarJump* jump = (NativeFarJump*)(address);
+#ifdef ASSERT
+  jump->verify();
+#endif
+  return jump;
+}
+
 // Handles all kinds of jump on Intel. Long/far, conditional/unconditional
 class NativeGeneralJump: public NativeInstruction {
 public:
@@ -473,6 +591,36 @@
   return jump;
 }

+class NativeGotJump: public NativeInstruction {
+public:
+  enum Intel_specific_constants {
+    instruction_code = 0xff,
+    instruction_offset = 0,
+    instruction_size = 6,
+    rip_offset = 2
+  };
+
+  void verify() const;
+  address instruction_address() const { return addr_at(instruction_offset); }
+  address destination() const;
+  address return_address() const { return addr_at(instruction_size); }
+  int got_offset() const { return (jint) int_at(rip_offset); }
+  address got_address() const { return return_address() + got_offset(); }
+  address next_instruction_address() const { return addr_at(instruction_size); }
+  bool is_GotJump() const { return ubyte_at(0) == instruction_code; }
+
+  void set_jump_destination(address dest) {
+    address *got_entry = (address *) got_address();
+    *got_entry = dest;
+  }
+};
+
+inline NativeGotJump* nativeGotJump_at(address addr) {
+  NativeGotJump* jump = (NativeGotJump*)(addr);
+  debug_only(jump->verify());
+  return jump;
+}
+
 class NativePopReg : public NativeInstruction {
  public:
   enum Intel_specific_constants {
@@ -544,6 +692,12 @@
          ubyte_at(0) == NativeReturnX::instruction_code;
 }
 inline bool NativeInstruction::is_jump()         { return ubyte_at(0) == NativeJump::instruction_code ||
                                                           ubyte_at(0) == 0xEB; /* short jump */ }
+inline bool NativeInstruction::is_jump_reg()     {
+  int pos = 0;
+  if (ubyte_at(0) == Assembler::REX_B) pos = 1;
+  return ubyte_at(pos) == 0xFF && (ubyte_at(pos + 1) & 0xF0) == 0xE0;
+}
+inline bool NativeInstruction::is_far_jump()     { return is_mov_literal64(); }
 inline bool NativeInstruction::is_cond_jump()    { return (int_at(0) & 0xF0FF) == 0x800F /* long jump */ ||
                                                           (ubyte_at(0) & 0xF0) == 0x70;  /* short jump */ }
 inline bool NativeInstruction::is_safepoint_poll() {
--- old/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	2016-10-31 17:46:20.000000000 -0700
+++ new/src/cpu/x86/vm/sharedRuntime_x86_64.cpp	2016-10-31 17:46:20.000000000 -0700
@@ -800,7 +800,7 @@
   __ movptr(r11, Address(rbx, in_bytes(Method::from_compiled_offset())));

 #if INCLUDE_JVMCI
-  if (EnableJVMCI) {
+  if (EnableJVMCI || UseAOT) {
     // check if this call should be routed towards a specific entry point
     __ cmpptr(Address(r15_thread, in_bytes(JavaThread::jvmci_alternate_call_target_offset())), 0);
     Label no_alternative_target;
@@ -2758,7 +2758,7 @@
   // Setup code generation tools
   int pad = 0;
 #if INCLUDE_JVMCI
-  if (EnableJVMCI) {
+  if (EnableJVMCI || UseAOT) {
     pad += 512; // Increase the buffer size when compiling for JVMCI
   }
 #endif
@@ -2832,7 +2832,7 @@
   int implicit_exception_uncommon_trap_offset = 0;
   int uncommon_trap_offset = 0;

-  if (EnableJVMCI) {
+  if (EnableJVMCI || UseAOT) {
     implicit_exception_uncommon_trap_offset = __ pc() - start;

     __ pushptr(Address(r15_thread, in_bytes(JavaThread::jvmci_implicit_exception_pc_offset())));
@@ -2947,7 +2947,7 @@
   __ reset_last_Java_frame(false);

 #if INCLUDE_JVMCI
-  if (EnableJVMCI) {
+  if (EnableJVMCI || UseAOT) {
     __ bind(after_fetch_unroll_info_call);
   }
 #endif
@@ -3112,7 +3112,7 @@
   _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
   _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
 #if INCLUDE_JVMCI
-  if (EnableJVMCI) {
+  if (EnableJVMCI || UseAOT) {
     _deopt_blob->set_uncommon_trap_offset(uncommon_trap_offset);
     _deopt_blob->set_implicit_exception_uncommon_trap_offset(implicit_exception_uncommon_trap_offset);
   }
--- old/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	2016-10-31 17:46:20.000000000 -0700
+++ new/src/cpu/x86/vm/templateInterpreterGenerator_x86.cpp	2016-10-31 17:46:20.000000000 -0700
@@ -256,7 +256,7 @@
 #if INCLUDE_JVMCI
   // Check if we need to take lock at entry of synchronized method. This can
   // only occur on method entry so emit it only for vtos with step 0.
-  if (UseJVMCICompiler && state == vtos && step == 0) {
+  if ((UseJVMCICompiler || UseAOT) && state == vtos && step == 0) {
     Label L;
     __ cmpb(Address(thread, JavaThread::pending_monitorenter_offset()), 0);
     __ jcc(Assembler::zero, L);
--- old/src/cpu/x86/vm/x86_64.ad	2016-10-31 17:46:21.000000000 -0700
+++ new/src/cpu/x86/vm/x86_64.ad	2016-10-31 17:46:21.000000000 -0700
@@ -2147,6 +2147,9 @@
       ciEnv::current()->record_failure("CodeCache is full");
       return;
     }
+#if INCLUDE_AOT
+    CompiledStaticCall::emit_to_aot_stub(cbuf, mark);
+#endif
   }
 %}
--- old/src/cpu/zero/vm/compiledIC_zero.cpp	2016-10-31 17:46:22.000000000 -0700
+++ new/src/cpu/zero/vm/compiledIC_zero.cpp	2016-10-31 17:46:22.000000000 -0700
@@ -60,11 +60,11 @@
   return 0;
 }

-void CompiledStaticCall::set_to_interpreted(methodHandle callee, address entry) {
+void CompiledDirectStaticCall::set_to_interpreted(methodHandle callee, address entry) {
   ShouldNotReachHere(); // Only needed for COMPILER2.
 }

-void CompiledStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
+void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
   ShouldNotReachHere(); // Only needed for COMPILER2.
 }

@@ -72,7 +72,7 @@
 // Non-product mode code.
 #ifndef PRODUCT

-void CompiledStaticCall::verify() {
+void CompiledDirectStaticCall::verify() {
   ShouldNotReachHere(); // Only needed for COMPILER2.
 }

--- old/src/jdk.hotspot.agent/linux/native/libsaproc/symtab.c	2016-10-31 17:46:23.000000000 -0700
+++ new/src/jdk.hotspot.agent/linux/native/libsaproc/symtab.c	2016-10-31 17:46:22.000000000 -0700
@@ -387,8 +387,8 @@

     if (shdr->sh_type == sym_section) {
       ELF_SYM  *syms;
-      int j, n, rslt;
-      size_t size;
+      int rslt;
+      size_t size, n, j, htab_sz;

       // FIXME: there could be multiple data buffers associated with the
       // same ELF section. Here we can handle only one buffer. See man page
@@ -407,6 +407,15 @@

       // create hash table, we use hcreate_r, hsearch_r and hdestroy_r to
       // manipulate the hash table.
+
+      // NOTES section in the man page of hcreate_r says
+      // "Hash table implementations are usually more efficient when
+      // the table contains enough free space to minimize collisions.
+      // Typically, this means that nel should be at least 25% larger
+      // than the maximum number of elements that the caller expects
+      // to store in the table."
+      htab_sz = n*1.25;
+
       symtab->hash_table = (struct hsearch_data*) calloc(1, sizeof(struct hsearch_data));
       rslt = hcreate_r(n, symtab->hash_table);
       // guarantee(rslt, "unexpected failure: hcreate_r");
@@ -452,7 +461,6 @@
           symtab->symbols[j].offset = sym_value - baseaddr;
           item.key = sym_name;
           item.data = (void *)&(symtab->symbols[j]);
-
           hsearch_r(item, ENTER, &ret, symtab->hash_table);
         }
       }
--- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Dictionary.java	2016-10-31 17:46:23.000000000 -0700
+++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/memory/Dictionary.java	2016-10-31 17:46:23.000000000 -0700
@@ -102,4 +102,17 @@
     }
     return null;
   }
+
+  public boolean contains(Klass c, Oop classLoader) {
+    long hash = computeHash(c.getName(), classLoader);
+    int index = hashToIndex(hash);
+
+    for (DictionaryEntry entry = (DictionaryEntry) bucket(index); entry != null;
+         entry = (DictionaryEntry) entry.next()) {
+      if (entry.literalValue().equals(c.getAddress())) {
+        return true;
+      }
+    }
+    return false;
+  }
 }
--- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	2016-10-31 17:46:24.000000000 -0700
+++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/InstanceKlass.java	2016-10-31 17:46:24.000000000 -0700
@@ -29,6 +29,7 @@
 import sun.jvm.hotspot.classfile.ClassLoaderData;
 import sun.jvm.hotspot.debugger.*;
 import sun.jvm.hotspot.memory.*;
+import sun.jvm.hotspot.memory.Dictionary;
 import sun.jvm.hotspot.runtime.*;
 import sun.jvm.hotspot.types.*;
 import sun.jvm.hotspot.utilities.*;
@@ -64,6 +65,21 @@
   private static int CLASS_STATE_FULLY_INITIALIZED;
   private static int CLASS_STATE_INITIALIZATION_ERROR;

+  // _misc_flags constants
+  private static int MISC_REWRITTEN;
+  private static int MISC_HAS_NONSTATIC_FIELDS;
+  private static int MISC_SHOULD_VERIFY_CLASS;
+  private static int MISC_IS_ANONYMOUS;
+  private static int MISC_IS_CONTENDED;
+  private static int MISC_HAS_DEFAULT_METHODS;
+  private static int MISC_DECLARES_DEFAULT_METHODS;
+  private static int MISC_HAS_BEEN_REDEFINED;
+  private static int MISC_HAS_PASSED_FINGERPRINT_CHECK;
+  private static int MISC_IS_SCRATCH_CLASS;
+  private static int MISC_IS_SHARED_BOOT_CLASS;
+  private static int MISC_IS_SHARED_PLATFORM_CLASS;
+  private static int MISC_IS_SHARED_APP_CLASS;
+
   private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
     Type type            = db.lookupType("InstanceKlass");
     arrayKlasses         = new MetadataField(type.getAddressField("_array_klasses"), 0);
@@ -90,6 +106,7 @@
       breakpoints     = type.getAddressField("_breakpoints");
     }
     genericSignatureIndex = new CIntField(type.getCIntegerField("_generic_signature_index"), 0);
+    miscFlags            = new CIntField(type.getCIntegerField("_misc_flags"), 0);
     majorVersion         = new CIntField(type.getCIntegerField("_major_version"), 0);
     minorVersion         = new CIntField(type.getCIntegerField("_minor_version"), 0);
     headerSize           = type.getSize();
@@ -114,6 +131,19 @@
     CLASS_STATE_FULLY_INITIALIZED = db.lookupIntConstant("InstanceKlass::fully_initialized").intValue();
     CLASS_STATE_INITIALIZATION_ERROR = db.lookupIntConstant("InstanceKlass::initialization_error").intValue();

+    MISC_REWRITTEN = db.lookupIntConstant("InstanceKlass::_misc_rewritten").intValue();
+    MISC_HAS_NONSTATIC_FIELDS = db.lookupIntConstant("InstanceKlass::_misc_has_nonstatic_fields").intValue();
+    MISC_SHOULD_VERIFY_CLASS = db.lookupIntConstant("InstanceKlass::_misc_should_verify_class").intValue();
+    MISC_IS_ANONYMOUS = db.lookupIntConstant("InstanceKlass::_misc_is_anonymous").intValue();
+    MISC_IS_CONTENDED = db.lookupIntConstant("InstanceKlass::_misc_is_contended").intValue();
+    MISC_HAS_DEFAULT_METHODS = db.lookupIntConstant("InstanceKlass::_misc_has_default_methods").intValue();
+    MISC_DECLARES_DEFAULT_METHODS = db.lookupIntConstant("InstanceKlass::_misc_declares_default_methods").intValue();
+    MISC_HAS_BEEN_REDEFINED = db.lookupIntConstant("InstanceKlass::_misc_has_been_redefined").intValue();
+    MISC_HAS_PASSED_FINGERPRINT_CHECK = db.lookupIntConstant("InstanceKlass::_misc_has_passed_fingerprint_check").intValue();
+    MISC_IS_SCRATCH_CLASS = db.lookupIntConstant("InstanceKlass::_misc_is_scratch_class").intValue();
+    MISC_IS_SHARED_BOOT_CLASS = db.lookupIntConstant("InstanceKlass::_misc_is_shared_boot_class").intValue();
+    MISC_IS_SHARED_PLATFORM_CLASS = db.lookupIntConstant("InstanceKlass::_misc_is_shared_platform_class").intValue();
+    MISC_IS_SHARED_APP_CLASS = db.lookupIntConstant("InstanceKlass::_misc_is_shared_app_class").intValue();
   }

   public InstanceKlass(Address addr) {
@@ -149,6 +179,7 @@
   private static CIntField itableLen;
   private static AddressField breakpoints;
   private static CIntField genericSignatureIndex;
+  private static CIntField miscFlags;
   private static CIntField majorVersion;
   private static CIntField minorVersion;

@@ -243,7 +274,7 @@
     return getSizeHelper() * VM.getVM().getAddressSize();
   }

-  public long getSize() {
+  public long getSize() { // in number of bytes
     long wordLength = VM.getVM().getBytesPerWord();
     long size = getHeaderSize() +
                 (getVtableLen() +
@@ -252,9 +283,59 @@
     if (isInterface()) {
       size += wordLength;
     }
+    if (isAnonymous()) {
+      size += wordLength;
+    }
+    if (hasStoredFingerprint()) {
+      size += 8; // uint64_t
+    }
     return alignSize(size);
   }

+  private int getMiscFlags() {
+    return (int) miscFlags.getValue(this);
+  }
+
+  public boolean isAnonymous() {
+    return (getMiscFlags() & MISC_IS_ANONYMOUS) != 0;
+  }
+
+  public static boolean shouldStoreFingerprint() {
+    VM vm = VM.getVM();
+    if (vm.getCommandLineBooleanFlag("EnableJVMCI") && !vm.getCommandLineBooleanFlag("UseJVMCICompiler")) {
+      return true;
+    }
+    if (vm.getCommandLineBooleanFlag("DumpSharedSpaces")) {
+      return true;
+    }
+    return false;
+  }
+
+  public boolean hasStoredFingerprint() {
+    return shouldStoreFingerprint() || isShared();
+  }
+
+  public boolean isShared() {
+    VM vm = VM.getVM();
+    if (vm.isSharingEnabled()) {
+      // This is not the same implementation as the C++ function MetaspaceObj::is_shared()
+      //     bool MetaspaceObj::is_shared() const {
+      //       return MetaspaceShared::is_in_shared_space(this);
+      //     }
+      // However, MetaspaceShared::is_in_shared_space is complicated and hard to emulate in
+      // Java code, so let's do this by looking up from the shared dictionary. Of course,
+      // this works for shared InstanceKlass only and does not work for other types of
+      // MetaspaceObj in the CDS shared archive.
+      Dictionary sharedDictionary = vm.getSystemDictionary().sharedDictionary();
+      if (sharedDictionary != null) {
+        if (sharedDictionary.contains(this, null)) {
+          return true;
+        }
+      }
+    }
+    return false;
+  }
+
   public static long getHeaderSize() { return headerSize; }

   public short getFieldAccessFlags(int index) {
--- old/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java	2016-10-31 17:46:24.000000000 -0700
+++ new/src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/runtime/VM.java	2016-10-31 17:46:24.000000000 -0700
@@ -861,6 +861,12 @@
     return (flag == null) ? false: flag.getBool();
   }

+  public boolean getCommandLineBooleanFlag(String name) {
+    Flag flag = getCommandLineFlag(name);
+    return (flag == null) ? Boolean.FALSE:
+        (flag.getBool()? Boolean.TRUE: Boolean.FALSE);
+  }
+
   // returns null, if not available.
   public Flag[] getCommandLineFlags() {
     if (commandLineFlags == null) {
--- old/src/jdk.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/DebugInfo.java	2016-10-31 17:46:25.000000000 -0700
+++ new/src/jdk.vm.ci/share/classes/jdk.vm.ci.code/src/jdk/vm/ci/code/DebugInfo.java	2016-10-31 17:46:25.000000000 -0700
@@ -40,7 +40,7 @@

     private final BytecodePosition bytecodePosition;
     private ReferenceMap referenceMap;
-    @SuppressWarnings("unused") private final VirtualObject[] virtualObjectMapping;
+    private final VirtualObject[] virtualObjectMapping;
     private RegisterSaveLayout calleeSaveInfo;

     /**
@@ -102,6 +102,10 @@
         return referenceMap;
     }

+    public VirtualObject[] getVirtualObjectMapping() {
+        return virtualObjectMapping;
+    }
+
     /**
      * Sets the map from the registers (in the caller's frame) to the slots where they are saved in
      * the current frame.
--- old/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java	2016-10-31 17:46:26.000000000 -0700
+++ new/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/CompilerToVM.java	2016-10-31 17:46:26.000000000 -0700
@@ -333,7 +333,7 @@
      *             {@link HotSpotVMConfig#codeInstallResultDependenciesInvalid}.
      * @throws JVMCIError if there is something wrong with the compiled code or the metadata
      */
-    public native int getMetadata(TargetDescription target, HotSpotCompiledCode compiledCode, HotSpotMetaData metaData);
+    native int getMetadata(TargetDescription target, HotSpotCompiledCode compiledCode, HotSpotMetaData metaData);

     /**
      * Resets all compilation statistics.
@@ -605,6 +605,14 @@
     native int methodDataProfileDataSize(long metaspaceMethodData, int position);

     /**
+     * Gets the fingerprint for a given Klass*.
+     *
+     * @param metaspaceKlass
+     * @return the value of the fingerprint (zero for arrays and synthetic classes)
+     */
+    native long getFingerprint(long metaspaceKlass);
+
+    /**
      * Return the amount of native stack required for the interpreter frames represented by
      * {@code frame}. This is used when emitting the stack banging code to ensure that there is
      * enough space for the frames during deoptimization.
--- old/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java	2016-10-31 17:46:26.000000000 -0700
+++ new/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotJVMCIRuntime.java	2016-10-31 17:46:26.000000000 -0700
@@ -96,7 +96,8 @@
         TraceMethodDataFilter(String.class, null,
                         "Enables tracing of profiling info when read by JVMCI.",
                         "Empty value: trace all methods",
-                        "Non-empty value: trace methods whose fully qualified name contains the value.");
+                        "Non-empty value: trace methods whose fully qualified name contains the value."),
+        UseProfilingInformation(Boolean.class, true, "");
         // @formatter:on

         /**
--- old/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMetaData.java	2016-10-31 17:46:27.000000000 -0700
+++ new/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotMetaData.java	2016-10-31 17:46:27.000000000 -0700
@@ -22,38 +22,51 @@
  */
 package jdk.vm.ci.hotspot;

-/**
- * Encapsulates the VM metadata generated by {@link CompilerToVM#getMetadata}.
- */
+import static jdk.vm.ci.hotspot.HotSpotJVMCIRuntime.runtime;
+
+import jdk.vm.ci.code.TargetDescription;
+
 public class HotSpotMetaData {
-    @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD", justification = "field is set by the native part") private byte[] pcDescBytes;
-    @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD", justification = "field is set by the native part") private byte[] scopesDescBytes;
-    @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD", justification = "field is set by the native part") private byte[] relocBytes;
-    @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD", justification = "field is set by the native part") private byte[] exceptionBytes;
-    @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD", justification = "field is set by the native part") private byte[] oopMaps;
-    @SuppressFBWarnings(value = "UWF_UNWRITTEN_FIELD", justification = "field is set by the native part") private String[] metadata;
+    private byte[] pcDescBytes;
+    private byte[] scopesDescBytes;
+    private byte[] relocBytes;
+    private byte[] exceptionBytes;
+    private byte[] oopMaps;
+    private String[] metadata;
+
+    public HotSpotMetaData(TargetDescription target, HotSpotCompiledCode compiledMethod) {
+        // Assign the fields default values...
+        pcDescBytes = new byte[0];
+        scopesDescBytes = new byte[0];
+        relocBytes = new byte[0];
+        exceptionBytes = new byte[0];
+        oopMaps = new byte[0];
+        metadata = new String[0];
+        // ...some of them will be overwritten by the VM:
+        runtime().getCompilerToVM().getMetadata(target, compiledMethod, this);
+    }

     public byte[] pcDescBytes() {
-        return pcDescBytes != null ? pcDescBytes : new byte[0];
+        return pcDescBytes;
     }

     public byte[] scopesDescBytes() {
-        return scopesDescBytes != null ? scopesDescBytes : new byte[0];
+        return scopesDescBytes;
     }

     public byte[] relocBytes() {
-        return relocBytes != null ? relocBytes : new byte[0];
+        return relocBytes;
     }

     public byte[] exceptionBytes() {
-        return exceptionBytes != null ? exceptionBytes : new byte[0];
+        return exceptionBytes;
     }

     public byte[] oopMaps() {
-        return oopMaps != null ? oopMaps : new byte[0];
+        return oopMaps;
     }

     public String[] metadataEntries() {
-        return metadata != null ? metadata : new String[0];
metadata : new String[0]; + return metadata; } } --- old/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java 2016-10-31 17:46:28.000000000 -0700 +++ new/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedJavaMethodImpl.java 2016-10-31 17:46:27.000000000 -0700 @@ -428,7 +428,7 @@ public ProfilingInfo getProfilingInfo(boolean includeNormal, boolean includeOSR) { ProfilingInfo info; - if (methodData == null) { + if (Option.UseProfilingInformation.getBoolean() && methodData == null) { long metaspaceMethodData = UNSAFE.getAddress(metaspaceMethod + config().methodDataOffset); if (metaspaceMethodData != 0) { methodData = new HotSpotMethodData(metaspaceMethodData, this); --- old/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectType.java 2016-10-31 17:46:28.000000000 -0700 +++ new/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectType.java 2016-10-31 17:46:28.000000000 -0700 @@ -103,6 +103,8 @@ int layoutHelper(); + long getFingerprint(); + HotSpotResolvedObjectType getEnclosingType(); ResolvedJavaMethod getClassInitializer(); --- old/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectTypeImpl.java 2016-10-31 17:46:29.000000000 -0700 +++ new/src/jdk.vm.ci/share/classes/jdk.vm.ci.hotspot/src/jdk/vm/ci/hotspot/HotSpotResolvedObjectTypeImpl.java 2016-10-31 17:46:29.000000000 -0700 @@ -478,6 +478,11 @@ return UNSAFE.getInt(getMetaspaceKlass() + config.klassLayoutHelperOffset); } + @Override + public long getFingerprint() { + return compilerToVM().getFingerprint(getMetaspaceKlass()); + } + synchronized HotSpotResolvedJavaMethod createMethod(long metaspaceMethod) { HotSpotResolvedJavaMethodImpl method = null; if (methodCache == null) { --- old/src/jdk.vm.ci/share/classes/module-info.java 2016-10-31 17:46:30.000000000 -0700 +++ new/src/jdk.vm.ci/share/classes/module-info.java 2016-10-31 17:46:29.000000000 -0700 @@ -35,4 +35,38 @@ jdk.vm.ci.hotspot.amd64.AMD64HotSpotJVMCIBackendFactory; provides jdk.vm.ci.hotspot.HotSpotJVMCIBackendFactory with jdk.vm.ci.hotspot.sparc.SPARCHotSpotJVMCIBackendFactory; + + exports jdk.vm.ci.aarch64 to + jdk.vm.compiler; + exports jdk.vm.ci.amd64 to + jdk.aot, + jdk.vm.compiler; + exports jdk.vm.ci.code to + jdk.aot, + jdk.vm.compiler; + exports jdk.vm.ci.code.site to + jdk.aot, + jdk.vm.compiler; + exports jdk.vm.ci.code.stack to + jdk.vm.compiler; + exports jdk.vm.ci.common to + jdk.aot, + jdk.vm.compiler; + exports jdk.vm.ci.hotspot to + jdk.aot, + jdk.vm.compiler; + exports jdk.vm.ci.hotspot.aarch64 to + jdk.vm.compiler; + exports jdk.vm.ci.hotspot.amd64 to + jdk.vm.compiler; + exports jdk.vm.ci.hotspot.sparc to + jdk.vm.compiler; + exports jdk.vm.ci.meta to + jdk.aot, + jdk.vm.compiler; + exports jdk.vm.ci.runtime to + jdk.aot, + jdk.vm.compiler; + exports jdk.vm.ci.sparc to + jdk.vm.compiler; } --- old/src/share/vm/c1/c1_Compilation.cpp 2016-10-31 17:46:30.000000000 -0700 +++ new/src/share/vm/c1/c1_Compilation.cpp 2016-10-31 17:46:30.000000000 -0700 @@ -328,10 +328,9 @@ locs_buffer_size / sizeof(relocInfo)); code->initialize_consts_size(Compilation::desired_max_constant_size()); // Call stubs + two deopt handlers (regular and MH) + exception handler - int call_stub_size = LIR_Assembler::call_stub_size; - int stub_size = (call_stub_estimate * call_stub_size) + - LIR_Assembler::exception_handler_size + - (2 * LIR_Assembler::deopt_handler_size); + int stub_size = 
(call_stub_estimate * LIR_Assembler::call_stub_size()) + + LIR_Assembler::exception_handler_size() + + (2 * LIR_Assembler::deopt_handler_size()); if (stub_size >= code->insts_capacity()) return false; code->initialize_stubs_size(stub_size); return true; --- old/src/share/vm/c1/c1_LIRAssembler.hpp 2016-10-31 17:46:31.000000000 -0700 +++ new/src/share/vm/c1/c1_LIRAssembler.hpp 2016-10-31 17:46:31.000000000 -0700 @@ -260,6 +260,21 @@ #include CPU_HEADER(c1_LIRAssembler) + static int call_stub_size() { + if (UseAOT) { + return _call_stub_size + _call_aot_stub_size; + } else { + return _call_stub_size; + } + } + + static int exception_handler_size() { + return _exception_handler_size; + } + + static int deopt_handler_size() { + return _deopt_handler_size; + } }; #endif // SHARE_VM_C1_C1_LIRASSEMBLER_HPP --- old/src/share/vm/classfile/classFileParser.cpp 2016-10-31 17:46:31.000000000 -0700 +++ new/src/share/vm/classfile/classFileParser.cpp 2016-10-31 17:46:31.000000000 -0700 @@ -30,6 +30,7 @@ #include "classfile/javaClasses.inline.hpp" #include "classfile/moduleEntry.hpp" #include "classfile/symbolTable.hpp" +#include "classfile/dictionary.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/verificationType.hpp" #include "classfile/verifier.hpp" @@ -71,6 +72,7 @@ #if INCLUDE_CDS #include "classfile/systemDictionaryShared.hpp" #endif +#include "aot/aotLoader.hpp" // We generally try to create the oops directly when parsing, rather than // allocating temporary data structures and copying the bytes twice. A @@ -5190,6 +5192,19 @@ assert(_klass == ik, "invariant"); + ik->set_has_passed_fingerprint_check(false); + if (UseAOT && ik->supers_have_passed_fingerprint_checks()) { + uint64_t aot_fp = AOTLoader::get_saved_fingerprint(ik); + if (aot_fp != 0 && aot_fp == _stream->compute_fingerprint()) { + // This class matches with a class saved in an AOT library + ik->set_has_passed_fingerprint_check(true); + } else { + ResourceMark rm; + log_info(classfingerprint)("%s : expected = " PTR64_FORMAT " actual = " PTR64_FORMAT, + ik->external_name(), aot_fp, _stream->compute_fingerprint()); + } + } + return ik; } --- old/src/share/vm/classfile/classFileStream.cpp 2016-10-31 17:46:32.000000000 -0700 +++ new/src/share/vm/classfile/classFileStream.cpp 2016-10-31 17:46:32.000000000 -0700 @@ -24,6 +24,8 @@ #include "precompiled.hpp" #include "classfile/classFileStream.hpp" +#include "classfile/classLoader.hpp" +#include "classfile/dictionary.hpp" #include "classfile/vmSymbols.hpp" #include "memory/resourceArea.hpp" @@ -133,3 +135,12 @@ } _current += length * 4; } + +uint64_t ClassFileStream::compute_fingerprint() const { + int classfile_size = length(); + int classfile_crc = ClassLoader::crc32(0, (const char*)buffer(), length()); + uint64_t fingerprint = (uint64_t(classfile_size) << 32) | uint64_t(uint32_t(classfile_crc)); + assert(fingerprint != 0, "must not be zero"); + + return fingerprint; +} --- old/src/share/vm/classfile/classFileStream.hpp 2016-10-31 17:46:33.000000000 -0700 +++ new/src/share/vm/classfile/classFileStream.hpp 2016-10-31 17:46:33.000000000 -0700 @@ -150,6 +150,8 @@ // Tells whether eos is reached bool at_eos() const { return _current == _buffer_end; } + + uint64_t compute_fingerprint() const; }; #endif // SHARE_VM_CLASSFILE_CLASSFILESTREAM_HPP --- old/src/share/vm/classfile/klassFactory.cpp 2016-10-31 17:46:33.000000000 -0700 +++ new/src/share/vm/classfile/klassFactory.cpp 2016-10-31 17:46:33.000000000 -0700 @@ -212,6 +212,10 @@ result->set_cached_class_file(cached_class_file); } + 
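For reference, compute_fingerprint above packs the class-file length into the high 32 bits and the CRC32 of the bytes into the low 32 bits; that is why zero can double as "no fingerprint" in the AOT check in classFileParser.cpp (and in the CDS path below, which requires aot_fp != 0 && aot_fp == cds_fp). The same packing in Java, a sketch with java.util.zip.CRC32 standing in for the native ClassLoader::crc32:

    import java.util.zip.CRC32;

    final class Fingerprint {
        static long compute(byte[] classfile) {
            CRC32 crc = new CRC32();
            crc.update(classfile, 0, classfile.length);
            long size = classfile.length & 0xFFFFFFFFL;
            return (size << 32) | crc.getValue(); // high word: size, low word: crc32
        }
    }
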
if (InstanceKlass::should_store_fingerprint()) { + result->store_fingerprint(!result->is_anonymous() ? stream->compute_fingerprint() : 0); + } + TRACE_KLASS_CREATION(result, parser, THREAD); #if INCLUDE_CDS && INCLUDE_JVMTI --- old/src/share/vm/classfile/systemDictionary.cpp 2016-10-31 17:46:34.000000000 -0700 +++ new/src/share/vm/classfile/systemDictionary.cpp 2016-10-31 17:46:34.000000000 -0700 @@ -81,6 +81,7 @@ #if INCLUDE_TRACE #include "trace/tracing.hpp" #endif +#include "aot/aotLoader.hpp" Dictionary* SystemDictionary::_dictionary = NULL; PlaceholderTable* SystemDictionary::_placeholders = NULL; @@ -1158,13 +1159,21 @@ Symbol* h_name = k->name(); assert(class_name == NULL || class_name == h_name, "name mismatch"); + bool define_succeeded = false; // Add class just loaded // If a class loader supports parallel classloading handle parallel define requests // find_or_define_instance_class may return a different InstanceKlass if (is_parallelCapable(class_loader)) { - k = find_or_define_instance_class(h_name, class_loader, k, CHECK_NULL); + instanceKlassHandle defined_k = find_or_define_instance_class(h_name, class_loader, k, CHECK_NULL); + if (k() == defined_k()) { + // we have won over other concurrent threads (if any) that are + // competing to define the same class. + define_succeeded = true; + } + k = defined_k; } else { define_instance_class(k, CHECK_NULL); + define_succeeded = true; } // Make sure we have an entry in the SystemDictionary on success @@ -1408,6 +1417,19 @@ // notify a class loaded from shared object ClassLoadingService::notify_class_loaded(ik(), true /* shared class */); } + + ik->set_has_passed_fingerprint_check(false); + if (UseAOT && ik->supers_have_passed_fingerprint_checks()) { + uint64_t aot_fp = AOTLoader::get_saved_fingerprint(ik()); + uint64_t cds_fp = ik->get_stored_fingerprint(); + if (aot_fp != 0 && aot_fp == cds_fp) { + // This class matches with a class saved in an AOT library + ik->set_has_passed_fingerprint_check(true); + } else { + ResourceMark rm; + log_info(classfingerprint)("%s : expected = " PTR64_FORMAT " actual = " PTR64_FORMAT, ik->external_name(), aot_fp, cds_fp); + } + } return ik; } #endif // INCLUDE_CDS @@ -1494,7 +1516,9 @@ // find_or_define_instance_class may return a different InstanceKlass if (!k.is_null()) { - k = find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh)); + instanceKlassHandle defined_k = + find_or_define_instance_class(class_name, class_loader, k, CHECK_(nh)); + k = defined_k; } return k; } else { --- old/src/share/vm/code/codeBlob.hpp 2016-10-31 17:46:35.000000000 -0700 +++ new/src/share/vm/code/codeBlob.hpp 2016-10-31 17:46:35.000000000 -0700 @@ -41,7 +41,8 @@ NonNMethod = 2, // Non-nmethods like Buffers, Adapters and Runtime Stubs All = 3, // All types (No code cache segmentation) Pregenerated = 4, // Special blobs, managed by CodeCacheExtensions - NumTypes = 5 // Number of CodeBlobTypes + AOT = 5, // AOT methods + NumTypes = 6 // Number of CodeBlobTypes }; }; @@ -118,6 +119,7 @@ virtual bool is_safepoint_stub() const { return false; } virtual bool is_adapter_blob() const { return false; } virtual bool is_method_handles_adapter_blob() const { return false; } + virtual bool is_aot() const { return false; } virtual bool is_compiled() const { return false; } inline bool is_compiled_by_c1() const { return _type == compiler_c1; }; @@ -131,6 +133,7 @@ nmethod* as_nmethod() { assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; } CompiledMethod* as_compiled_method_or_null() { return 
is_compiled() ? (CompiledMethod*) this : NULL; } CompiledMethod* as_compiled_method() { assert(is_compiled(), "must be compiled"); return (CompiledMethod*) this; } + CodeBlob* as_codeblob_or_null() const { return (CodeBlob*) this; } // Boundaries address header_begin() const { return (address) this; } @@ -205,6 +208,7 @@ // Transfer ownership of comments to this CodeBlob void set_strings(CodeStrings& strings) { + assert(!is_aot(), "invalid on aot"); _strings.assign(strings); } --- old/src/share/vm/code/codeCache.cpp 2016-10-31 17:46:35.000000000 -0700 +++ new/src/share/vm/code/codeCache.cpp 2016-10-31 17:46:35.000000000 -0700 @@ -59,6 +59,7 @@ #include "opto/compile.hpp" #include "opto/node.hpp" #endif +#include "aot/aotLoader.hpp" // Helper class for printing in CodeCache class CodeBlob_sizes { @@ -128,6 +129,8 @@ // Iterate over all CodeHeaps #define FOR_ALL_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _heaps->begin(); heap != _heaps->end(); ++heap) +#define FOR_ALL_NMETHOD_HEAPS(heap) for (GrowableArrayIterator<CodeHeap*> heap = _nmethod_heaps->begin(); heap != _nmethod_heaps->end(); ++heap) + // Iterate over all CodeBlobs (cb) on the given CodeHeap #define FOR_ALL_BLOBS(cb, heap) for (CodeBlob* cb = first_blob(heap); cb != NULL; cb = next_blob(heap, cb)) @@ -139,6 +142,8 @@ // Initialize array of CodeHeaps GrowableArray<CodeHeap*>* CodeCache::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); +GrowableArray<CodeHeap*>* CodeCache::_compiled_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); +GrowableArray<CodeHeap*>* CodeCache::_nmethod_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<CodeHeap*> (CodeBlobType::All, true); void CodeCache::check_heap_sizes(size_t non_nmethod_size, size_t profiled_size, size_t non_profiled_size, size_t cache_size, bool all_set) { size_t total_size = non_nmethod_size + profiled_size + non_profiled_size; @@ -365,6 +370,28 @@ return NULL; } +int CodeCache::code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs) { + if (lhs->code_blob_type() == rhs->code_blob_type()) { + return (lhs > rhs) ? 1 : ((lhs < rhs) ?
-1 : 0); + } else { + return lhs->code_blob_type() - rhs->code_blob_type(); + } +} + +void CodeCache::add_heap(CodeHeap* heap) { + assert(!Universe::is_fully_initialized(), "late heap addition?"); + + _heaps->insert_sorted<code_heap_compare>(heap); + + int type = heap->code_blob_type(); + if (code_blob_type_accepts_compiled(type)) { + _compiled_heaps->insert_sorted<code_heap_compare>(heap); + } + if (code_blob_type_accepts_nmethod(type)) { + _nmethod_heaps->insert_sorted<code_heap_compare>(heap); + } +} + void CodeCache::add_heap(ReservedSpace rs, const char* name, int code_blob_type) { // Check if heap is needed if (!heap_available(code_blob_type)) { @@ -373,7 +400,7 @@ // Create CodeHeap CodeHeap* heap = new CodeHeap(name, code_blob_type); - _heaps->append(heap); + add_heap(heap); // Reserve Space size_t size_initial = MIN2(InitialCodeCacheSize, rs.size()); @@ -389,7 +416,7 @@ CodeHeap* CodeCache::get_code_heap(const CodeBlob* cb) { assert(cb != NULL, "CodeBlob is null"); FOR_ALL_HEAPS(heap) { - if ((*heap)->contains(cb)) { + if ((*heap)->contains(cb->code_begin())) { return *heap; } } @@ -426,10 +453,6 @@ return (CodeBlob*)heap->next(cb); } -CodeBlob* CodeCache::next_blob(CodeBlob* cb) { - return next_blob(get_code_heap(cb), cb); -} - /** * Do not seize the CodeCache lock here--if the caller has not * already done so, we are going to lose bigtime, since the code @@ -494,7 +517,7 @@ } if (PrintCodeCacheExtension) { ResourceMark rm; - if (_heaps->length() >= 1) { + if (_nmethod_heaps->length() >= 1) { tty->print("%s", heap->name()); } else { tty->print("CodeCache"); } @@ -559,6 +582,10 @@ return false; } +bool CodeCache::contains(nmethod *nm) { + return contains((void *)nm); +} + // This method is safe to call without holding the CodeCache_lock, as long as a dead CodeBlob is not // looked up (i.e., one that has been marked for deletion). It only depends on the _segmap to contain // valid indices, which it will always do, as long as the CodeBlob is not in the process of being recycled.
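code_heap_compare above orders heaps primarily by code blob type and breaks ties by heap address, so the three sorted lists maintained by add_heap stay in a deterministic order. The same ordering expressed as a Java comparator (the CodeHeap record is a hypothetical stand-in for the C++ class):

    import java.util.Comparator;

    record CodeHeap(int codeBlobType, long baseAddress) {}

    final class HeapOrder {
        // primary key: blob type; tie-break: address, mirroring code_heap_compare
        static final Comparator<CodeHeap> BY_TYPE_THEN_ADDRESS =
            Comparator.comparingInt(CodeHeap::codeBlobType)
                      .thenComparingLong(CodeHeap::baseAddress);
    }
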
@@ -575,8 +602,8 @@ // NMT can walk the stack before code cache is created if (_heaps != NULL && !_heaps->is_empty()) { FOR_ALL_HEAPS(heap) { - CodeBlob* result = (CodeBlob*) (*heap)->find_start(start); - if (result != NULL && result->blob_contains((address)start)) { + CodeBlob* result = (*heap)->find_blob_unsafe(start); + if (result != NULL) { return result; } } @@ -592,7 +619,7 @@ void CodeCache::blobs_do(void f(CodeBlob* nm)) { assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { FOR_ALL_BLOBS(cb, *heap) { f(cb); } @@ -613,6 +640,7 @@ while(iter.next_alive()) { iter.method()->metadata_do(f); } + AOTLoader::metadata_do(f); } int CodeCache::alignment_unit() { @@ -634,11 +662,10 @@ void CodeCache::blobs_do(CodeBlobClosure* f) { assert_locked_or_safepoint(CodeCache_lock); - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { FOR_ALL_BLOBS(cb, *heap) { if (cb->is_alive()) { f->do_code_blob(cb); - #ifdef ASSERT if (cb->is_nmethod()) ((nmethod*)cb)->verify_scavenge_root_oops(); @@ -835,13 +862,12 @@ int count = 0; FOR_ALL_HEAPS(heap) { FOR_ALL_BLOBS(cb, *heap) { - if (cb->is_nmethod()) { - nmethod* nm = (nmethod*)cb; + CompiledMethod *nm = cb->as_compiled_method_or_null(); + if (nm != NULL) { count += nm->verify_icholder_relocations(); } } } - assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() == CompiledICHolder::live_count(), "must agree"); #endif @@ -902,7 +928,7 @@ int CodeCache::nmethod_count() { int count = 0; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { count += (*heap)->nmethod_count(); } return count; @@ -933,7 +959,7 @@ size_t CodeCache::capacity() { size_t cap = 0; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { cap += (*heap)->capacity(); } return cap; @@ -946,7 +972,7 @@ size_t CodeCache::unallocated_capacity() { size_t unallocated_cap = 0; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { unallocated_cap += (*heap)->unallocated_capacity(); } return unallocated_cap; @@ -954,7 +980,7 @@ size_t CodeCache::max_capacity() { size_t max_cap = 0; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { max_cap += (*heap)->max_capacity(); } return max_cap; @@ -980,7 +1006,7 @@ size_t CodeCache::bytes_allocated_in_freelists() { size_t allocated_bytes = 0; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { allocated_bytes += (*heap)->allocated_in_freelist(); } return allocated_bytes; @@ -988,7 +1014,7 @@ int CodeCache::allocated_segments() { int number_of_segments = 0; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { number_of_segments += (*heap)->allocated_segments(); } return number_of_segments; @@ -996,7 +1022,7 @@ size_t CodeCache::freelists_length() { size_t length = 0; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { length += (*heap)->freelist_length(); } return length; @@ -1039,6 +1065,8 @@ void codeCache_init() { CodeCache::initialize(); + // Load AOT libraries and add AOT code heaps. 
+ AOTLoader::initialize(); } //------------------------------------------------------------------------------------------------ @@ -1102,6 +1130,15 @@ return (CompiledMethod*)cb; } +bool CodeCache::is_far_target(address target) { +#if INCLUDE_AOT + return NativeCall::is_far_call(_low_bound, target) || + NativeCall::is_far_call(_high_bound, target); +#else + return false; +#endif +} + #ifdef HOTSWAP int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) { MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); @@ -1204,7 +1241,7 @@ void CodeCache::flush_evol_dependents_on(instanceKlassHandle ev_k_h) { // --- Compile_lock is not held. However we are at a safepoint. assert_locked_or_safepoint(Compile_lock); - if (number_of_nmethods_with_dependencies() == 0) return; + if (number_of_nmethods_with_dependencies() == 0 && !UseAOT) return; // CodeCache can only be updated by a thread_in_VM and they will all be // stopped during the safepoint so CodeCache will be safe to update without @@ -1316,7 +1353,7 @@ void CodeCache::print_memory_overhead() { size_t wasted_bytes = 0; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { CodeHeap* curr_heap = *heap; for (CodeBlob* cb = (CodeBlob*)curr_heap->first(); cb != NULL; cb = (CodeBlob*)curr_heap->next(cb)) { HeapBlock* heap_block = ((HeapBlock*)cb) - 1; @@ -1362,8 +1399,8 @@ ResourceMark rm; int i = 0; - FOR_ALL_HEAPS(heap) { - if ((_heaps->length() >= 1) && Verbose) { + FOR_ALL_NMETHOD_HEAPS(heap) { + if ((_nmethod_heaps->length() >= 1) && Verbose) { tty->print_cr("-- %s --", (*heap)->name()); } FOR_ALL_BLOBS(cb, *heap) { @@ -1459,7 +1496,7 @@ CodeBlob_sizes live; CodeBlob_sizes dead; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { FOR_ALL_BLOBS(cb, *heap) { if (!cb->is_alive()) { dead.add(cb); @@ -1485,7 +1522,7 @@ int number_of_blobs = 0; int number_of_oop_maps = 0; int map_size = 0; - FOR_ALL_HEAPS(heap) { + FOR_ALL_NMETHOD_HEAPS(heap) { FOR_ALL_BLOBS(cb, *heap) { if (cb->is_alive()) { number_of_blobs++; @@ -1568,35 +1605,3 @@ unallocated_capacity()); } -// Initialize iterator to given compiled method -void CompiledMethodIterator::initialize(CompiledMethod* cm) { - _code_blob = (CodeBlob*)cm; - if (!SegmentedCodeCache) { - // Iterate over all CodeBlobs - _code_blob_type = CodeBlobType::All; - } else if (cm != NULL) { - _code_blob_type = CodeCache::get_code_blob_type(cm); - } else { - // Only iterate over method code heaps, starting with non-profiled - _code_blob_type = CodeBlobType::MethodNonProfiled; - } -} - -// Advance iterator to the next compiled method in the current code heap -bool CompiledMethodIterator::next_compiled_method() { - // Get first method CodeBlob - if (_code_blob == NULL) { - _code_blob = CodeCache::first_blob(_code_blob_type); - if (_code_blob == NULL) { - return false; - } else if (_code_blob->is_nmethod()) { - return true; - } - } - // Search for next method CodeBlob - _code_blob = CodeCache::next_blob(_code_blob); - while (_code_blob != NULL && !_code_blob->is_compiled()) { - _code_blob = CodeCache::next_blob(_code_blob); - } - return _code_blob != NULL; -} --- old/src/share/vm/code/codeCache.hpp 2016-10-31 17:46:36.000000000 -0700 +++ new/src/share/vm/code/codeCache.hpp 2016-10-31 17:46:36.000000000 -0700 @@ -26,6 +26,7 @@ #define SHARE_VM_CODE_CODECACHE_HPP #include "code/codeBlob.hpp" +#include "code/codeCacheExtensions.hpp" #include "code/nmethod.hpp" #include "memory/allocation.hpp" #include "memory/heap.hpp" @@ -77,13 +78,14 @@ class CodeCache : AllStatic { friend class 
VMStructs; friend class JVMCIVMStructs; - friend class NMethodIterator; - friend class CompiledMethodIterator; + template <class T, class Filter> friend class CodeBlobIterator; friend class WhiteBox; friend class CodeCacheLoader; private: // CodeHeaps of the cache static GrowableArray<CodeHeap*>* _heaps; + static GrowableArray<CodeHeap*>* _compiled_heaps; + static GrowableArray<CodeHeap*>* _nmethod_heaps; static address _low_bound; // Lower bound of CodeHeap addresses static address _high_bound; // Upper bound of CodeHeap addresses @@ -110,8 +112,7 @@ // Iteration static CodeBlob* first_blob(CodeHeap* heap); // Returns the first CodeBlob on the given CodeHeap static CodeBlob* first_blob(int code_blob_type); // Returns the first CodeBlob of the given type - static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the first alive CodeBlob on the given CodeHeap - static CodeBlob* next_blob(CodeBlob* cb); // Returns the next CodeBlob of the given type succeeding the given CodeBlob + static CodeBlob* next_blob(CodeHeap* heap, CodeBlob* cb); // Returns the next CodeBlob on the given CodeHeap static size_t bytes_allocated_in_freelists(); static int allocated_segments(); @@ -121,10 +122,20 @@ static void prune_scavenge_root_nmethods(); static void unlink_scavenge_root_nmethod(nmethod* nm, nmethod* prev); + // Make private to prevent unsafe calls. Not all CodeBlob*'s are embedded in a CodeHeap. + static bool contains(CodeBlob *p) { fatal("don't call me!"); return false; } + public: // Initialization static void initialize(); + static int code_heap_compare(CodeHeap* const &lhs, CodeHeap* const &rhs); + + static void add_heap(CodeHeap* heap); + static const GrowableArray<CodeHeap*>* heaps() { return _heaps; } + static const GrowableArray<CodeHeap*>* compiled_heaps() { return _compiled_heaps; } + static const GrowableArray<CodeHeap*>* nmethod_heaps() { return _nmethod_heaps; } + // Allocation/administration static CodeBlob* allocate(int size, int code_blob_type, int orig_code_blob_type = CodeBlobType::All); // allocates a new CodeBlob static void commit(CodeBlob* cb); // called when the allocated CodeBlob has been filled @@ -132,6 +143,7 @@ static int alignment_offset(); // guaranteed offset of first CodeBlob byte within alignment unit (i.e., allocation header) static void free(CodeBlob* cb); // frees a CodeBlob static bool contains(void *p); // returns whether p is included + static bool contains(nmethod* nm); // returns whether nm is included static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods @@ -192,6 +204,9 @@ static address high_bound() { return _high_bound; } static address high_bound(int code_blob_type); + // Have to use far call instructions to call this pc.
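The far-call test declared below only needs the two cache bounds: every pc in the CodeCache lies between _low_bound and _high_bound, so a target that is near-callable from both extremes is near-callable from any call site in between. A sketch of that reasoning, assuming the signed 32-bit (rel32) displacement limit of x86_64 near calls:

    final class FarTarget {
        static boolean isFarCall(long from, long to) {
            long delta = to - from;
            return delta != (int) delta; // does not fit a signed 32-bit displacement
        }

        static boolean isFarTarget(long lowBound, long highBound, long target) {
            // reachable from both extremes implies reachable from every pc in [lowBound, highBound]
            return isFarCall(lowBound, target) || isFarCall(highBound, target);
        }
    }
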
+ static bool is_far_target(address pc); + // Profiling static size_t capacity(); static size_t unallocated_capacity(int code_blob_type); @@ -208,11 +223,21 @@ // Returns true if an own CodeHeap for the given CodeBlobType is available static bool heap_available(int code_blob_type); - // Returns the CodeBlobType for the given nmethod + // Returns the CodeBlobType for the given CompiledMethod static int get_code_blob_type(CompiledMethod* cm) { return get_code_heap(cm)->code_blob_type(); } + static bool code_blob_type_accepts_compiled(int type) { + bool result = type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled; + AOT_ONLY( result = result || type == CodeBlobType::AOT; ) + return result; + } + + static bool code_blob_type_accepts_nmethod(int type) { + return type == CodeBlobType::All || type <= CodeBlobType::MethodProfiled; + } + // Returns the CodeBlobType for the given compilation level static int get_code_blob_type(int comp_level) { if (comp_level == CompLevel_none || @@ -264,35 +289,47 @@ // Iterator to iterate over nmethods in the CodeCache. -class NMethodIterator : public StackObj { +template <class T, class Filter> class CodeBlobIterator : public StackObj { private: CodeBlob* _code_blob; // Current CodeBlob - int _code_blob_type; // Refers to current CodeHeap + GrowableArrayIterator<CodeHeap*> _heap; + GrowableArrayIterator<CodeHeap*> _end; public: - NMethodIterator() { - initialize(NULL); // Set to NULL, initialized by first call to next() - } - - NMethodIterator(nmethod* nm) { - initialize(nm); + CodeBlobIterator(T* nm = NULL) { + if (Filter::heaps() == NULL) { + return; + } + _heap = Filter::heaps()->begin(); + _end = Filter::heaps()->end(); + // If set to NULL, initialized by first call to next() + _code_blob = (CodeBlob*)nm; + if (nm != NULL) { + address start = nm->code_begin(); + while(!(*_heap)->contains(start)) { + ++_heap; + } + assert((*_heap)->contains(start), "match not found"); + } } - // Advance iterator to next nmethod + // Advance iterator to next blob bool next() { assert_locked_or_safepoint(CodeCache_lock); - assert(_code_blob_type < CodeBlobType::NumTypes, "end reached"); - bool result = next_nmethod(); - while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) { - // Advance to next code heap if segmented code cache - _code_blob_type++; - result = next_nmethod(); + bool result = next_blob(); + while (!result && _heap != _end) { + // Advance to next code heap of segmented code cache + if (++_heap == _end) { + break; + } + result = next_blob(); } + return result; } - // Advance iterator to next alive nmethod + // Advance iterator to next alive blob bool next_alive() { bool result = next(); while(result && !_code_blob->is_alive()) { @@ -302,90 +339,48 @@ } bool end() const { return _code_blob == NULL; } - nmethod* method() const { return (nmethod*)_code_blob; } + T* method() const { return (T*)_code_blob; } private: - // Initialize iterator to given nmethod - void initialize(nmethod* nm) { - _code_blob = (CodeBlob*)nm; - if (!SegmentedCodeCache) { - // Iterate over all CodeBlobs - _code_blob_type = CodeBlobType::All; - } else if (nm != NULL) { - _code_blob_type = CodeCache::get_code_blob_type(nm); - } else { - // Only iterate over method code heaps, starting with non-profiled - _code_blob_type = CodeBlobType::MethodNonProfiled; - } - } - // Advance iterator to the next nmethod in the current code heap - bool next_nmethod() { + // Advance iterator to the next blob in the current code heap + bool next_blob() { + if (_heap == _end) { + return false; + } + CodeHeap *heap = *_heap; // Get
first method CodeBlob if (_code_blob == NULL) { - _code_blob = CodeCache::first_blob(_code_blob_type); + _code_blob = CodeCache::first_blob(heap); if (_code_blob == NULL) { return false; - } else if (_code_blob->is_nmethod()) { + } else if (Filter::apply(_code_blob)) { return true; } } // Search for next method CodeBlob - _code_blob = CodeCache::next_blob(_code_blob); - while (_code_blob != NULL && !_code_blob->is_nmethod()) { - _code_blob = CodeCache::next_blob(_code_blob); + _code_blob = CodeCache::next_blob(heap, _code_blob); + while (_code_blob != NULL && !Filter::apply(_code_blob)) { + _code_blob = CodeCache::next_blob(heap, _code_blob); } return _code_blob != NULL; } }; -// Iterator to iterate over compiled methods in the CodeCache. -class CompiledMethodIterator : public StackObj { - private: - CodeBlob* _code_blob; // Current CodeBlob - int _code_blob_type; // Refers to current CodeHeap - - public: - CompiledMethodIterator() { - initialize(NULL); // Set to NULL, initialized by first call to next() - } - - CompiledMethodIterator(CompiledMethod* cm) { - initialize(cm); - } - // Advance iterator to next compiled method - bool next() { - assert_locked_or_safepoint(CodeCache_lock); - assert(_code_blob_type < CodeBlobType::NumTypes, "end reached"); +struct CompiledMethodFilter { + static bool apply(CodeBlob* cb) { return cb->is_compiled(); } + static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::compiled_heaps(); } +}; - bool result = next_compiled_method(); - while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) { - // Advance to next code heap if segmented code cache - _code_blob_type++; - result = next_compiled_method(); - } - return result; - } - // Advance iterator to next alive compiled method - bool next_alive() { - bool result = next(); - while(result && !_code_blob->is_alive()) { - result = next(); - } - return result; - } +struct NMethodFilter { + static bool apply(CodeBlob* cb) { return cb->is_nmethod(); } + static const GrowableArray<CodeHeap*>* heaps() { return CodeCache::nmethod_heaps(); } +}; - bool end() const { return _code_blob == NULL; } - CompiledMethod* method() const { return (_code_blob != NULL) ? _code_blob->as_compiled_method() : NULL; } -private: - // Initialize iterator to given compiled method - void initialize(CompiledMethod* cm); - - // Advance iterator to the next compiled method in the current code heap - bool next_compiled_method(); -}; +typedef CodeBlobIterator<CompiledMethod, CompiledMethodFilter> CompiledMethodIterator; +typedef CodeBlobIterator<nmethod, NMethodFilter> NMethodIterator; #endif // SHARE_VM_CODE_CODECACHE_HPP --- old/src/share/vm/code/compiledIC.cpp 2016-10-31 17:46:37.000000000 -0700 +++ new/src/share/vm/code/compiledIC.cpp 2016-10-31 17:46:36.000000000 -0700 @@ -55,7 +55,7 @@ assert (!is_optimized(), "an optimized virtual call does not have a cached metadata"); if (!is_in_transition_state()) { - void* data = (void*)_value->data(); + void* data = get_data(); // If we let the metadata value here be initialized to zero... assert(data != NULL || Universe::non_oop_word() == NULL, "no raw nulls in CompiledIC metadatas, because of patching races"); @@ -77,13 +77,13 @@ // Don't use ic_destination for this test since that forwards // through ICBuffer instead of returning the actual current state of // the CompiledIC. - if (is_icholder_entry(_ic_call->destination())) { + if (is_icholder_entry(_call->destination())) { // When patching for the ICStub case the cached value isn't // overwritten until the ICStub copied into the CompiledIC during // the next safepoint.
Make sure that the CompiledICHolder* is + // marked for release at this point since it won't be identifiable + // once the entry point is overwritten. - InlineCacheBuffer::queue_for_release((CompiledICHolder*)_value->data()); + InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data()); } if (TraceCompiledIC) { @@ -102,10 +102,10 @@ { MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag); #ifdef ASSERT - CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call); + CodeBlob* cb = CodeCache::find_blob_unsafe(_call->instruction_address()); assert(cb != NULL && cb->is_compiled(), "must be compiled"); #endif - _ic_call->set_destination_mt_safe(entry_point); + _call->set_destination_mt_safe(entry_point); } if (is_optimized() || is_icstub) { @@ -118,7 +118,7 @@ if (cache == NULL) cache = (void*)Universe::non_oop_word(); - _value->set_data((intptr_t)cache); + set_data((intptr_t)cache); } @@ -131,7 +131,7 @@ address CompiledIC::ic_destination() const { assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), ""); if (!is_in_transition_state()) { - return _ic_call->destination(); + return _call->destination(); } else { return InlineCacheBuffer::ic_destination_for((CompiledIC *)this); } @@ -140,7 +140,7 @@ bool CompiledIC::is_in_transition_state() const { assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), ""); - return InlineCacheBuffer::contains(_ic_call->destination()); + return InlineCacheBuffer::contains(_call->destination()); } @@ -153,7 +153,7 @@ // the InlineCacheBuffer when it needs to find the stub. address CompiledIC::stub_address() const { assert(is_in_transition_state(), "should only be called when we are in a transition state"); - return _ic_call->destination(); + return _call->destination(); } // Clears the IC stub if the compiled IC is in transition state @@ -164,17 +164,16 @@ } } - //----------------------------------------------------------------------------- // High-level access to an inline cache. Guaranteed to be MT-safe.
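From this point on, CompiledIC never touches the NativeCall directly: every operation goes through the NativeCallWrapper obtained from call_wrapper_at, which is what lets the same inline-cache code serve both nmethod and AOT call sites (the nmethod side is DirectNativeCallWrapper in nmethod.cpp further down). A trimmed Java analogue of that seam, all types hypothetical:

    interface NativeCallWrapper {
        long destination();              // current branch target of the call site
        long instructionAddress();
        void setDestinationMtSafe(long dest);
        boolean isSafeForPatching();     // false for nmethod call sites, so the old
                                         // safepoint/optimized-only cleaning rule still holds
    }
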
void CompiledIC::initialize_from_iter(RelocIterator* iter) { - assert(iter->addr() == _ic_call->instruction_address(), "must find ic_call"); + assert(iter->addr() == _call->instruction_address(), "must find ic_call"); if (iter->type() == relocInfo::virtual_call_type) { virtual_call_Relocation* r = iter->virtual_call_reloc(); _is_optimized = false; - _value = nativeMovConstReg_at(r->cached_value()); + _value = _call->get_load_instruction(r); } else { assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call"); _is_optimized = true; @@ -183,9 +182,10 @@ } CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call) - : _ic_call(call) + : _method(cm) { - address ic_call = _ic_call->instruction_address(); + _call = _method->call_wrapper_at((address) call); + address ic_call = _call->instruction_address(); assert(ic_call != NULL, "ic_call address must be set"); assert(cm != NULL, "must pass compiled method"); @@ -201,9 +201,10 @@ } CompiledIC::CompiledIC(RelocIterator* iter) - : _ic_call(nativeCall_at(iter->addr())) + : _method(iter->code()) { - address ic_call = _ic_call->instruction_address(); + _call = _method->call_wrapper_at(iter->addr()); + address ic_call = _call->instruction_address(); CompiledMethod* nm = iter->code(); assert(ic_call != NULL, "ic_call address must be set"); @@ -311,20 +312,17 @@ assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != NULL), "sanity check"); } else { // Check if we are calling into our own codeblob (i.e., to a stub) - CodeBlob* cb = CodeCache::find_blob(_ic_call->instruction_address()); address dest = ic_destination(); #ifdef ASSERT { - CodeBlob* db = CodeCache::find_blob_unsafe(dest); - assert(!db->is_adapter_blob(), "must use stub!"); + _call->verify_resolve_call(dest); } #endif /* ASSERT */ - is_call_to_interpreted = cb->contains(dest); + is_call_to_interpreted = _call->is_call_to_interpreted(dest); } return is_call_to_interpreted; } - void CompiledIC::set_to_clean(bool in_use) { assert(SafepointSynchronize::is_at_safepoint() || CompiledIC_lock->is_locked() , "MT-unsafe call"); if (TraceInlineCacheClearing || TraceICs) { @@ -332,16 +330,11 @@ print(); } - address entry; - if (is_optimized()) { - entry = SharedRuntime::get_resolve_opt_virtual_call_stub(); - } else { - entry = SharedRuntime::get_resolve_virtual_call_stub(); - } + address entry = _call->get_resolve_call_stub(is_optimized()); // A zombie transition will always be safe, since the metadata has already been set to NULL, so // we only need to patch the destination - bool safe_transition = !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint(); + bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint(); if (safe_transition) { // Kill any leftover stub we might have too @@ -364,18 +357,15 @@ // assert(is_clean(), "sanity check"); } - bool CompiledIC::is_clean() const { assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), ""); bool is_clean = false; address dest = ic_destination(); - is_clean = dest == SharedRuntime::get_resolve_opt_virtual_call_stub() || - dest == SharedRuntime::get_resolve_virtual_call_stub(); + is_clean = dest == _call->get_resolve_call_stub(is_optimized()); assert(!is_clean || is_optimized() || cached_value() == NULL, "sanity check"); return is_clean; } - void CompiledIC::set_to_monomorphic(CompiledICInfo& info) { assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), ""); // Updating a cache to 
the wrong entry can cause bugs that are very hard @@ -391,7 +381,7 @@ // transitions are mt_safe Thread *thread = Thread::current(); - if (info.to_interpreter()) { + if (info.to_interpreter() || info.to_aot()) { // Call to interpreter if (info.is_optimized() && is_optimized()) { assert(is_clean(), "unsafe IC path"); @@ -401,13 +391,14 @@ // At code generation time, this call has been emitted as static call // Call via stub assert(info.cached_metadata() != NULL && info.cached_metadata()->is_method(), "sanity check"); - CompiledStaticCall* csc = compiledStaticCall_at(instruction_address()); methodHandle method (thread, (Method*)info.cached_metadata()); - csc->set_to_interpreted(method, info.entry()); + _call->set_to_interpreted(method, info); + if (TraceICs) { ResourceMark rm(thread); - tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s", + tty->print_cr ("IC@" INTPTR_FORMAT ": monomorphic to %s: %s", p2i(instruction_address()), + (info.to_aot() ? "aot" : "interpreter"), method->print_value_string()); } } else { @@ -467,6 +458,7 @@ KlassHandle receiver_klass, bool is_optimized, bool static_bound, + bool caller_is_nmethod, CompiledICInfo& info, TRAPS) { CompiledMethod* method_code = method->code(); @@ -481,8 +473,9 @@ entry = method_code->entry_point(); } } - if (entry != NULL) { - // Call to compiled code + bool far_c2a = entry != NULL && caller_is_nmethod && method_code->is_far_code(); + if (entry != NULL && !far_c2a) { + // Call to near compiled code (nmethod or aot). info.set_compiled_entry(entry, (static_bound || is_optimized) ? NULL : receiver_klass(), is_optimized); } else { // Note: the following problem exists with Compiler1: @@ -518,8 +511,13 @@ #endif // TIERED #endif // COMPILER2 if (is_optimized) { - // Use stub entry - info.set_interpreter_entry(method()->get_c2i_entry(), method()); + if (far_c2a) { + // Call to aot code from nmethod. + info.set_aot_entry(entry, method()); + } else { + // Use stub entry + info.set_interpreter_entry(method()->get_c2i_entry(), method()); + } } else { // Use icholder entry assert(method_code == NULL || method_code->is_compiled(), "must be compiled"); @@ -536,8 +534,15 @@ return (cb != NULL && cb->is_adapter_blob()); } +bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) { + // This call site might have become stale so inspect it carefully. + address dest = cm->call_wrapper_at(call_site->addr())->destination(); + return is_icholder_entry(dest); +} + // Release the CompiledICHolder* associated with this call site is there is one. -void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) { +void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) { + assert(cm->is_nmethod(), "must be nmethod"); // This call site might have become stale so inspect it carefully. NativeCall* call = nativeCall_at(call_site->addr()); if (is_icholder_entry(call->destination())) { @@ -546,12 +551,6 @@ } } -bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) { - // This call site might have become stale so inspect it carefully. - NativeCall* call = nativeCall_at(call_site->addr()); - return is_icholder_entry(call->destination()); -} - // ---------------------------------------------------------------------------- void CompiledStaticCall::set_to_clean() { @@ -559,33 +558,52 @@ // Reset call site MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? 
NULL : Patching_lock, Mutex::_no_safepoint_check_flag); #ifdef ASSERT - CodeBlob* cb = CodeCache::find_blob_unsafe(this); + CodeBlob* cb = CodeCache::find_blob_unsafe(instruction_address()); assert(cb != NULL && cb->is_compiled(), "must be compiled"); #endif - set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub()); + + set_destination_mt_safe(resolve_call_stub()); // Do not reset stub here: It is too expensive to call find_stub. // Instead, rely on caller (nmethod::clear_inline_caches) to clear // both the call and its stub. } - bool CompiledStaticCall::is_clean() const { - return destination() == SharedRuntime::get_resolve_static_call_stub(); + return destination() == resolve_call_stub(); } bool CompiledStaticCall::is_call_to_compiled() const { return CodeCache::contains(destination()); } - -bool CompiledStaticCall::is_call_to_interpreted() const { +bool CompiledDirectStaticCall::is_call_to_interpreted() const { // It is a call to interpreted, if it calls to a stub. Hence, the destination // must be in the stub part of the nmethod that contains the call CompiledMethod* cm = CodeCache::find_compiled(instruction_address()); return cm->stub_contains(destination()); } +bool CompiledDirectStaticCall::is_call_to_far() const { + // It is a call to aot method, if it calls to a stub. Hence, the destination + // must be in the stub part of the nmethod that contains the call + CodeBlob* desc = CodeCache::find_blob(instruction_address()); + return desc->as_compiled_method()->stub_contains(destination()); +} + +void CompiledStaticCall::set_to_compiled(address entry) { + if (TraceICs) { + ResourceMark rm; + tty->print_cr("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT, + name(), + p2i(instruction_address()), + p2i(entry)); + } + // Call to compiled code + assert(CodeCache::contains(entry), "wrong entry point"); + set_destination_mt_safe(entry); +} + void CompiledStaticCall::set(const StaticCallInfo& info) { assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); @@ -598,26 +616,28 @@ if (info._to_interpreter) { // Call to interpreted code set_to_interpreted(info.callee(), info.entry()); +#if INCLUDE_AOT + } else if (info._to_aot) { + // Call to far code + set_to_far(info.callee(), info.entry()); +#endif } else { - if (TraceICs) { - ResourceMark rm; - tty->print_cr("CompiledStaticCall@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT, - p2i(instruction_address()), - p2i(info.entry())); - } - // Call to compiled code - assert (CodeCache::contains(info.entry()), "wrong entry point"); - set_destination_mt_safe(info.entry()); + set_to_compiled(info.entry()); } } - // Compute settings for a CompiledStaticCall. Since we might have to set // the stub when calling to the interpreter, we need to return arguments. -void CompiledStaticCall::compute_entry(const methodHandle& m, StaticCallInfo& info) { +void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) { CompiledMethod* m_code = m->code(); info._callee = m; if (m_code != NULL && m_code->is_in_use()) { + if (caller_is_nmethod && m_code->is_far_code()) { + // Call to far aot code from nmethod. 
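Taken together, set and compute_entry above give a static call three possible targets; the dispatch restated as a small Java helper (names mirror the patch fields, not a real API):

    final class StaticCallTarget {
        static String kindFor(boolean toInterpreter, boolean toAot) {
            if (toInterpreter) return "interpreted"; // set_to_interpreted, via the c2i stub
            if (toAot)         return "far/aot";     // set_to_far, INCLUDE_AOT builds only
            return "compiled";                       // set_to_compiled, near entry in the CodeCache
        }
    }
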
+ info._to_aot = true; + } else { + info._to_aot = false; + } info._to_interpreter = false; info._entry = m_code->verified_entry_point(); } else { @@ -629,18 +649,18 @@ } } -address CompiledStaticCall::find_stub() { +address CompiledDirectStaticCall::find_stub_for(address instruction, bool is_aot) { // Find reloc. information containing this call-site - RelocIterator iter((nmethod*)NULL, instruction_address()); + RelocIterator iter((nmethod*)NULL, instruction); while (iter.next()) { - if (iter.addr() == instruction_address()) { + if (iter.addr() == instruction) { switch(iter.type()) { case relocInfo::static_call_type: - return iter.static_call_reloc()->static_stub(); + return iter.static_call_reloc()->static_stub(is_aot); // We check here for opt_virtual_call_type, since we reuse the code // from the CompiledIC implementation case relocInfo::opt_virtual_call_type: - return iter.opt_virtual_call_reloc()->static_stub(); + return iter.opt_virtual_call_reloc()->static_stub(is_aot); case relocInfo::poll_type: case relocInfo::poll_return_type: // A safepoint can't overlap a call. default: @@ -651,17 +671,20 @@ return NULL; } +address CompiledDirectStaticCall::find_stub(bool is_aot) { + return CompiledDirectStaticCall::find_stub_for(instruction_address(), is_aot); +} + +address CompiledDirectStaticCall::resolve_call_stub() const { + return SharedRuntime::get_resolve_static_call_stub(); +} //----------------------------------------------------------------------------- // Non-product mode code #ifndef PRODUCT void CompiledIC::verify() { - // make sure code pattern is actually a call imm32 instruction - _ic_call->verify(); - if (os::is_MP()) { - _ic_call->verify_alignment(); - } + _call->verify(); assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted() || is_optimized() || is_megamorphic(), "sanity check"); } @@ -676,12 +699,14 @@ p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? 
NULL : cached_value())); } -void CompiledStaticCall::print() { +void CompiledDirectStaticCall::print() { tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address())); if (is_clean()) { tty->print("clean"); } else if (is_call_to_compiled()) { tty->print("compiled"); + } else if (is_call_to_far()) { + tty->print("far"); } else if (is_call_to_interpreted()) { tty->print("interpreted"); } --- old/src/share/vm/code/compiledIC.hpp 2016-10-31 17:46:37.000000000 -0700 +++ new/src/share/vm/code/compiledIC.hpp 2016-10-31 17:46:37.000000000 -0700 @@ -68,6 +68,7 @@ bool _is_icholder; // Is the cached value a CompiledICHolder* bool _is_optimized; // it is an optimized virtual call (i.e., can be statically bound) bool _to_interpreter; // Call it to interpreter + bool _to_aot; // Call it to aot code bool _release_icholder; public: address entry() const { return _entry; } @@ -81,12 +82,14 @@ return icholder; } bool is_optimized() const { return _is_optimized; } - bool to_interpreter() const { return _to_interpreter; } + bool to_interpreter() const { return _to_interpreter; } + bool to_aot() const { return _to_aot; } void set_compiled_entry(address entry, Klass* klass, bool is_optimized) { _entry = entry; _cached_value = (void*)klass; _to_interpreter = false; + _to_aot = false; _is_icholder = false; _is_optimized = is_optimized; _release_icholder = false; @@ -96,6 +99,17 @@ _entry = entry; _cached_value = (void*)method; _to_interpreter = true; + _to_aot = false; + _is_icholder = false; + _is_optimized = true; + _release_icholder = false; + } + + void set_aot_entry(address entry, Method* method) { + _entry = entry; + _cached_value = (void*)method; + _to_interpreter = false; + _to_aot = true; _is_icholder = false; _is_optimized = true; _release_icholder = false; @@ -105,13 +119,14 @@ _entry = entry; _cached_value = (void*)icholder; _to_interpreter = true; + _to_aot = false; _is_icholder = true; _is_optimized = false; _release_icholder = true; } CompiledICInfo(): _entry(NULL), _cached_value(NULL), _is_icholder(false), - _to_interpreter(false), _is_optimized(false), _release_icholder(false) { + _to_interpreter(false), _to_aot(false), _is_optimized(false), _release_icholder(false) { } ~CompiledICInfo() { // In rare cases the info is computed but not used, so release any @@ -125,15 +140,36 @@ } }; +class NativeCallWrapper: public ResourceObj { +public: + virtual address destination() const = 0; + virtual address instruction_address() const = 0; + virtual address next_instruction_address() const = 0; + virtual address return_address() const = 0; + virtual address get_resolve_call_stub(bool is_optimized) const = 0; + virtual void set_destination_mt_safe(address dest) = 0; + virtual void set_to_interpreted(methodHandle method, CompiledICInfo& info) = 0; + virtual void verify() const = 0; + virtual void verify_resolve_call(address dest) const = 0; + + virtual bool is_call_to_interpreted(address dest) const = 0; + virtual bool is_safe_for_patching() const = 0; + + virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const = 0; + + virtual void *get_data(NativeInstruction* instruction) const = 0; + virtual void set_data(NativeInstruction* instruction, intptr_t data) = 0; +}; + class CompiledIC: public ResourceObj { friend class InlineCacheBuffer; friend class ICStub; - private: - NativeCall* _ic_call; // the call instruction - NativeMovConstReg* _value; // patchable value cell for this IC + NativeCallWrapper* _call; + NativeInstruction* _value; // patchable value cell for this 
IC bool _is_optimized; // an optimized virtual call (i.e., no compiled IC) + CompiledMethod* _method; CompiledIC(CompiledMethod* cm, NativeCall* ic_call); CompiledIC(RelocIterator* iter); @@ -177,8 +213,8 @@ // This is used to release CompiledICHolder*s from nmethods that // are about to be freed. The callsite might contain other stale // values of other kinds so it must be careful. - static void cleanup_call_site(virtual_call_Relocation* call_site); - static bool is_icholder_call_site(virtual_call_Relocation* call_site); + static void cleanup_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm); + static bool is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm); // Return the cached_metadata/destination associated with this inline cache. If the cache currently points // to a transition stub, it will read the values from the transition stub. @@ -192,6 +228,14 @@ return (Metadata*) cached_value(); } + void* get_data() const { + return _call->get_data(_value); + } + + void set_data(intptr_t data) { + _call->set_data(_value, data); + } + address ic_destination() const; bool is_optimized() const { return _is_optimized; } @@ -204,7 +248,7 @@ bool is_icholder_call() const; - address end_of_call() { return _ic_call->return_address(); } + address end_of_call() { return _call->return_address(); } // MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledIC_ock // so you are guaranteed that no patching takes place. The same goes for verify. @@ -223,10 +267,11 @@ bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, TRAPS); static void compute_monomorphic_entry(const methodHandle& method, KlassHandle receiver_klass, - bool is_optimized, bool static_bound, CompiledICInfo& info, TRAPS); + bool is_optimized, bool static_bound, bool caller_is_nmethod, + CompiledICInfo& info, TRAPS); // Location - address instruction_address() const { return _ic_call->instruction_address(); } + address instruction_address() const { return _call->instruction_address(); } // Misc void print() PRODUCT_RETURN; @@ -278,42 +323,38 @@ // Interpreted code: Calls to stub that set Method* reference // // -class CompiledStaticCall; class StaticCallInfo { private: address _entry; // Entrypoint methodHandle _callee; // Callee (used when calling interpreter) bool _to_interpreter; // call to interpreted method (otherwise compiled) + bool _to_aot; // call to aot method (otherwise compiled) friend class CompiledStaticCall; + friend class CompiledDirectStaticCall; + friend class CompiledPltStaticCall; public: address entry() const { return _entry; } methodHandle callee() const { return _callee; } }; - -class CompiledStaticCall: public NativeCall { - friend class CompiledIC; - - // Also used by CompiledIC - void set_to_interpreted(methodHandle callee, address entry); - bool is_optimized_virtual(); - +class CompiledStaticCall : public ResourceObj { public: - friend CompiledStaticCall* compiledStaticCall_before(address return_addr); - friend CompiledStaticCall* compiledStaticCall_at(address native_call); - friend CompiledStaticCall* compiledStaticCall_at(Relocation* call_site); - // Code static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = NULL); static int to_interp_stub_size(); static int reloc_to_interp_stub(); + static void emit_to_aot_stub(CodeBuffer &cbuf, address mark = NULL); + static int to_aot_stub_size(); + static int reloc_to_aot_stub(); - // State - bool is_clean() const; - bool is_call_to_compiled() const; - bool 
is_call_to_interpreted() const; + // Compute entry point given a method + static void compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info); + +public: + // Clean static call (will force resolving on next use) + virtual address destination() const = 0; // Clean static call (will force resolving on next use) void set_to_clean(); @@ -323,33 +364,77 @@ // a OptoRuntime::resolve_xxx. void set(const StaticCallInfo& info); - // Compute entry point given a method - static void compute_entry(const methodHandle& m, StaticCallInfo& info); + // State + bool is_clean() const; + bool is_call_to_compiled() const; + virtual bool is_call_to_interpreted() const = 0; + + virtual address instruction_address() const = 0; +protected: + virtual address resolve_call_stub() const = 0; + virtual void set_destination_mt_safe(address dest) = 0; +#if INCLUDE_AOT + virtual void set_to_far(methodHandle callee, address entry) = 0; +#endif + virtual void set_to_interpreted(methodHandle callee, address entry) = 0; + virtual const char* name() const = 0; + + void set_to_compiled(address entry); +}; + +class CompiledDirectStaticCall : public CompiledStaticCall { +private: + friend class CompiledIC; + friend class DirectNativeCallWrapper; + + // Also used by CompiledIC + void set_to_interpreted(methodHandle callee, address entry); +#if INCLUDE_AOT + void set_to_far(methodHandle callee, address entry); +#endif + address instruction_address() const { return _call->instruction_address(); } + void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); } + + NativeCall* _call; + + CompiledDirectStaticCall(NativeCall* call) : _call(call) {} + + public: + static inline CompiledDirectStaticCall* before(address return_addr) { + CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_before(return_addr)); + st->verify(); + return st; + } + + static inline CompiledDirectStaticCall* at(address native_call) { + CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_at(native_call)); + st->verify(); + return st; + } + + static inline CompiledDirectStaticCall* at(Relocation* call_site) { + return at(call_site->addr()); + } + + // Delegation + address destination() const { return _call->destination(); } + + // State + virtual bool is_call_to_interpreted() const; + bool is_call_to_far() const; // Stub support - address find_stub(); + static address find_stub_for(address instruction, bool is_aot); + address find_stub(bool is_aot); static void set_stub_to_clean(static_stub_Relocation* static_stub); // Misc. 
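The static before/at factories above replace the old compiledStaticCall_* helpers, which reinterpret-cast a raw address; the factories verify the call-site pattern before handing the object out. The fail-fast shape, as a Java sketch with hypothetical types:

    final class CompiledDirectStaticCallSite {
        private final long addr;
        private CompiledDirectStaticCallSite(long addr) { this.addr = addr; }

        static CompiledDirectStaticCallSite at(long nativeCallAddress) {
            CompiledDirectStaticCallSite st = new CompiledDirectStaticCallSite(nativeCallAddress);
            st.verify(); // fail fast instead of trusting an unchecked cast
            return st;
        }

        void verify() { /* inspect the code pattern at addr; assert on mismatch */ }
    }
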
void print() PRODUCT_RETURN; void verify() PRODUCT_RETURN; -}; - - -inline CompiledStaticCall* compiledStaticCall_before(address return_addr) { - CompiledStaticCall* st = (CompiledStaticCall*)nativeCall_before(return_addr); - st->verify(); - return st; -} -inline CompiledStaticCall* compiledStaticCall_at(address native_call) { - CompiledStaticCall* st = (CompiledStaticCall*)native_call; - st->verify(); - return st; -} - -inline CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) { - return compiledStaticCall_at(call_site->addr()); -} + protected: + virtual address resolve_call_stub() const; + virtual const char* name() const { return "CompiledDirectStaticCall"; } +}; #endif // SHARE_VM_CODE_COMPILEDIC_HPP --- old/src/share/vm/code/compiledMethod.cpp 2016-10-31 17:46:38.000000000 -0700 +++ new/src/share/vm/code/compiledMethod.cpp 2016-10-31 17:46:38.000000000 -0700 @@ -274,7 +274,7 @@ RelocIterator iter(this); while(iter.next()) { if (iter.type() == relocInfo::virtual_call_type) { - if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) { + if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc(), this)) { CompiledIC *ic = CompiledIC_at(&iter); if (TraceCompiledIC) { tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder())); @@ -410,6 +410,7 @@ BoolObjectClosure* CheckClass::_is_alive = NULL; #endif // ASSERT + void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) { if (ic->is_icholder_call()) { // The only exception is compiledICHolder oops which may --- old/src/share/vm/code/compiledMethod.hpp 2016-10-31 17:46:38.000000000 -0700 +++ new/src/share/vm/code/compiledMethod.hpp 2016-10-31 17:46:38.000000000 -0700 @@ -35,6 +35,7 @@ class AbstractCompiler; class xmlStream; class CompiledStaticCall; +class NativeCallWrapper; // This class is used internally by nmethods, to cache // exception/pc/handler information. @@ -334,6 +335,14 @@ // corresponds to the given method as well. virtual bool is_dependent_on_method(Method* dependee) = 0; + virtual NativeCallWrapper* call_wrapper_at(address call) const = 0; + virtual NativeCallWrapper* call_wrapper_before(address return_pc) const = 0; + virtual address call_instruction_address(address pc) const = 0; + + virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const = 0; + virtual CompiledStaticCall* compiledStaticCall_at(address addr) const = 0; + virtual CompiledStaticCall* compiledStaticCall_before(address addr) const = 0; + Method* attached_method(address call_pc); Method* attached_method_before_pc(address pc); --- old/src/share/vm/code/nmethod.cpp 2016-10-31 17:46:39.000000000 -0700 +++ new/src/share/vm/code/nmethod.cpp 2016-10-31 17:46:39.000000000 -0700 @@ -400,6 +400,7 @@ _lock_count = 0; _stack_traversal_mark = 0; _unload_reported = false; // jvmti state + _is_far_code = false; // nmethods are located in CodeCache #ifdef ASSERT _oops_are_stale = false; @@ -2054,6 +2055,7 @@ // should pass zombie_ok == true. 
void nmethodLocker::lock_nmethod(CompiledMethod* cm, bool zombie_ok) { if (cm == NULL) return; + if (cm->is_aot()) return; // FIXME: Revisit once _lock_count is added to aot_method nmethod* nm = cm->as_nmethod(); Atomic::inc(&nm->_lock_count); assert(zombie_ok || !nm->is_zombie(), "cannot lock a zombie method"); @@ -2061,6 +2063,7 @@ void nmethodLocker::unlock_nmethod(CompiledMethod* cm) { if (cm == NULL) return; + if (cm->is_aot()) return; // FIXME: Revisit once _lock_count is added to aot_method nmethod* nm = cm->as_nmethod(); Atomic::dec(&nm->_lock_count); assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock"); @@ -2170,11 +2173,11 @@ verify_interrupt_point(iter.addr()); break; case relocInfo::opt_virtual_call_type: - stub = iter.opt_virtual_call_reloc()->static_stub(); + stub = iter.opt_virtual_call_reloc()->static_stub(false); verify_interrupt_point(iter.addr()); break; case relocInfo::static_call_type: - stub = iter.static_call_reloc()->static_stub(); + stub = iter.static_call_reloc()->static_stub(false); //verify_interrupt_point(iter.addr()); break; case relocInfo::runtime_call_type: @@ -2724,6 +2727,114 @@ } +class DirectNativeCallWrapper: public NativeCallWrapper { +private: + NativeCall* _call; + +public: + DirectNativeCallWrapper(NativeCall* call) : _call(call) {} + + virtual address destination() const { return _call->destination(); } + virtual address instruction_address() const { return _call->instruction_address(); } + virtual address next_instruction_address() const { return _call->next_instruction_address(); } + virtual address return_address() const { return _call->return_address(); } + + virtual address get_resolve_call_stub(bool is_optimized) const { + if (is_optimized) { + return SharedRuntime::get_resolve_opt_virtual_call_stub(); + } + return SharedRuntime::get_resolve_virtual_call_stub(); + } + + virtual void set_destination_mt_safe(address dest) { +#if INCLUDE_AOT + if (UseAOT) { + CodeBlob* callee = CodeCache::find_blob(dest); + CompiledMethod* cm = callee->as_compiled_method_or_null(); + if (cm != NULL && cm->is_far_code()) { + // Temporary fix, see JDK-8143106 + CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address()); + csc->set_to_far(methodHandle(cm->method()), dest); + return; + } + } +#endif + _call->set_destination_mt_safe(dest); + } + + virtual void set_to_interpreted(methodHandle method, CompiledICInfo& info) { + CompiledDirectStaticCall* csc = CompiledDirectStaticCall::at(instruction_address()); +#if INCLUDE_AOT + if (info.to_aot()) { + csc->set_to_far(method, info.entry()); + } else +#endif + { + csc->set_to_interpreted(method, info.entry()); + } + } + + virtual void verify() const { + // make sure code pattern is actually a call imm32 instruction + _call->verify(); + if (os::is_MP()) { + _call->verify_alignment(); + } + } + + virtual void verify_resolve_call(address dest) const { + CodeBlob* db = CodeCache::find_blob_unsafe(dest); + assert(!db->is_adapter_blob(), "must use stub!"); + } + + virtual bool is_call_to_interpreted(address dest) const { + CodeBlob* cb = CodeCache::find_blob(_call->instruction_address()); + return cb->contains(dest); + } + + virtual bool is_safe_for_patching() const { return false; } + + virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const { + return nativeMovConstReg_at(r->cached_value()); + } + + virtual void *get_data(NativeInstruction* instruction) const { + return (void*)((NativeMovConstReg*) instruction)->data(); + } + + virtual void 
set_data(NativeInstruction* instruction, intptr_t data) { + ((NativeMovConstReg*) instruction)->set_data(data); + } +}; + +NativeCallWrapper* nmethod::call_wrapper_at(address call) const { + return new DirectNativeCallWrapper((NativeCall*) call); +} + +NativeCallWrapper* nmethod::call_wrapper_before(address return_pc) const { + return new DirectNativeCallWrapper(nativeCall_before(return_pc)); +} + +address nmethod::call_instruction_address(address pc) const { + if (NativeCall::is_call_before(pc)) { + NativeCall *ncall = nativeCall_before(pc); + return ncall->instruction_address(); + } + return NULL; +} + +CompiledStaticCall* nmethod::compiledStaticCall_at(Relocation* call_site) const { + return CompiledDirectStaticCall::at(call_site); +} + +CompiledStaticCall* nmethod::compiledStaticCall_at(address call_site) const { + return CompiledDirectStaticCall::at(call_site); +} + +CompiledStaticCall* nmethod::compiledStaticCall_before(address return_addr) const { + return CompiledDirectStaticCall::before(return_addr); +} + #ifndef PRODUCT void nmethod::print_value_on(outputStream* st) const { @@ -2743,7 +2854,7 @@ } case relocInfo::static_call_type: st->print_cr("Static call at " INTPTR_FORMAT, p2i(iter.reloc()->addr())); - compiledStaticCall_at(iter.reloc())->print(); + CompiledDirectStaticCall::at(iter.reloc())->print(); break; } } --- old/src/share/vm/code/nmethod.hpp 2016-10-31 17:46:40.000000000 -0700 +++ new/src/share/vm/code/nmethod.hpp 2016-10-31 17:46:40.000000000 -0700 @@ -302,13 +302,6 @@ address entry_point() const { return _entry_point; } // normal entry point address verified_entry_point() const { return _verified_entry_point; } // if klass is correct - enum { in_use = 0, // executable nmethod - not_entrant = 1, // marked for deoptimization but activations may still exist, - // will be transformed to zombie when all activations are gone - zombie = 2, // no activations exist, nmethod is ready for purge - unloaded = 3 }; // there should be no activations, should not be called, - // will be transformed to zombie immediately - // flag accessing and manipulation bool is_in_use() const { return _state == in_use; } bool is_alive() const { unsigned char s = _state; return s < zombie; } @@ -583,6 +576,14 @@ static int state_offset() { return offset_of(nmethod, _state); } virtual void metadata_do(void f(Metadata*)); + + NativeCallWrapper* call_wrapper_at(address call) const; + NativeCallWrapper* call_wrapper_before(address return_pc) const; + address call_instruction_address(address pc) const; + + virtual CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const; + virtual CompiledStaticCall* compiledStaticCall_at(address addr) const; + virtual CompiledStaticCall* compiledStaticCall_before(address addr) const; }; // Locks an nmethod so its code will not get removed and it will not --- old/src/share/vm/code/relocInfo.cpp 2016-10-31 17:46:40.000000000 -0700 +++ new/src/share/vm/code/relocInfo.cpp 2016-10-31 17:46:40.000000000 -0700 @@ -565,14 +565,18 @@ short* p = (short*) dest->locs_end(); CodeSection* insts = dest->outer()->insts(); normalize_address(_static_call, insts); - p = pack_1_int_to(p, scaled_offset(_static_call, insts->start())); + jint is_aot = _is_aot ? 
1 : 0; + p = pack_2_ints_to(p, scaled_offset(_static_call, insts->start()), is_aot); dest->set_locs_end((relocInfo*) p); } void static_stub_Relocation::unpack_data() { address base = binding()->section_start(CodeBuffer::SECT_INSTS); - jint offset = unpack_1_int(); + jint offset; + jint is_aot; + unpack_2_ints(offset, is_aot); _static_call = address_from_scaled_offset(offset, base); + _is_aot = (is_aot == 1); } void trampoline_stub_Relocation::pack_data_to(CodeSection* dest ) { @@ -796,14 +800,14 @@ } -address opt_virtual_call_Relocation::static_stub() { +address opt_virtual_call_Relocation::static_stub(bool is_aot) { // search for the static stub that points back to this static call address static_call_addr = addr(); RelocIterator iter(code()); while (iter.next()) { if (iter.type() == relocInfo::static_stub_type) { static_stub_Relocation* stub_reloc = iter.static_stub_reloc(); - if (stub_reloc->static_call() == static_call_addr) { + if (stub_reloc->static_call() == static_call_addr && stub_reloc->is_aot() == is_aot) { return iter.addr(); } } @@ -832,19 +836,19 @@ void static_call_Relocation::clear_inline_cache() { // Safe call site info - CompiledStaticCall* handler = compiledStaticCall_at(this); + CompiledStaticCall* handler = this->code()->compiledStaticCall_at(this); handler->set_to_clean(); } -address static_call_Relocation::static_stub() { +address static_call_Relocation::static_stub(bool is_aot) { // search for the static stub that points back to this static call address static_call_addr = addr(); RelocIterator iter(code()); while (iter.next()) { if (iter.type() == relocInfo::static_stub_type) { static_stub_Relocation* stub_reloc = iter.static_stub_reloc(); - if (stub_reloc->static_call() == static_call_addr) { + if (stub_reloc->static_call() == static_call_addr && stub_reloc->is_aot() == is_aot) { return iter.addr(); } } @@ -875,7 +879,7 @@ void static_stub_Relocation::clear_inline_cache() { // Call stub is only used when calling the interpreted code. // It does not really need to be cleared, except that we want to clean out the Method*.
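The static_stub relocation payload above grows from one packed int to two so that unpack_data() can recover both the call offset and the new is_aot flag; the only invariant is that unpacking reads the fields in packing order. A minimal standalone model of that round trip (the vector stands in for HotSpot's relocation stream; the helper names are illustrative, not HotSpot's actual packing routines):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Stand-in for the relocation data stream written by pack_data_to().
    static void pack_2_ints(std::vector<int32_t>& stream, int32_t offset, int32_t is_aot) {
      stream.push_back(offset);   // scaled offset of the owning static call
      stream.push_back(is_aot);   // 1 if the stub is an AOT trampoline, else 0
    }

    // Stand-in for unpack_data(): values come back in packing order.
    static void unpack_2_ints(const std::vector<int32_t>& stream, int32_t& offset, int32_t& is_aot) {
      offset = stream[0];
      is_aot = stream[1];
    }

    int main() {
      std::vector<int32_t> stream;
      pack_2_ints(stream, /*offset*/ 0x40, /*is_aot*/ 1);
      int32_t offset = 0, is_aot = 0;
      unpack_2_ints(stream, offset, is_aot);
      assert(offset == 0x40 && is_aot == 1);  // round trip preserves both fields
      return 0;
    }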
- CompiledStaticCall::set_stub_to_clean(this); + CompiledDirectStaticCall::set_stub_to_clean(this); } --- old/src/share/vm/code/relocInfo.hpp 2016-10-31 17:46:41.000000000 -0700 +++ new/src/share/vm/code/relocInfo.hpp 2016-10-31 17:46:41.000000000 -0700 @@ -1090,7 +1090,7 @@ void clear_inline_cache(); // find the matching static_stub - address static_stub(); + address static_stub(bool is_aot); }; @@ -1124,24 +1124,26 @@ void clear_inline_cache(); // find the matching static_stub - address static_stub(); + address static_stub(bool is_aot); }; class static_stub_Relocation : public Relocation { relocInfo::relocType type() { return relocInfo::static_stub_type; } public: - static RelocationHolder spec(address static_call) { + static RelocationHolder spec(address static_call, bool is_aot = false) { RelocationHolder rh = newHolder(); - new(rh) static_stub_Relocation(static_call); + new(rh) static_stub_Relocation(static_call, is_aot); return rh; } private: address _static_call; // location of corresponding static_call + bool _is_aot; // trampoline to aot code - static_stub_Relocation(address static_call) { + static_stub_Relocation(address static_call, bool is_aot) { _static_call = static_call; + _is_aot = is_aot; } friend class RelocIterator; @@ -1151,6 +1153,7 @@ void clear_inline_cache(); address static_call() { return _static_call; } + bool is_aot() { return _is_aot; } // data is packed as a scaled offset plus the is_aot flag in "2_ints" format void pack_data_to(CodeSection* dest); --- old/src/share/vm/compiler/compileTask.cpp 2016-10-31 17:46:42.000000000 -0700 +++ new/src/share/vm/compiler/compileTask.cpp 2016-10-31 17:46:42.000000000 -0700 @@ -292,8 +292,7 @@ if (_osr_bci != CompileBroker::standard_entry_bci) { log->print(" osr_bci='%d'", _osr_bci); } - // Always print the level in tiered.
- if (_comp_level != CompLevel_highest_tier || TieredCompilation) { + if (_comp_level != CompLevel_highest_tier) { log->print(" level='%d'", _comp_level); } if (_is_blocking) { @@ -329,24 +328,6 @@ // ------------------------------------------------------------------ -// CompileTask::log_task_dequeued -void CompileTask::log_task_dequeued(const char* comment) { - if (LogCompilation && xtty != NULL) { - Thread* thread = Thread::current(); - ttyLocker ttyl; - ResourceMark rm(thread); - - xtty->begin_elem("task_dequeued"); - log_task(xtty); - if (comment != NULL) { - xtty->print(" comment='%s'", comment); - } - xtty->end_elem(); - } -} - - -// ------------------------------------------------------------------ // CompileTask::log_task_start void CompileTask::log_task_start(CompileLog* log) { log->begin_head("task"); --- old/src/share/vm/compiler/compileTask.hpp 2016-10-31 17:46:42.000000000 -0700 +++ new/src/share/vm/compiler/compileTask.hpp 2016-10-31 17:46:42.000000000 -0700 @@ -193,7 +193,6 @@ void log_task(xmlStream* log); void log_task_queued(); - void log_task_dequeued(const char* comment); void log_task_start(CompileLog* log); void log_task_done(CompileLog* log); --- old/src/share/vm/compiler/compilerDefinitions.hpp 2016-10-31 17:46:43.000000000 -0700 +++ new/src/share/vm/compiler/compilerDefinitions.hpp 2016-10-31 17:46:43.000000000 -0700 @@ -47,8 +47,9 @@ // Enumeration to distinguish tiers of compilation enum CompLevel { - CompLevel_any = -1, - CompLevel_all = -1, + CompLevel_any = -2, + CompLevel_all = -2, + CompLevel_aot = -1, CompLevel_none = 0, // Interpreter CompLevel_simple = 1, // C1 CompLevel_limited_profile = 2, // C1, invocation & backedge counters --- old/src/share/vm/compiler/disassembler.cpp 2016-10-31 17:46:44.000000000 -0700 +++ new/src/share/vm/compiler/disassembler.cpp 2016-10-31 17:46:43.000000000 -0700 @@ -505,7 +505,21 @@ } decode_env env(cb, st); env.output()->print_cr("----------------------------------------------------------------------"); - env.output()->print_cr("%s", cb->name()); + if (cb->is_aot()) { + env.output()->print("A "); + if (cb->is_compiled()) { + CompiledMethod* cm = (CompiledMethod*)cb; + env.output()->print("%d ", cm->compile_id()); + cm->method()->method_holder()->name()->print_symbol_on(env.output()); + env.output()->print("."); + cm->method()->name()->print_symbol_on(env.output()); + cm->method()->signature()->print_symbol_on(env.output()); + } else { + env.output()->print_cr("%s", cb->name()); + } + } else { + env.output()->print_cr("%s", cb->name()); + } env.output()->print_cr(" at [" PTR_FORMAT ", " PTR_FORMAT "] " JLONG_FORMAT " bytes", p2i(cb->code_begin()), p2i(cb->code_end()), ((jlong)(cb->code_end() - cb->code_begin())) * sizeof(unsigned char*)); env.decode_instructions(cb->code_begin(), cb->code_end()); } --- old/src/share/vm/gc/g1/g1GCPhaseTimes.cpp 2016-10-31 17:46:44.000000000 -0700 +++ new/src/share/vm/gc/g1/g1GCPhaseTimes.cpp 2016-10-31 17:46:44.000000000 -0700 @@ -67,6 +67,9 @@ } _gc_par_phases[ScanRS] = new WorkerDataArray<double>(max_gc_threads, "Scan RS (ms):"); _gc_par_phases[CodeRoots] = new WorkerDataArray<double>(max_gc_threads, "Code Root Scanning (ms):"); +#if INCLUDE_AOT + _gc_par_phases[AOTCodeRoots] = new WorkerDataArray<double>(max_gc_threads, "AOT Root Scanning (ms):"); +#endif _gc_par_phases[ObjCopy] = new WorkerDataArray<double>(max_gc_threads, "Object Copy (ms):"); _gc_par_phases[Termination] = new WorkerDataArray<double>(max_gc_threads, "Termination (ms):"); _gc_par_phases[GCWorkerTotal] = new WorkerDataArray<double>(max_gc_threads, "GC Worker Total 
(ms):"); @@ -264,6 +267,9 @@ } debug_phase(_gc_par_phases[ScanRS]); debug_phase(_gc_par_phases[CodeRoots]); +#if INCLUDE_AOT + debug_phase(_gc_par_phases[AOTCodeRoots]); +#endif debug_phase(_gc_par_phases[ObjCopy]); debug_phase(_gc_par_phases[Termination]); debug_phase(_gc_par_phases[Other]); --- old/src/share/vm/gc/g1/g1GCPhaseTimes.hpp 2016-10-31 17:46:45.000000000 -0700 +++ new/src/share/vm/gc/g1/g1GCPhaseTimes.hpp 2016-10-31 17:46:45.000000000 -0700 @@ -58,6 +58,9 @@ ScanHCC, ScanRS, CodeRoots, +#if INCLUDE_AOT + AOTCodeRoots, +#endif ObjCopy, Termination, Other, --- old/src/share/vm/gc/g1/g1RootProcessor.cpp 2016-10-31 17:46:45.000000000 -0700 +++ new/src/share/vm/gc/g1/g1RootProcessor.cpp 2016-10-31 17:46:45.000000000 -0700 @@ -37,6 +37,7 @@ #include "gc/g1/g1RootProcessor.hpp" #include "gc/g1/heapRegion.inline.hpp" #include "memory/allocation.inline.hpp" +#include "aot/aotLoader.hpp" #include "runtime/fprofiler.hpp" #include "runtime/mutex.hpp" #include "services/management.hpp" @@ -290,6 +291,15 @@ } } +#if INCLUDE_AOT + if (UseAOT) { + G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::AOTCodeRoots, worker_i); + if (!_process_strong_tasks.is_task_claimed(G1RP_PS_aot_oops_do)) { + AOTLoader::oops_do(strong_roots); + } + } +#endif + { G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::SystemDictionaryRoots, worker_i); if (!_process_strong_tasks.is_task_claimed(G1RP_PS_SystemDictionary_oops_do)) { --- old/src/share/vm/gc/g1/g1RootProcessor.hpp 2016-10-31 17:46:46.000000000 -0700 +++ new/src/share/vm/gc/g1/g1RootProcessor.hpp 2016-10-31 17:46:46.000000000 -0700 @@ -64,6 +64,7 @@ G1RP_PS_ClassLoaderDataGraph_oops_do, G1RP_PS_jvmti_oops_do, G1RP_PS_CodeCache_oops_do, + G1RP_PS_aot_oops_do, G1RP_PS_filter_satb_buffers, G1RP_PS_refProcessor_oops_do, // Leave this one last. --- old/src/share/vm/gc/g1/heapRegion.cpp 2016-10-31 17:46:47.000000000 -0700 +++ new/src/share/vm/gc/g1/heapRegion.cpp 2016-10-31 17:46:47.000000000 -0700 @@ -516,7 +516,7 @@ _hr(hr), _failures(false) {} void do_code_blob(CodeBlob* cb) { - nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null(); + nmethod* nm = (cb == NULL) ? NULL : cb->as_compiled_method()->as_nmethod_or_null(); if (nm != NULL) { // Verify that the nemthod is live if (!nm->is_alive()) { --- old/src/share/vm/gc/parallel/pcTasks.cpp 2016-10-31 17:46:47.000000000 -0700 +++ new/src/share/vm/gc/parallel/pcTasks.cpp 2016-10-31 17:46:47.000000000 -0700 @@ -38,6 +38,7 @@ #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" #include "prims/jvmtiExport.hpp" +#include "aot/aotLoader.hpp" #include "runtime/fprofiler.hpp" #include "runtime/jniHandles.hpp" #include "runtime/thread.hpp" @@ -127,6 +128,9 @@ case code_cache: // Do not treat nmethods as strong roots for mark/sweep, since we can unload them. 
//CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure)); + if (UseAOT) { + AOTLoader::oops_do(&mark_and_push_closure); + } break; default: --- old/src/share/vm/gc/parallel/psMarkSweep.cpp 2016-10-31 17:46:48.000000000 -0700 +++ new/src/share/vm/gc/parallel/psMarkSweep.cpp 2016-10-31 17:46:48.000000000 -0700 @@ -48,6 +48,7 @@ #include "gc/shared/spaceDecorator.hpp" #include "logging/log.hpp" #include "oops/oop.inline.hpp" +#include "aot/aotLoader.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/fprofiler.hpp" #include "runtime/safepoint.hpp" @@ -515,6 +516,9 @@ ClassLoaderDataGraph::always_strong_cld_do(follow_cld_closure()); // Do not treat nmethods as strong roots for mark/sweep, since we can unload them. //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(mark_and_push_closure())); + if (UseAOT) { + AOTLoader::oops_do(mark_and_push_closure()); + } } // Flush marking stack. @@ -611,6 +615,9 @@ CodeBlobToOopClosure adjust_from_blobs(adjust_pointer_closure(), CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); + if (UseAOT) { + AOTLoader::oops_do(adjust_pointer_closure()); + } StringTable::oops_do(adjust_pointer_closure()); ref_processor()->weak_oops_do(adjust_pointer_closure()); PSScavenge::reference_processor()->weak_oops_do(adjust_pointer_closure()); --- old/src/share/vm/gc/parallel/psParallelCompact.cpp 2016-10-31 17:46:49.000000000 -0700 +++ new/src/share/vm/gc/parallel/psParallelCompact.cpp 2016-10-31 17:46:48.000000000 -0700 @@ -57,6 +57,7 @@ #include "oops/methodData.hpp" #include "oops/objArrayKlass.inline.hpp" #include "oops/oop.inline.hpp" +#include "aot/aotLoader.hpp" #include "runtime/atomic.hpp" #include "runtime/fprofiler.hpp" #include "runtime/safepoint.hpp" @@ -2183,6 +2184,9 @@ CodeBlobToOopClosure adjust_from_blobs(&oop_closure, CodeBlobToOopClosure::FixRelocations); CodeCache::blobs_do(&adjust_from_blobs); + if (UseAOT) { + AOTLoader::oops_do(&oop_closure); + } StringTable::oops_do(&oop_closure); ref_processor()->weak_oops_do(&oop_closure); // Roots were visited so references into the young gen in roots --- old/src/share/vm/gc/parallel/psTasks.cpp 2016-10-31 17:46:49.000000000 -0700 +++ new/src/share/vm/gc/parallel/psTasks.cpp 2016-10-31 17:46:49.000000000 -0700 @@ -37,6 +37,7 @@ #include "memory/resourceArea.hpp" #include "memory/universe.hpp" #include "oops/oop.inline.hpp" +#include "aot/aotLoader.hpp" #include "runtime/fprofiler.hpp" #include "runtime/thread.hpp" #include "runtime/vmThread.hpp" @@ -101,6 +102,9 @@ { MarkingCodeBlobClosure each_scavengable_code_blob(&roots_to_old_closure, CodeBlobToOopClosure::FixRelocations); CodeCache::scavenge_root_nmethods_do(&each_scavengable_code_blob); + if (UseAOT) { + AOTLoader::oops_do(&roots_closure); + } } break; --- old/src/share/vm/gc/shared/genCollectedHeap.cpp 2016-10-31 17:46:50.000000000 -0700 +++ new/src/share/vm/gc/shared/genCollectedHeap.cpp 2016-10-31 17:46:50.000000000 -0700 @@ -44,6 +44,7 @@ #include "memory/filemap.hpp" #include "memory/resourceArea.hpp" #include "oops/oop.inline.hpp" +#include "aot/aotLoader.hpp" #include "runtime/biasedLocking.hpp" #include "runtime/fprofiler.hpp" #include "runtime/handles.hpp" @@ -73,6 +74,7 @@ GCH_PS_ClassLoaderDataGraph_oops_do, GCH_PS_jvmti_oops_do, GCH_PS_CodeCache_oops_do, + GCH_PS_aot_oops_do, GCH_PS_younger_gens, // Leave this one last. 
GCH_PS_NumElements @@ -608,6 +610,9 @@ if (!_process_strong_tasks->is_task_claimed(GCH_PS_jvmti_oops_do)) { JvmtiExport::oops_do(strong_roots); } + if (UseAOT && !_process_strong_tasks->is_task_claimed(GCH_PS_aot_oops_do)) { + AOTLoader::oops_do(strong_roots); + } if (!_process_strong_tasks->is_task_claimed(GCH_PS_SystemDictionary_oops_do)) { SystemDictionary::roots_oops_do(strong_roots, weak_roots); --- old/src/share/vm/jvmci/jvmciCodeInstaller.cpp 2016-10-31 17:46:50.000000000 -0700 +++ new/src/share/vm/jvmci/jvmciCodeInstaller.cpp 2016-10-31 17:46:50.000000000 -0700 @@ -172,6 +172,69 @@ return map; } +AOTOopRecorder::AOTOopRecorder(Arena* arena, bool deduplicate) : OopRecorder(arena, deduplicate) { + _meta_strings = new GrowableArray<const char*>(); +} + +int AOTOopRecorder::nr_meta_strings() const { + return _meta_strings->length(); +} + +const char* AOTOopRecorder::meta_element(int pos) const { + return _meta_strings->at(pos); +} + +int AOTOopRecorder::find_index(Metadata* h) { + int index = this->OopRecorder::find_index(h); + + Klass* klass = NULL; + if (h->is_klass()) { + klass = (Klass*) h; + record_meta_string(klass->signature_name(), index); + } else if (h->is_method()) { + Method* method = (Method*) h; + // Need klass->signature_name() in method name + klass = method->method_holder(); + const char* klass_name = klass->signature_name(); + int klass_name_len = (int)strlen(klass_name); + Symbol* method_name = method->name(); + Symbol* signature = method->signature(); + int method_name_len = method_name->utf8_length(); + int method_sign_len = signature->utf8_length(); + int len = klass_name_len + 1 + method_name_len + method_sign_len; + char* dest = NEW_RESOURCE_ARRAY(char, len + 1); + strcpy(dest, klass_name); + dest[klass_name_len] = '.'; + strcpy(&dest[klass_name_len + 1], method_name->as_C_string()); + strcpy(&dest[klass_name_len + 1 + method_name_len], signature->as_C_string()); + dest[len] = 0; + record_meta_string(dest, index); + } + + return index; +} + +int AOTOopRecorder::find_index(jobject h) { + if (h == NULL) { + return 0; + } + oop javaMirror = JNIHandles::resolve(h); + Klass* klass = java_lang_Class::as_Klass(javaMirror); + return find_index(klass); +} + +void AOTOopRecorder::record_meta_string(const char* name, int index) { + assert(index > 0, "must be 1..n"); + index -= 1; // reduce by one to convert to array index + + if (index < _meta_strings->length()) { + assert(strcmp(name, _meta_strings->at(index)) == 0, "must match"); + } else { + assert(index == _meta_strings->length(), "must be last"); + _meta_strings->append(name); + } +} + void* CodeInstaller::record_metadata_reference(CodeSection* section, address dest, Handle constant, TRAPS) { /* * This method needs to return a raw (untyped) pointer, since the value of a pointer to the base @@ -481,7 +544,10 @@ JVMCIEnv::CodeInstallResult CodeInstaller::gather_metadata(Handle target, Handle compiled_code, CodeMetadata& metadata, TRAPS) { CodeBuffer buffer("JVMCI Compiler CodeBuffer for Metadata"); jobject compiled_code_obj = JNIHandles::make_local(compiled_code()); - initialize_dependencies(JNIHandles::resolve(compiled_code_obj), NULL, CHECK_OK); + AOTOopRecorder* recorder = new AOTOopRecorder(&_arena, true); + initialize_dependencies(JNIHandles::resolve(compiled_code_obj), recorder, CHECK_OK); + + metadata.set_oop_recorder(recorder); // Get instructions and constants CodeSections early because we need them.
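AOTOopRecorder::find_index above flattens a Method* into "<klass signature name>.<method name><method signature>", with no separator between the name and the signature. A standalone sketch of that format (the example symbols are assumed for illustration, not taken from the patch):

    #include <cassert>
    #include <string>

    // Model of the concatenation in find_index(): klass signature name, a '.',
    // then the method name immediately followed by its signature.
    static std::string meta_string(const std::string& klass_sig,
                                   const std::string& name,
                                   const std::string& sig) {
      return klass_sig + "." + name + sig;
    }

    int main() {
      // Assumed example symbols for java.lang.String.length().
      assert(meta_string("Ljava/lang/String;", "length", "()I")
             == "Ljava/lang/String;.length()I");
      return 0;
    }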
_instructions = buffer.insts(); @@ -553,7 +619,7 @@ stack_slots, _debug_recorder->_oopmaps, &_exception_handler_table, compiler, _debug_recorder, _dependencies, env, id, has_unsafe_access, _has_wide_vector, installed_code, compiled_code, speculation_log); - cb = nm; + cb = nm->as_codeblob_or_null(); if (nm != NULL && env == NULL) { DirectiveSet* directive = DirectivesStack::getMatchingDirective(method, compiler); bool printnmethods = directive->PrintAssemblyOption || directive->PrintNMethodsOption; @@ -623,25 +689,40 @@ } int CodeInstaller::estimate_stubs_size(TRAPS) { - // Estimate the number of static call stubs that might be emitted. + // Estimate the number of static and aot call stubs that might be emitted. int static_call_stubs = 0; + int aot_call_stubs = 0; objArrayOop sites = this->sites(); for (int i = 0; i < sites->length(); i++) { oop site = sites->obj_at(i); - if (site != NULL && site->is_a(site_Mark::klass())) { - oop id_obj = site_Mark::id(site); - if (id_obj != NULL) { - if (!java_lang_boxing_object::is_instance(id_obj, T_INT)) { - JVMCI_ERROR_0("expected Integer id, got %s", id_obj->klass()->signature_name()); + if (site != NULL) { + if (site->is_a(site_Mark::klass())) { + oop id_obj = site_Mark::id(site); + if (id_obj != NULL) { + if (!java_lang_boxing_object::is_instance(id_obj, T_INT)) { + JVMCI_ERROR_0("expected Integer id, got %s", id_obj->klass()->signature_name()); + } + jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT)); + if (id == INVOKESTATIC || id == INVOKESPECIAL) { + static_call_stubs++; + } } - jint id = id_obj->int_field(java_lang_boxing_object::value_offset_in_bytes(T_INT)); - if (id == INVOKESTATIC || id == INVOKESPECIAL) { - static_call_stubs++; + } + if (UseAOT && site->is_a(site_Call::klass())) { + oop target = site_Call::target(site); + InstanceKlass* target_klass = InstanceKlass::cast(target->klass()); + if (!target_klass->is_subclass_of(SystemDictionary::HotSpotForeignCallTarget_klass())) { + // Add far aot trampolines. + aot_call_stubs++; } } } } - return static_call_stubs * CompiledStaticCall::to_interp_stub_size(); + int size = static_call_stubs * CompiledStaticCall::to_interp_stub_size(); +#if INCLUDE_AOT + size += aot_call_stubs * CompiledStaticCall::to_aot_stub_size(); +#endif + return size; } // perform data and call relocation on the CodeBuffer @@ -1063,6 +1144,10 @@ if (foreign_call.not_null()) { jlong foreign_call_destination = HotSpotForeignCallTarget::address(foreign_call); + if (_immutable_pic_compilation) { + // Use fake short distance during PIC compilation. + foreign_call_destination = (jlong)(_instructions->start() + pc_offset); + } CodeInstaller::pd_relocate_ForeignCall(inst, foreign_call_destination, CHECK); } else { // method != NULL if (debug_info.is_null()) { @@ -1075,6 +1160,10 @@ // Need a static call stub for transitions from compiled to interpreted. CompiledStaticCall::emit_to_interp_stub(buffer, _instructions->start() + pc_offset); } +#if INCLUDE_AOT + // Trampoline to far aot code. + CompiledStaticCall::emit_to_aot_stub(buffer, _instructions->start() + pc_offset); +#endif } _next_call_type = INVOKE_INVALID; @@ -1093,9 +1182,18 @@ if (constant.is_null()) { THROW(vmSymbols::java_lang_NullPointerException()); } else if (constant->is_a(HotSpotObjectConstantImpl::klass())) { - pd_patch_OopConstant(pc_offset, constant, CHECK); + if (!_immutable_pic_compilation) { + // Do not patch during PIC compilation. 
+ pd_patch_OopConstant(pc_offset, constant, CHECK); + } } else if (constant->is_a(HotSpotMetaspaceConstantImpl::klass())) { - pd_patch_MetaspaceConstant(pc_offset, constant, CHECK); + if (!_immutable_pic_compilation) { + pd_patch_MetaspaceConstant(pc_offset, constant, CHECK); + } + } else if (constant->is_a(HotSpotSentinelConstant::klass())) { + if (!_immutable_pic_compilation) { + JVMCI_ERROR("sentinel constant not supported for normal compiles: %s", constant->klass()->signature_name()); + } } else { JVMCI_ERROR("unknown constant type in data patch: %s", constant->klass()->signature_name()); } @@ -1158,6 +1256,7 @@ case HEAP_END_ADDRESS: case NARROW_KLASS_BASE_ADDRESS: case CRC_TABLE_ADDRESS: + case LOG_OF_HEAP_REGION_GRAIN_BYTES: break; default: JVMCI_ERROR("invalid mark id: %d", id); --- old/src/share/vm/jvmci/jvmciCodeInstaller.hpp 2016-10-31 17:46:51.000000000 -0700 +++ new/src/share/vm/jvmci/jvmciCodeInstaller.hpp 2016-10-31 17:46:51.000000000 -0700 @@ -43,6 +43,21 @@ char *_buffer; }; +class AOTOopRecorder : public OopRecorder { +public: + AOTOopRecorder(Arena* arena = NULL, bool deduplicate = false); + + virtual int find_index(Metadata* h); + virtual int find_index(jobject h); + int nr_meta_strings() const; + const char* meta_element(int pos) const; + +private: + void record_meta_string(const char* name, int index); + + GrowableArray<const char*>* _meta_strings; +}; + class CodeMetadata { public: CodeMetadata() {} @@ -57,6 +72,8 @@ RelocBuffer* get_reloc_buffer() { return &_reloc_buffer; } + AOTOopRecorder* get_oop_recorder() { return _oop_recorder; } + ExceptionHandlerTable* get_exception_table() { return _exception_table; } void set_pc_desc(PcDesc* desc, int count) { @@ -69,6 +86,10 @@ _nr_scopes_desc = size; } + void set_oop_recorder(AOTOopRecorder* recorder) { + _oop_recorder = recorder; + } + void set_exception_table(ExceptionHandlerTable* table) { _exception_table = table; } @@ -82,6 +103,7 @@ int _nr_scopes_desc; RelocBuffer _reloc_buffer; + AOTOopRecorder* _oop_recorder; ExceptionHandlerTable* _exception_table; }; @@ -92,27 +114,28 @@ friend class JVMCIVMStructs; private: enum MarkId { - VERIFIED_ENTRY = 1, - UNVERIFIED_ENTRY = 2, - OSR_ENTRY = 3, - EXCEPTION_HANDLER_ENTRY = 4, - DEOPT_HANDLER_ENTRY = 5, - INVOKEINTERFACE = 6, - INVOKEVIRTUAL = 7, - INVOKESTATIC = 8, - INVOKESPECIAL = 9, - INLINE_INVOKE = 10, - POLL_NEAR = 11, - POLL_RETURN_NEAR = 12, - POLL_FAR = 13, - POLL_RETURN_FAR = 14, - CARD_TABLE_ADDRESS = 15, - CARD_TABLE_SHIFT = 16, - HEAP_TOP_ADDRESS = 17, - HEAP_END_ADDRESS = 18, - NARROW_KLASS_BASE_ADDRESS = 19, - CRC_TABLE_ADDRESS = 20, - INVOKE_INVALID = -1 + VERIFIED_ENTRY = 1, + UNVERIFIED_ENTRY = 2, + OSR_ENTRY = 3, + EXCEPTION_HANDLER_ENTRY = 4, + DEOPT_HANDLER_ENTRY = 5, + INVOKEINTERFACE = 6, + INVOKEVIRTUAL = 7, + INVOKESTATIC = 8, + INVOKESPECIAL = 9, + INLINE_INVOKE = 10, + POLL_NEAR = 11, + POLL_RETURN_NEAR = 12, + POLL_FAR = 13, + POLL_RETURN_FAR = 14, + CARD_TABLE_ADDRESS = 15, + CARD_TABLE_SHIFT = 16, + HEAP_TOP_ADDRESS = 17, + HEAP_END_ADDRESS = 18, + NARROW_KLASS_BASE_ADDRESS = 19, + CRC_TABLE_ADDRESS = 20, + LOG_OF_HEAP_REGION_GRAIN_BYTES = 21, + INVOKE_INVALID = -1 }; Arena _arena; @@ -146,6 +169,8 @@ Dependencies* _dependencies; ExceptionHandlerTable _exception_handler_table; + bool _immutable_pic_compilation; // Installer is called for Immutable PIC compilation.
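Because _immutable_pic_compilation is fixed at construction, the two call sites in jvmciCompilerToVM.cpp below simply build differently configured installers; a hedged usage sketch:

    CodeInstaller jit_installer(/* immutable_pic_compilation */ false);  // normal code install: patch constants
    CodeInstaller pic_installer(/* immutable_pic_compilation */ true);   // AOT metadata pass: leave PIC unpatched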
+ static ConstantOopWriteValue* _oop_null_scope_value; static ConstantIntValue* _int_m1_scope_value; static ConstantIntValue* _int_0_scope_value; @@ -173,7 +198,7 @@ public: - CodeInstaller() : _arena(mtCompiler) {} + CodeInstaller(bool immutable_pic_compilation) : _arena(mtCompiler), _immutable_pic_compilation(immutable_pic_compilation) {} JVMCIEnv::CodeInstallResult gather_metadata(Handle target, Handle compiled_code, CodeMetadata& metadata, TRAPS); JVMCIEnv::CodeInstallResult install(JVMCICompiler* compiler, Handle target, Handle compiled_code, CodeBlob*& cb, Handle installed_code, Handle speculation_log, TRAPS); --- old/src/share/vm/jvmci/jvmciCompilerToVM.cpp 2016-10-31 17:46:52.000000000 -0700 +++ new/src/share/vm/jvmci/jvmciCompilerToVM.cpp 2016-10-31 17:46:52.000000000 -0700 @@ -847,7 +847,8 @@ JVMCICompiler* compiler = JVMCICompiler::instance(CHECK_JNI_ERR); TraceTime install_time("installCode", JVMCICompiler::codeInstallTimer()); - CodeInstaller installer; + bool is_immutable_PIC = HotSpotCompiledCode::isImmutablePIC(compiled_code_handle) > 0; + CodeInstaller installer(is_immutable_PIC); JVMCIEnv::CodeInstallResult result = installer.install(compiler, target_handle, compiled_code_handle, cb, installed_code_handle, speculation_log_handle, CHECK_0); if (PrintCodeCacheOnCompilation) { @@ -905,7 +906,7 @@ CodeMetadata code_metadata; CodeBlob *cb = NULL; - CodeInstaller installer; + CodeInstaller installer(true /* immutable PIC compilation */); JVMCIEnv::CodeInstallResult result = installer.gather_metadata(target_handle, compiled_code_handle, code_metadata, CHECK_0); if (result != JVMCIEnv::ok) { @@ -941,7 +942,16 @@ HotSpotMetaData::set_oopMaps(metadata_handle, oopMapArrayHandle()); } - HotSpotMetaData::set_metadata(metadata_handle, NULL); + AOTOopRecorder* recorder = code_metadata.get_oop_recorder(); + + int nr_meta_strings = recorder->nr_meta_strings(); + objArrayHandle metadataArrayHandle = oopFactory::new_objectArray(nr_meta_strings, CHECK_(JVMCIEnv::cache_full)); + for (int i = 0; i < nr_meta_strings; ++i) { + const char* element = recorder->meta_element(i); + Handle java_string = java_lang_String::create_from_str(element, CHECK_(JVMCIEnv::cache_full)); + metadataArrayHandle->obj_at_put(i, java_string()); + } + HotSpotMetaData::set_metadata(metadata_handle, metadataArrayHandle()); ExceptionHandlerTable* handler = code_metadata.get_exception_table(); int table_size = handler->size_in_bytes(); @@ -1493,6 +1503,15 @@ THROW_MSG_0(vmSymbols::java_lang_IllegalArgumentException(), err_msg("Invalid profile data position %d", position)); C2V_END +C2V_VMENTRY(jlong, getFingerprint, (JNIEnv*, jobject, jlong metaspace_klass)) + Klass *k = CompilerToVM::asKlass(metaspace_klass); + if (k->is_instance_klass()) { + return InstanceKlass::cast(k)->get_stored_fingerprint(); + } else { + return 0; + } +C2V_END + C2V_VMENTRY(int, interpreterFrameSize, (JNIEnv*, jobject, jobject bytecode_frame_handle)) if (bytecode_frame_handle == NULL) { THROW_0(vmSymbols::java_lang_NullPointerException()); @@ -1621,6 +1640,7 @@ {CC "writeDebugOutput", CC "([BII)V", FN_PTR(writeDebugOutput)}, {CC "flushDebugOutput", CC "()V", FN_PTR(flushDebugOutput)}, {CC "methodDataProfileDataSize", CC "(JI)I", FN_PTR(methodDataProfileDataSize)}, + {CC "getFingerprint", CC "(J)J", FN_PTR(getFingerprint)}, {CC "interpreterFrameSize", CC "(" BYTECODE_FRAME ")I", FN_PTR(interpreterFrameSize)}, {CC "compileToBytecode", CC "(" OBJECT ")V", FN_PTR(compileToBytecode)}, }; --- old/src/share/vm/jvmci/jvmciCompilerToVM.hpp 2016-10-31 
17:46:52.000000000 -0700 +++ new/src/share/vm/jvmci/jvmciCompilerToVM.hpp 2016-10-31 17:46:52.000000000 -0700 @@ -125,6 +125,10 @@ return java_lang_Class::as_Klass(HotSpotResolvedObjectTypeImpl::javaClass(jvmci_type)); } + static inline Klass* asKlass(jlong metaspaceKlass) { + return (Klass*) (address) metaspaceKlass; + } + static inline MethodData* asMethodData(jlong metaspaceMethodData) { return (MethodData*) (address) metaspaceMethodData; } --- old/src/share/vm/jvmci/jvmciJavaClasses.cpp 2016-10-31 17:46:53.000000000 -0700 +++ new/src/share/vm/jvmci/jvmciJavaClasses.cpp 2016-10-31 17:46:53.000000000 -0700 @@ -44,7 +44,7 @@ fieldDescriptor fd; if (!ik->find_field(name_symbol, signature_symbol, &fd)) { ResourceMark rm; - fatal("Invalid layout of %s at %s", name_symbol->as_C_string(), ik->external_name()); + fatal("Invalid layout of %s %s at %s", name_symbol->as_C_string(), signature_symbol->as_C_string(), ik->external_name()); } guarantee(fd.is_static() == static_field, "static/instance mismatch"); dest_offset = fd.offset(); --- old/src/share/vm/jvmci/vmStructs_jvmci.cpp 2016-10-31 17:46:54.000000000 -0700 +++ new/src/share/vm/jvmci/vmStructs_jvmci.cpp 2016-10-31 17:46:53.000000000 -0700 @@ -30,6 +30,7 @@ #include "jvmci/jvmciCompilerToVM.hpp" #include "jvmci/jvmciEnv.hpp" #include "jvmci/jvmciRuntime.hpp" +#include "jvmci/vmStructs_compiler_runtime.hpp" #include "jvmci/vmStructs_jvmci.hpp" #include "oops/oop.hpp" #include "oops/objArrayKlass.hpp" @@ -43,7 +44,6 @@ #include "gc/g1/heapRegion.hpp" #endif - #define VM_STRUCTS(nonstatic_field, static_field, unchecked_nonstatic_field, volatile_nonstatic_field) \ static_field(CompilerToVM::Data, Klass_vtable_start_offset, int) \ static_field(CompilerToVM::Data, Klass_vtable_length_offset, int) \ @@ -413,6 +413,7 @@ declare_constant(CodeInstaller::HEAP_END_ADDRESS) \ declare_constant(CodeInstaller::NARROW_KLASS_BASE_ADDRESS) \ declare_constant(CodeInstaller::CRC_TABLE_ADDRESS) \ + declare_constant(CodeInstaller::LOG_OF_HEAP_REGION_GRAIN_BYTES) \ declare_constant(CodeInstaller::INVOKE_INVALID) \ \ declare_constant(ConstantPool::CPCACHE_INDEX_TAG) \ @@ -879,7 +880,9 @@ VM_ADDRESSES(GENERATE_VM_ADDRESS_ENTRY, GENERATE_PREPROCESSOR_VM_ADDRESS_ENTRY, GENERATE_VM_FUNCTION_ENTRY) - + VM_ADDRESSES_COMPILER_RUNTIME(GENERATE_VM_ADDRESS_ENTRY, + GENERATE_PREPROCESSOR_VM_ADDRESS_ENTRY, + GENERATE_VM_FUNCTION_ENTRY) VM_ADDRESSES_OS(GENERATE_VM_ADDRESS_ENTRY, GENERATE_PREPROCESSOR_VM_ADDRESS_ENTRY, GENERATE_VM_FUNCTION_ENTRY) --- old/src/share/vm/logging/logTag.hpp 2016-10-31 17:46:54.000000000 -0700 +++ new/src/share/vm/logging/logTag.hpp 2016-10-31 17:46:54.000000000 -0700 @@ -35,6 +35,9 @@ LOG_TAG(add) \ LOG_TAG(age) \ LOG_TAG(alloc) \ + LOG_TAG(aotclassfingerprint) \ + LOG_TAG(aotclassload) \ + LOG_TAG(aotclassresolve) \ LOG_TAG(annotation) \ LOG_TAG(arguments) \ LOG_TAG(attach) \ @@ -45,6 +48,7 @@ LOG_TAG(breakpoint) \ LOG_TAG(census) \ LOG_TAG(class) \ + LOG_TAG(classfingerprint) \ LOG_TAG(classhisto) \ LOG_TAG(cleanup) \ LOG_TAG(compaction) \ --- old/src/share/vm/memory/heap.cpp 2016-10-31 17:46:55.000000000 -0700 +++ new/src/share/vm/memory/heap.cpp 2016-10-31 17:46:55.000000000 -0700 @@ -275,6 +275,13 @@ return h->allocated_space(); } +CodeBlob* CodeHeap::find_blob_unsafe(void* start) const { + CodeBlob* result = (CodeBlob*)CodeHeap::find_start(start); + if (result != NULL && result->blob_contains((address)start)) { + return result; + } + return NULL; +} size_t CodeHeap::alignment_unit() const { // this will be a power of two --- 
old/src/share/vm/memory/heap.hpp 2016-10-31 17:46:56.000000000 -0700 +++ new/src/share/vm/memory/heap.hpp 2016-10-31 17:46:55.000000000 -0700 @@ -82,7 +82,7 @@ class CodeHeap : public CHeapObj<mtCode> { friend class VMStructs; friend class PregeneratedCodeHeap; - private: + protected: VirtualSpace _memory; // the memory holding the blocks VirtualSpace _segmap; // the memory holding the segment map @@ -156,6 +156,7 @@ virtual bool contains(const void* p) const { return low_boundary() <= p && p < high(); } virtual void* find_start(void* p) const; // returns the block containing p or NULL + virtual CodeBlob* find_blob_unsafe(void* start) const; size_t alignment_unit() const; // alignment of any block size_t alignment_offset() const; // offset of first byte of any block, within the enclosing alignment unit static size_t header_size(); // returns the header size for each heap block --- old/src/share/vm/memory/metaspace.cpp 2016-10-31 17:46:56.000000000 -0700 +++ new/src/share/vm/memory/metaspace.cpp 2016-10-31 17:46:56.000000000 -0700 @@ -48,6 +48,7 @@ #include "utilities/copy.hpp" #include "utilities/debug.hpp" #include "utilities/macros.hpp" +#include "aot/aotLoader.hpp" typedef BinaryTreeDictionary<Metablock, FreeList<Metablock> > BlockTreeDictionary; typedef BinaryTreeDictionary<Metachunk, FreeList<Metachunk> > ChunkTreeDictionary; @@ -3012,6 +3013,7 @@ assert(!UseSharedSpaces, "Cannot shift with UseSharedSpaces"); Universe::set_narrow_klass_shift(LogKlassAlignmentInBytes); } + AOTLoader::set_narrow_klass_shift(); } #if INCLUDE_CDS --- old/src/share/vm/memory/universe.cpp 2016-10-31 17:46:57.000000000 -0700 +++ new/src/share/vm/memory/universe.cpp 2016-10-31 17:46:57.000000000 -0700 @@ -55,6 +55,7 @@ #include "oops/objArrayOop.inline.hpp" #include "oops/oop.inline.hpp" #include "oops/typeArrayKlass.hpp" +#include "aot/aotLoader.hpp" #include "runtime/arguments.hpp" #include "runtime/atomic.hpp" #include "runtime/commandLineFlagConstraintList.hpp" @@ -671,6 +672,8 @@ Metaspace::global_initialize(); + AOTLoader::universe_init(); + // Checks 'AfterMemoryInit' constraints.
if (!CommandLineFlagConstraintList::check_constraints(CommandLineFlagConstraint::AfterMemoryInit)) { return JNI_EINVAL; --- old/src/share/vm/memory/virtualspace.hpp 2016-10-31 17:46:58.000000000 -0700 +++ new/src/share/vm/memory/virtualspace.hpp 2016-10-31 17:46:58.000000000 -0700 @@ -184,6 +184,11 @@ char* low_boundary() const { return _low_boundary; } char* high_boundary() const { return _high_boundary; } + void set_low_boundary(char *p) { _low_boundary = p; } + void set_high_boundary(char *p) { _high_boundary = p; } + void set_low(char *p) { _low = p; } + void set_high(char *p) { _high = p; } + bool special() const { return _special; } public: --- old/src/share/vm/oops/instanceKlass.cpp 2016-10-31 17:46:58.000000000 -0700 +++ new/src/share/vm/oops/instanceKlass.cpp 2016-10-31 17:46:58.000000000 -0700 @@ -25,6 +25,7 @@ #include "precompiled.hpp" #include "classfile/classFileParser.hpp" #include "classfile/classFileStream.hpp" +#include "classfile/classLoader.hpp" #include "classfile/javaClasses.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/systemDictionaryShared.hpp" @@ -57,6 +58,7 @@ #include "prims/jvmtiRedefineClasses.hpp" #include "prims/jvmtiThreadState.hpp" #include "prims/methodComparator.hpp" +#include "aot/aotLoader.hpp" #include "runtime/atomic.hpp" #include "runtime/fieldDescriptor.hpp" #include "runtime/handles.inline.hpp" @@ -144,7 +146,8 @@ parser.itable_size(), nonstatic_oop_map_size(parser.total_oop_map_count()), parser.is_interface(), - parser.is_anonymous()); + parser.is_anonymous(), + should_store_fingerprint()); const Symbol* const class_name = parser.class_name(); assert(class_name != NULL, "invariant"); @@ -787,6 +790,11 @@ } + if (UseAOT) { + // Look for aot compiled methods for this klass, including class initializer. + AOTLoader::load_for_klass(this_k, THREAD); + } + // Step 8 { assert(THREAD->is_Java_thread(), "non-JavaThread in initialize_impl"); @@ -1950,6 +1958,73 @@ } } +bool InstanceKlass::supers_have_passed_fingerprint_checks() { + if (java_super() != NULL && !java_super()->has_passed_fingerprint_check()) { + ResourceMark rm; + log_trace(classfingerprint)("%s : super %s not fingerprinted", external_name(), java_super()->external_name()); + return false; + } + + Array<Klass*>* local_interfaces = this->local_interfaces(); + if (local_interfaces != NULL) { + int length = local_interfaces->length(); + for (int i = 0; i < length; i++) { + InstanceKlass* intf = InstanceKlass::cast(local_interfaces->at(i)); + if (!intf->has_passed_fingerprint_check()) { + ResourceMark rm; + log_trace(classfingerprint)("%s : interface %s not fingerprinted", external_name(), intf->external_name()); + return false; + } + } + } + + return true; +} + +bool InstanceKlass::should_store_fingerprint() { +#if INCLUDE_AOT + // We store the fingerprint into the InstanceKlass only in the following 2 cases: + if (EnableJVMCI && !UseJVMCICompiler) { + // (1) We are running AOT to generate a shared library. + return true; + } + if (DumpSharedSpaces) { + // (2) We are running -Xshare:dump to create a shared archive + return true; + } +#endif + + // In all other cases we might set the _misc_has_passed_fingerprint_check bit, + // but do not store the 64-bit fingerprint to save space.
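The fingerprint accessors that follow go through Bytes::get_native_u8/put_native_u8 because the embedded slot sits after variable-length embedded fields and may not be 8-byte aligned. A portable standalone model of that unaligned access (std::memcpy stands in for the Bytes helpers):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // memcpy is the portable way to access a 64-bit value at an address with
    // no alignment guarantee, which is what Bytes::put/get_native_u8 provide.
    static void store_u8(unsigned char* adr, uint64_t v) { std::memcpy(adr, &v, sizeof v); }
    static uint64_t load_u8(const unsigned char* adr) {
      uint64_t v;
      std::memcpy(&v, adr, sizeof v);
      return v;
    }

    int main() {
      unsigned char blob[16];
      unsigned char* slot = blob + 3;                  // deliberately misaligned slot
      store_u8(slot, 0xCAFEBABEDEADBEEFULL);           // store_fingerprint() analogue
      assert(load_u8(slot) == 0xCAFEBABEDEADBEEFULL);  // get_stored_fingerprint() analogue
      return 0;
    }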
+ return false; +} + +bool InstanceKlass::has_stored_fingerprint() const { +#if INCLUDE_AOT + return should_store_fingerprint() || is_shared(); +#else + return false; +#endif +} + +uint64_t InstanceKlass::get_stored_fingerprint() const { + if (has_stored_fingerprint()) { + address adr = adr_fingerprint(); + assert(adr != NULL, "sanity"); + return (uint64_t)Bytes::get_native_u8(adr); // adr may not be 64-bit aligned + } + return 0; +} + +void InstanceKlass::store_fingerprint(uint64_t fingerprint) { + assert(should_store_fingerprint(), "must be"); + address adr = adr_fingerprint(); + assert(adr != NULL, "sanity"); + Bytes::put_native_u8(adr, (u8)fingerprint); // adr may not be 64-bit aligned + + ResourceMark rm; + log_trace(classfingerprint)("stored as " PTR64_FORMAT " for class %s", fingerprint, external_name()); +} static void remove_unshareable_in_class(Klass* k) { // remove klass's unshareable info --- old/src/share/vm/oops/instanceKlass.hpp 2016-10-31 17:46:59.000000000 -0700 +++ new/src/share/vm/oops/instanceKlass.hpp 2016-10-31 17:46:59.000000000 -0700 @@ -54,6 +54,7 @@ // indicating where oops are located in instances of this klass. // [EMBEDDED implementor of the interface] only exist for interface // [EMBEDDED host klass ] only exist for an anonymous class (JSR 292 enabled) +// [EMBEDDED fingerprint ] only if should_store_fingerprint()==true // forward declaration for class -- see below for definition @@ -215,10 +216,12 @@ _misc_has_nonstatic_concrete_methods = 1 << 7, // class/superclass/implemented interfaces has non-static, concrete methods _misc_declares_nonstatic_concrete_methods = 1 << 8, // directly declares non-static, concrete methods _misc_has_been_redefined = 1 << 9, // class has been redefined - _misc_is_scratch_class = 1 << 10, // class is the redefined scratch class - _misc_is_shared_boot_class = 1 << 11, // defining class loader is boot class loader - _misc_is_shared_platform_class = 1 << 12, // defining class loader is platform class loader - _misc_is_shared_app_class = 1 << 13 // defining class loader is app class loader + _misc_has_passed_fingerprint_check = 1 << 10, // when this class was loaded, the fingerprint computed from its + // code source was found to match the value recorded by AOT.
+ _misc_is_scratch_class = 1 << 11, // class is the redefined scratch class + _misc_is_shared_boot_class = 1 << 12, // defining class loader is boot class loader + _misc_is_shared_platform_class = 1 << 13, // defining class loader is platform class loader + _misc_is_shared_app_class = 1 << 14 // defining class loader is app class loader }; u2 loader_type_bits() { return _misc_is_shared_boot_class|_misc_is_shared_platform_class|_misc_is_shared_app_class; @@ -732,6 +735,23 @@ _misc_flags |= _misc_has_been_redefined; } + bool has_passed_fingerprint_check() const { + return (_misc_flags & _misc_has_passed_fingerprint_check) != 0; + } + void set_has_passed_fingerprint_check(bool b) { + if (b) { + _misc_flags |= _misc_has_passed_fingerprint_check; + } else { + _misc_flags &= ~_misc_has_passed_fingerprint_check; + } + } + bool supers_have_passed_fingerprint_checks(); + + static bool should_store_fingerprint(); + bool has_stored_fingerprint() const; + uint64_t get_stored_fingerprint() const; + void store_fingerprint(uint64_t fingerprint); + bool is_scratch_class() const { return (_misc_flags & _misc_is_scratch_class) != 0; } @@ -1028,19 +1048,21 @@ static int size(int vtable_length, int itable_length, int nonstatic_oop_map_size, - bool is_interface, bool is_anonymous) { + bool is_interface, bool is_anonymous, bool has_stored_fingerprint) { return align_metadata_size(header_size() + vtable_length + itable_length + nonstatic_oop_map_size + (is_interface ? (int)sizeof(Klass*)/wordSize : 0) + - (is_anonymous ? (int)sizeof(Klass*)/wordSize : 0)); + (is_anonymous ? (int)sizeof(Klass*)/wordSize : 0) + + (has_stored_fingerprint ? (int)sizeof(uint64_t*)/wordSize : 0)); } int size() const { return size(vtable_length(), itable_length(), nonstatic_oop_map_size(), is_interface(), - is_anonymous()); + is_anonymous(), + has_stored_fingerprint()); } #if INCLUDE_SERVICES virtual void collect_statistics(KlassSizeStats *sz) const; @@ -1083,6 +1105,24 @@ } } + address adr_fingerprint() const { + if (has_stored_fingerprint()) { + InstanceKlass** adr_host = adr_host_klass(); + if (adr_host != NULL) { + return (address)(adr_host + 1); + } + + Klass** adr_impl = adr_implementor(); + if (adr_impl != NULL) { + return (address)(adr_impl + 1); + } + + return (address)end_of_nonstatic_oop_maps(); + } else { + return NULL; + } + } + // Use this to return the size of an instance in heap words: int size_helper() const { return layout_helper_to_size_helper(layout_helper()); --- old/src/share/vm/oops/metadata.cpp 2016-10-31 17:47:00.000000000 -0700 +++ new/src/share/vm/oops/metadata.cpp 2016-10-31 17:47:00.000000000 -0700 @@ -42,7 +42,7 @@ } char* Metadata::print_value_string() const { - char buf[100]; + char buf[256]; stringStream st(buf, sizeof(buf)); if (this == NULL) { st.print("NULL"); --- old/src/share/vm/oops/method.hpp 2016-10-31 17:47:00.000000000 -0700 +++ new/src/share/vm/oops/method.hpp 2016-10-31 17:47:00.000000000 -0700 @@ -103,6 +103,10 @@ CompiledMethod* volatile _code; // Points to the corresponding piece of native code volatile address _from_interpreted_entry; // Cache of _code ? 
_adapter->i2c_entry() : _i2i_entry +#if INCLUDE_AOT && defined(TIERED) + CompiledMethod* _aot_code; +#endif + // Constructor Method(ConstMethod* xconst, AccessFlags access_flags); public: @@ -386,7 +390,20 @@ mcs->set_rate(rate); } } -#endif + +#if INCLUDE_AOT + void set_aot_code(CompiledMethod* aot_code) { + _aot_code = aot_code; + } + + CompiledMethod* aot_code() const { + return _aot_code; + } +#else + CompiledMethod* aot_code() const { return NULL; } +#endif // INCLUDE_AOT +#endif // TIERED + int nmethod_age() const { if (method_counters() == NULL) { return INT_MAX; @@ -648,6 +665,10 @@ // simultaneously. Use with caution. bool has_compiled_code() const { return code() != NULL; } +#ifdef TIERED + bool has_aot_code() const { return aot_code() != NULL; } +#endif + // sizing static int header_size() { return sizeof(Method)/wordSize; } static int size(bool is_native); --- old/src/share/vm/oops/methodCounters.hpp 2016-10-31 17:47:01.000000000 -0700 +++ new/src/share/vm/oops/methodCounters.hpp 2016-10-31 17:47:01.000000000 -0700 @@ -34,6 +34,7 @@ friend class VMStructs; friend class JVMCIVMStructs; private: + Method* _method; // Back link to Method #if defined(COMPILER2) || INCLUDE_JVMCI int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered) u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting @@ -64,7 +65,8 @@ u1 _highest_osr_comp_level; // Same for OSR level #endif - MethodCounters(methodHandle mh) : _nmethod_age(INT_MAX) + MethodCounters(methodHandle mh) : _method(mh()), + _nmethod_age(INT_MAX) #ifdef TIERED , _rate(0), _prev_time(0), @@ -107,6 +109,8 @@ void deallocate_contents(ClassLoaderData* loader_data) {} DEBUG_ONLY(bool on_stack() { return false; }) // for template + Method* method() const { return _method; } + static int size() { return sizeof(MethodCounters) / wordSize; } bool is_klass() const { return false; } --- old/src/share/vm/opto/output.cpp 2016-10-31 17:47:02.000000000 -0700 +++ new/src/share/vm/opto/output.cpp 2016-10-31 17:47:02.000000000 -0700 @@ -292,6 +292,10 @@ if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) { stub_size += CompiledStaticCall::to_interp_stub_size(); reloc_size += CompiledStaticCall::reloc_to_interp_stub(); +#if INCLUDE_AOT + stub_size += CompiledStaticCall::to_aot_stub_size(); + reloc_size += CompiledStaticCall::reloc_to_aot_stub(); +#endif } } else if (mach->is_MachSafePoint()) { // If call/safepoint are adjacent, account for possible --- old/src/share/vm/prims/jvmtiExport.cpp 2016-10-31 17:47:02.000000000 -0700 +++ new/src/share/vm/prims/jvmtiExport.cpp 2016-10-31 17:47:02.000000000 -0700 @@ -587,6 +587,7 @@ JvmtiThreadState * _state; KlassHandle * _h_class_being_redefined; JvmtiClassLoadKind _load_kind; + bool _has_been_modified; public: inline JvmtiClassFileLoadHookPoster(Symbol* h_name, Handle class_loader, @@ -603,6 +604,7 @@ _curr_data = *data_ptr; _curr_env = NULL; _cached_class_file_ptr = cache_ptr; + _has_been_modified = false; _state = _thread->jvmti_thread_state(); if (_state != NULL) { @@ -641,6 +643,8 @@ copy_modified_data(); } + bool has_been_modified() { return _has_been_modified; } + private: void post_all_envs() { if (_load_kind != jvmti_class_load_kind_retransform) { @@ -687,6 +691,7 @@ } if (new_data != NULL) { // this agent has modified class data. 
+ _has_been_modified = true; if (caching_needed && *_cached_class_file_ptr == NULL) { // data has been changed by the new retransformable agent // and it hasn't already been cached, cache it @@ -734,14 +739,14 @@ bool JvmtiExport::_should_post_class_file_load_hook = false; // this entry is for class file load hook on class load, redefine and retransform -void JvmtiExport::post_class_file_load_hook(Symbol* h_name, +bool JvmtiExport::post_class_file_load_hook(Symbol* h_name, Handle class_loader, Handle h_protection_domain, unsigned char **data_ptr, unsigned char **end_ptr, JvmtiCachedClassFileData **cache_ptr) { if (JvmtiEnv::get_phase() < JVMTI_PHASE_PRIMORDIAL) { - return; + return false; } JvmtiClassFileLoadHookPoster poster(h_name, class_loader, @@ -749,6 +754,7 @@ data_ptr, end_ptr, cache_ptr); poster.post(); + return poster.has_been_modified(); } void JvmtiExport::report_unsupported(bool on) { --- old/src/share/vm/prims/jvmtiExport.hpp 2016-10-31 17:47:03.000000000 -0700 +++ new/src/share/vm/prims/jvmtiExport.hpp 2016-10-31 17:47:03.000000000 -0700 @@ -326,10 +326,11 @@ JVMTI_ONLY(return _should_post_class_file_load_hook); NOT_JVMTI(return false;) } - static void post_class_file_load_hook(Symbol* h_name, Handle class_loader, + // Return true if the class was modified by the hook. + static bool post_class_file_load_hook(Symbol* h_name, Handle class_loader, Handle h_protection_domain, unsigned char **data_ptr, unsigned char **end_ptr, - JvmtiCachedClassFileData **cache_ptr) NOT_JVMTI_RETURN; + JvmtiCachedClassFileData **cache_ptr) NOT_JVMTI_RETURN_(false); static void post_native_method_bind(Method* method, address* function_ptr) NOT_JVMTI_RETURN; static void post_compiled_method_load(nmethod *nm) NOT_JVMTI_RETURN; static void post_dynamic_code_generated(const char *name, const void *code_begin, const void *code_end) NOT_JVMTI_RETURN; --- old/src/share/vm/prims/whitebox.cpp 2016-10-31 17:47:04.000000000 -0700 +++ new/src/share/vm/prims/whitebox.cpp 2016-10-31 17:47:03.000000000 -0700 @@ -1172,6 +1172,9 @@ int WhiteBox::get_blob_type(const CodeBlob* code) { guarantee(WhiteBoxAPI, "internal testing API :: WhiteBox has to be enabled"); + if (code->is_aot()) { + return -1; + } return CodeCache::get_code_heap(code)->code_blob_type(); } @@ -1227,7 +1230,8 @@ if (code == NULL) { return result; } - int insts_size = code->insts_size(); + int comp_level = code->comp_level(); + int insts_size = comp_level == CompLevel_aot ? 
code->code_end() - code->code_begin() : code->insts_size(); ThreadToNativeFromVM ttn(thread); jclass clazz = env->FindClass(vmSymbols::java_lang_Object()->as_C_string()); @@ -1242,7 +1246,7 @@ CHECK_JNI_EXCEPTION_(env, NULL); env->SetObjectArrayElement(result, 0, codeBlob); - jobject level = integerBox(thread, env, code->comp_level()); + jobject level = integerBox(thread, env, comp_level); CHECK_JNI_EXCEPTION_(env, NULL); env->SetObjectArrayElement(result, 1, level); --- old/src/share/vm/runtime/advancedThresholdPolicy.cpp 2016-10-31 17:47:04.000000000 -0700 +++ new/src/share/vm/runtime/advancedThresholdPolicy.cpp 2016-10-31 17:47:04.000000000 -0700 @@ -24,7 +24,6 @@ #include "precompiled.hpp" #include "code/codeCache.hpp" -#include "compiler/compileTask.hpp" #include "runtime/advancedThresholdPolicy.hpp" #include "runtime/simpleThresholdPolicy.inline.hpp" #if INCLUDE_JVMCI @@ -206,7 +205,6 @@ if (PrintTieredEvents) { print_event(REMOVE_FROM_QUEUE, method, method, task->osr_bci(), (CompLevel)task->comp_level()); } - task->log_task_dequeued("stale"); compile_queue->remove_and_mark_stale(task); method->clear_queued_for_compilation(); task = next_task; @@ -276,6 +274,10 @@ // the threshold values double. bool AdvancedThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) { switch(cur_level) { + case CompLevel_aot: { + double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); + return loop_predicate_helper<CompLevel_aot>(i, b, k, method); + } case CompLevel_none: case CompLevel_limited_profile: { double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); @@ -292,6 +294,10 @@ bool AdvancedThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) { switch(cur_level) { + case CompLevel_aot: { + double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); + return call_predicate_helper<CompLevel_aot>(i, b, k, method); + } case CompLevel_none: case CompLevel_limited_profile: { double k = threshold_scale(CompLevel_full_profile, Tier3LoadFeedback); @@ -394,31 +400,49 @@ next_level = CompLevel_simple; } else { switch(cur_level) { + case CompLevel_aot: { + // If we were at full profile level, would we switch to full opt? + if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { + next_level = CompLevel_full_optimization; + } else if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + (this->*p)(i, b, cur_level, method))) { + next_level = CompLevel_full_profile; + } + } + break; case CompLevel_none: // If we were at full profile level, would we switch to full opt? if (common(p, method, CompLevel_full_profile, disable_feedback) == CompLevel_full_optimization) { next_level = CompLevel_full_optimization; } else if ((this->*p)(i, b, cur_level, method)) { #if INCLUDE_JVMCI - if (UseJVMCICompiler) { + if (EnableJVMCI && UseJVMCICompiler) { // Since JVMCI takes a while to warm up, its queue inevitably backs up during - // early VM execution. + // early VM execution. As of 2014-06-13, JVMCI's inliner assumes that the root + // compilation method and all potential inlinees have mature profiles (which + // includes type profiling). If it sees immature profiles, JVMCI's inliner + // can perform pathologically badly (e.g., causing OutOfMemoryErrors due to + // exploring/inlining too many graphs).
Since a rewrite of the inliner is + // in progress, we simply disable the dialing back heuristic for now and will + // revisit this decision once the new inliner is completed. next_level = CompLevel_full_profile; - break; - } + } else #endif - // C1-generated fully profiled code is about 30% slower than the limited profile - // code that has only invocation and backedge counters. The observation is that - // if C2 queue is large enough we can spend too much time in the fully profiled code - // while waiting for C2 to pick the method from the queue. To alleviate this problem - // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long - // we choose to compile a limited profiled version and then recompile with full profiling - // when the load on C2 goes down. - if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > - Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { - next_level = CompLevel_limited_profile; - } else { - next_level = CompLevel_full_profile; + { + // C1-generated fully profiled code is about 30% slower than the limited profile + // code that has only invocation and backedge counters. The observation is that + // if C2 queue is large enough we can spend too much time in the fully profiled code + // while waiting for C2 to pick the method from the queue. To alleviate this problem + // we introduce a feedback on the C2 queue size. If the C2 queue is sufficiently long + // we choose to compile a limited profiled version and then recompile with full profiling + // when the load on C2 goes down. + if (!disable_feedback && CompileBroker::queue_size(CompLevel_full_optimization) > + Tier3DelayOn * compiler_count(CompLevel_full_optimization)) { + next_level = CompLevel_limited_profile; + } else { + next_level = CompLevel_full_profile; + } } } break; @@ -438,6 +462,13 @@ } else { next_level = CompLevel_full_optimization; } + } else { + // If there is no MDO we need to profile + if (disable_feedback || (CompileBroker::queue_size(CompLevel_full_optimization) <= + Tier3DelayOff * compiler_count(CompLevel_full_optimization) && + (this->*p)(i, b, cur_level, method))) { + next_level = CompLevel_full_profile; + } } } break; @@ -514,15 +545,39 @@ CompileBroker::compile_method(mh, bci, level, mh, hot_count, CompileTask::Reason_Tiered, thread); } +bool AdvancedThresholdPolicy::maybe_switch_to_aot(methodHandle mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread) { + if (UseAOT && !delay_compilation_during_startup()) { + if (cur_level == CompLevel_full_profile || cur_level == CompLevel_none) { + // If the current level is full profile or interpreter and we're switching to any other level, + // activate the AOT code first so that we won't waste time over-profiling. + compile(mh, InvocationEntryBci, CompLevel_aot, thread); + // Fall through for JIT compilation. + } + if (next_level == CompLevel_limited_profile && cur_level != CompLevel_aot && mh->has_aot_code()) { + // If the next level is limited profile, use the AOT code (if there is any), + // since it's essentially the same thing. + compile(mh, InvocationEntryBci, CompLevel_aot, thread); + // No need to JIT, we're done. + return true; + } + } + return false; +} +
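In prose, maybe_switch_to_aot above makes two separate decisions: when the method is currently interpreted or running fully profiled C1 code, it activates any AOT code first so profiling does not continue needlessly while the JIT request waits in the queue; and when the requested level is limited profile (tier 2), existing AOT code is treated as an adequate substitute, so no JIT request is issued at all. A self-contained sketch of that decision table (the Level values are stand-ins for the real CompLevel_* constants, and has_aot stands for Method::has_aot_code()):

  #include <cstdio>

  enum Level { None, Aot, Limited, Full, Opt };  // stand-ins for CompLevel_*

  // Returns true when existing AOT code fully replaces the requested JIT
  // compilation; false means JIT compilation should still proceed.
  bool aot_replaces_jit(Level cur, Level next, bool has_aot) {
    if (!has_aot) return false;              // nothing precompiled to switch to
    return next == Limited && cur != Aot;    // AOT ~ limited profile (tier 2)
  }

  int main() {
    printf("%d\n", aot_replaces_jit(None, Limited, true)); // 1: stay on AOT code
    printf("%d\n", aot_replaces_jit(Full, Opt, true));     // 0: fall through to C2
    return 0;
  }

The invocation-event handler that follows consults this helper before queueing a compile.

 // Handle the invocation event.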
void AdvancedThresholdPolicy::method_invocation_event(const methodHandle& mh, const methodHandle& imh, CompLevel level, CompiledMethod* nm, JavaThread* thread) { if (should_create_mdo(mh(), level)) { create_mdo(mh, thread); } - if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { - CompLevel next_level = call_event(mh(), level, thread); - if (next_level != level) { + CompLevel next_level = call_event(mh(), level, thread); + if (next_level != level) { + if (maybe_switch_to_aot(mh, level, next_level, thread)) { + // No JITting necessary + return; + } + if (is_compilation_enabled() && !CompileBroker::compilation_is_in_queue(mh)) { compile(mh, InvocationEntryBci, next_level, thread); } } @@ -552,46 +607,56 @@ // enough calls. CompLevel cur_level, next_level; if (mh() != imh()) { // If there is an enclosing method - guarantee(nm != NULL, "Should have nmethod here"); - cur_level = comp_level(mh()); - next_level = call_event(mh(), cur_level, thread); - - if (max_osr_level == CompLevel_full_optimization) { - // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts - bool make_not_entrant = false; - if (nm->is_osr_method()) { - // This is an osr method, just make it not entrant and recompile later if needed - make_not_entrant = true; - } else { - if (next_level != CompLevel_full_optimization) { - // next_level is not full opt, so we need to recompile the - // enclosing method without the inlinee - cur_level = CompLevel_none; + if (level == CompLevel_aot) { + // Recompile the enclosing method to prevent infinite OSRs. Stay at AOT level while it's compiling. + if (max_osr_level != CompLevel_none && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, MIN2((CompLevel)TieredStopAtLevel, CompLevel_full_profile), thread); + } + } else { + // Current loop event level is not AOT + guarantee(nm != NULL, "Should have nmethod here"); + cur_level = comp_level(mh()); + next_level = call_event(mh(), cur_level, thread); + + if (max_osr_level == CompLevel_full_optimization) { + // The inlinee OSRed to full opt, we need to modify the enclosing method to avoid deopts + bool make_not_entrant = false; + if (nm->is_osr_method()) { + // This is an osr method, just make it not entrant and recompile later if needed make_not_entrant = true; + } else { + if (next_level != CompLevel_full_optimization) { + // next_level is not full opt, so we need to recompile the + // enclosing method without the inlinee + cur_level = CompLevel_none; + make_not_entrant = true; + } } - } - if (make_not_entrant) { - if (PrintTieredEvents) { - int osr_bci = nm->is_osr_method() ? nm->osr_entry_bci() : InvocationEntryBci; - print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level); + if (make_not_entrant) { + if (PrintTieredEvents) { + int osr_bci = nm->is_osr_method() ? 
nm->osr_entry_bci() : InvocationEntryBci; + print_event(MAKE_NOT_ENTRANT, mh(), mh(), osr_bci, level); + } + nm->make_not_entrant(); } - nm->make_not_entrant(); } - } - if (!CompileBroker::compilation_is_in_queue(mh)) { // Fix up next_level if necessary to avoid deopts if (next_level == CompLevel_limited_profile && max_osr_level == CompLevel_full_profile) { next_level = CompLevel_full_profile; } if (cur_level != next_level) { - compile(mh, InvocationEntryBci, next_level, thread); + if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, next_level, thread); + } } } } else { - cur_level = comp_level(imh()); - next_level = call_event(imh(), cur_level, thread); - if (!CompileBroker::compilation_is_in_queue(imh) && (next_level != cur_level)) { - compile(imh, InvocationEntryBci, next_level, thread); + cur_level = comp_level(mh()); + next_level = call_event(mh(), cur_level, thread); + if (next_level != cur_level) { + if (!maybe_switch_to_aot(mh, cur_level, next_level, thread) && !CompileBroker::compilation_is_in_queue(mh)) { + compile(mh, InvocationEntryBci, next_level, thread); + } } } } --- old/src/share/vm/runtime/advancedThresholdPolicy.hpp 2016-10-31 17:47:05.000000000 -0700 +++ new/src/share/vm/runtime/advancedThresholdPolicy.hpp 2016-10-31 17:47:05.000000000 -0700 @@ -205,6 +205,8 @@ double _increase_threshold_at_ratio; + bool maybe_switch_to_aot(methodHandle mh, CompLevel cur_level, CompLevel next_level, JavaThread* thread); + protected: void print_specific(EventType type, methodHandle mh, methodHandle imh, int bci, CompLevel level); --- old/src/share/vm/runtime/frame.cpp 2016-10-31 17:47:06.000000000 -0700 +++ new/src/share/vm/runtime/frame.cpp 2016-10-31 17:47:05.000000000 -0700 @@ -644,6 +644,7 @@ // // First letter indicates type of the frame: // J: Java frame (compiled) +// A: Java frame (aot compiled) // j: Java frame (interpreted) // V: VM frame (C/C++) // v: Other frames running VM generated code (e.g. stubs, adapters, etc.) @@ -683,7 +684,9 @@ CompiledMethod* cm = (CompiledMethod*)_cb; Method* m = cm->method(); if (m != NULL) { - if (cm->is_nmethod()) { + if (cm->is_aot()) { + st->print("A %d ", cm->compile_id()); + } else if (cm->is_nmethod()) { nmethod* nm = cm->as_nmethod(); st->print("J %d%s", nm->compile_id(), (nm->is_osr_method() ? "%" : "")); st->print(" %s", nm->compiler_name()); @@ -1262,8 +1265,10 @@ // For now just label the frame CompiledMethod* cm = (CompiledMethod*)cb(); values.describe(-1, info_address, - FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s", frame_no, - p2i(cm), cm->method()->name_and_sig_as_C_string(), + FormatBuffer<1024>("#%d nmethod " INTPTR_FORMAT " for method %s%s%s", frame_no, + p2i(cm), + (cm->is_aot() ? "A ": "J "), + cm->method()->name_and_sig_as_C_string(), (_deopt_state == is_deoptimized) ? " (deoptimized)" : ((_deopt_state == unknown) ? 
" (state unknown)" : "")), --- old/src/share/vm/runtime/globals.hpp 2016-10-31 17:47:06.000000000 -0700 +++ new/src/share/vm/runtime/globals.hpp 2016-10-31 17:47:06.000000000 -0700 @@ -3387,6 +3387,22 @@ "Non-segmented code cache: X[%] of the total code cache") \ range(0, 100) \ \ + /* AOT parameters */ \ + product(bool, UseAOT, AOT_ONLY(true) NOT_AOT(false), \ + "Use AOT compiled files") \ + \ + product(ccstrlist, AOTLibrary, NULL, \ + "AOT library") \ + \ + product(bool, PrintAOT, false, \ + "Print used AOT klasses and methods") \ + \ + notproduct(bool, PrintAOTStatistics, false, \ + "Print AOT statistics") \ + \ + diagnostic(bool, UseAOTStrictLoading, false, \ + "Exit the VM if any of the AOT libraries has invalid config") \ + \ /* interpreter debugging */ \ develop(intx, BinarySwitchThreshold, 5, \ "Minimal number of lookupswitch entries for rewriting to binary " \ @@ -3659,6 +3675,21 @@ "Back edge threshold at which tier 3 OSR compilation is invoked") \ range(0, max_jint) \ \ + product(intx, Tier3AOTInvocationThreshold, 10000, \ + "Compile if number of method invocations crosses this " \ + "threshold if coming from AOT") \ + \ + product(intx, Tier3AOTMinInvocationThreshold, 1000, \ + "Minimum invocation to compile at tier 3 if coming from AOT") \ + \ + product(intx, Tier3AOTCompileThreshold, 15000, \ + "Threshold at which tier 3 compilation is invoked (invocation " \ + "minimum must be satisfied) if coming from AOT") \ + \ + product(intx, Tier3AOTBackEdgeThreshold, 120000, \ + "Back edge threshold at which tier 3 OSR compilation is invoked " \ + "if coming from AOT") \ + \ product(intx, Tier4InvocationThreshold, 5000, \ "Compile if number of method invocations crosses this " \ "threshold") \ --- old/src/share/vm/runtime/java.cpp 2016-10-31 17:47:07.000000000 -0700 +++ new/src/share/vm/runtime/java.cpp 2016-10-31 17:47:07.000000000 -0700 @@ -87,6 +87,7 @@ #include "opto/indexSet.hpp" #include "opto/runtime.hpp" #endif +#include "aot/aotLoader.hpp" GrowableArray* collected_profiled_methods; @@ -280,6 +281,10 @@ #endif #endif + if (PrintAOTStatistics) { + AOTLoader::print_statistics(); + } + if (PrintNMethodStatistics) { nmethod::print_statistics(); } --- old/src/share/vm/runtime/sharedRuntime.cpp 2016-10-31 17:47:08.000000000 -0700 +++ new/src/share/vm/runtime/sharedRuntime.cpp 2016-10-31 17:47:07.000000000 -0700 @@ -44,6 +44,7 @@ #include "oops/klass.hpp" #include "oops/objArrayKlass.hpp" #include "oops/oop.inline.hpp" +#include "aot/aotLoader.hpp" #include "prims/forte.hpp" #include "prims/jvmtiExport.hpp" #include "prims/methodHandles.hpp" @@ -78,6 +79,7 @@ RuntimeStub* SharedRuntime::_resolve_opt_virtual_call_blob; RuntimeStub* SharedRuntime::_resolve_virtual_call_blob; RuntimeStub* SharedRuntime::_resolve_static_call_blob; +address SharedRuntime::_resolve_static_call_entry; DeoptimizationBlob* SharedRuntime::_deopt_blob; SafepointBlob* SharedRuntime::_polling_page_vectors_safepoint_handler_blob; @@ -97,6 +99,7 @@ _resolve_opt_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_opt_virtual_call_C), "resolve_opt_virtual_call"); _resolve_virtual_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_virtual_call_C), "resolve_virtual_call"); _resolve_static_call_blob = generate_resolve_blob(CAST_FROM_FN_PTR(address, SharedRuntime::resolve_static_call_C), "resolve_static_call"); + _resolve_static_call_entry = _resolve_static_call_blob->entry_point(); #if defined(COMPILER2) || INCLUDE_JVMCI // Vectors are generated only 
by C2 and JVMCI. @@ -475,7 +478,7 @@ // JVMCI's ExceptionHandlerStub expects the thread local exception PC to be clear // and other exception handler continuations do not read it thread->set_exception_pc(NULL); -#endif +#endif // INCLUDE_JVMCI // The fastest case first CodeBlob* blob = CodeCache::find_blob(return_address); @@ -512,6 +515,16 @@ return Interpreter::rethrow_exception_entry(); } +#if INCLUDE_AOT + // AOT Compiled code + if (UseAOT && AOTLoader::contains(return_address)) { + AOTCompiledMethod* aotm = AOTLoader::find_aot((address) return_address); + // Set flag if return address is a method handle call site. + thread->set_is_method_handle_return(aotm->is_method_handle_return(return_address)); + return aotm->exception_begin(); + } +#endif + guarantee(blob == NULL || !blob->is_runtime_stub(), "caller should have skipped stub"); guarantee(!VtableStubs::contains(return_address), "NULL exceptions in vtables should have been handled already!"); @@ -988,17 +1001,12 @@ } JRT_ENTRY_NO_ASYNC(void, SharedRuntime::register_finalizer(JavaThread* thread, oopDesc* obj)) - assert(obj->is_oop(), "must be a valid oop"); #if INCLUDE_JVMCI - // This removes the requirement for JVMCI compilers to emit code - // performing a dynamic check that obj has a finalizer before - // calling this routine. There should be no performance impact - // for C1 since it emits a dynamic check. C2 and the interpreter - // uses other runtime routines for registering finalizers. if (!obj->klass()->has_finalizer()) { return; } #endif // INCLUDE_JVMCI + assert(obj->is_oop(), "must be a valid oop"); assert(obj->klass()->has_finalizer(), "shouldn't be here otherwise"); InstanceKlass::register_finalizer(instanceOop(obj), CHECK); JRT_END @@ -1225,7 +1233,6 @@ assert(fr.is_entry_frame(), "must be"); // fr is now pointing to the entry frame. callee_method = methodHandle(THREAD, fr.entry_frame_call_wrapper()->callee_method()); - assert(fr.entry_frame_call_wrapper()->receiver() == NULL || !callee_method->is_static(), "non-null receiver for static call??"); } else { Bytecodes::Code bc; CallInfo callinfo; @@ -1354,16 +1361,18 @@ address dest_entry_point = callee == NULL ? 0 : callee->entry_point(); // used below #endif + bool is_nmethod = caller_nm->is_nmethod(); + if (is_virtual) { assert(receiver.not_null() || invoke_code == Bytecodes::_invokehandle, "sanity check"); bool static_bound = call_info.resolved_method()->can_be_statically_bound(); KlassHandle h_klass(THREAD, invoke_code == Bytecodes::_invokehandle ? 
NULL : receiver->klass()); CompiledIC::compute_monomorphic_entry(callee_method, h_klass, - is_optimized, static_bound, virtual_call_info, + is_optimized, static_bound, is_nmethod, virtual_call_info, CHECK_(methodHandle())); } else { // static call - CompiledStaticCall::compute_entry(callee_method, static_call_info); + CompiledStaticCall::compute_entry(callee_method, is_nmethod, static_call_info); } // grab lock, check for deoptimization and potentially patch caller @@ -1394,7 +1403,7 @@ inline_cache->set_to_monomorphic(virtual_call_info); } } else { - CompiledStaticCall* ssc = compiledStaticCall_before(caller_frame.pc()); + CompiledStaticCall* ssc = caller_nm->compiledStaticCall_before(caller_frame.pc()); if (ssc->is_clean()) ssc->set(static_call_info); } } @@ -1510,6 +1519,7 @@ JRT_END + methodHandle SharedRuntime::handle_ic_miss_helper(JavaThread *thread, TRAPS) { ResourceMark rm(thread); CallInfo call_info; @@ -1622,7 +1632,7 @@ inline_cache->compute_monomorphic_entry(callee_method, receiver_klass, inline_cache->is_optimized(), - false, + false, caller_nm->is_nmethod(), info, CHECK_(methodHandle())); inline_cache->set_to_monomorphic(info); } else if (!inline_cache->is_megamorphic() && !inline_cache->is_clean()) { @@ -1691,10 +1701,7 @@ // busy patching it. MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag); // Location of call instruction - if (NativeCall::is_call_before(pc)) { - NativeCall *ncall = nativeCall_before(pc); - call_addr = ncall->instruction_address(); - } + call_addr = caller_nm->call_instruction_address(pc); } // Make sure nmethod doesn't get deoptimized and removed until // this is done with it. @@ -1724,9 +1731,10 @@ // to a wrong method). It should not be performance critical, since the // resolve is only done once. + bool is_nmethod = caller_nm->is_nmethod(); MutexLocker ml(CompiledIC_lock); if (is_static_call) { - CompiledStaticCall* ssc= compiledStaticCall_at(call_addr); + CompiledStaticCall* ssc = caller_nm->compiledStaticCall_at(call_addr); ssc->set_to_clean(); } else { // compiled, dispatched call (which used to call an interpreted method) @@ -1793,6 +1801,37 @@ } #endif +bool SharedRuntime::should_fixup_call_destination(address destination, address entry_point, address caller_pc, Method* moop, CodeBlob* cb) { + if (destination != entry_point) { + CodeBlob* callee = CodeCache::find_blob(destination); + // callee == cb seems weird. It means calling interpreter thru stub. + if (callee == cb || callee->is_adapter_blob()) { + // static call or optimized virtual + if (TraceCallFixup) { + tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc)); + moop->print_short_name(tty); + tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point)); + } + return true; + } else { + if (TraceCallFixup) { + tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc)); + moop->print_short_name(tty); + tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point)); + } + // assert is too strong could also be resolve destinations. + // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be"); + } + } else { + if (TraceCallFixup) { + tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc)); + moop->print_short_name(tty); + tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point)); + } + } + return false; +} + // --------------------------------------------------------------------------- // We are calling the interpreter via a c2i. 
Normally this would mean that // we were called by a compiled method. However we could have lost a race @@ -1842,7 +1881,8 @@ // Expect to find a native call there (unless it was no-inline cache vtable dispatch) MutexLockerEx ml_patch(Patching_lock, Mutex::_no_safepoint_check_flag); if (NativeCall::is_call_before(return_pc)) { - NativeCall *call = nativeCall_before(return_pc); + ResourceMark mark; + NativeCallWrapper* call = nm->call_wrapper_before(return_pc); // // bug 6281185. We might get here after resolving a call site to a vanilla // virtual call. Because the resolvee uses the verified entry it may then @@ -1863,32 +1903,8 @@ return; } address destination = call->destination(); - if (destination != entry_point) { - CodeBlob* callee = CodeCache::find_blob(destination); - // callee == cb seems weird. It means calling interpreter thru stub. - if (callee == cb || callee->is_adapter_blob()) { - // static call or optimized virtual - if (TraceCallFixup) { - tty->print("fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc)); - moop->print_short_name(tty); - tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point)); - } - call->set_destination_mt_safe(entry_point); - } else { - if (TraceCallFixup) { - tty->print("failed to fixup callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc)); - moop->print_short_name(tty); - tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point)); - } - // assert is too strong could also be resolve destinations. - // assert(InlineCacheBuffer::contains(destination) || VtableStubs::contains(destination), "must be"); - } - } else { - if (TraceCallFixup) { - tty->print("already patched callsite at " INTPTR_FORMAT " to compiled code for", p2i(caller_pc)); - moop->print_short_name(tty); - tty->print_cr(" to " INTPTR_FORMAT, p2i(entry_point)); - } + if (should_fixup_call_destination(destination, entry_point, caller_pc, moop, cb)) { + call->set_destination_mt_safe(entry_point); } } } --- old/src/share/vm/runtime/sharedRuntime.hpp 2016-10-31 17:47:08.000000000 -0700 +++ new/src/share/vm/runtime/sharedRuntime.hpp 2016-10-31 17:47:08.000000000 -0700 @@ -60,6 +60,7 @@ static RuntimeStub* _resolve_opt_virtual_call_blob; static RuntimeStub* _resolve_virtual_call_blob; static RuntimeStub* _resolve_static_call_blob; + static address _resolve_static_call_entry; static DeoptimizationBlob* _deopt_blob; --- old/src/share/vm/runtime/simpleThresholdPolicy.cpp 2016-10-31 17:47:09.000000000 -0700 +++ new/src/share/vm/runtime/simpleThresholdPolicy.cpp 2016-10-31 17:47:09.000000000 -0700 @@ -33,6 +33,7 @@ #include "jvmci/jvmciRuntime.hpp" #endif +#ifdef TIERED void SimpleThresholdPolicy::print_counters(const char* prefix, methodHandle mh) { int invocation_count = mh->invocation_count(); @@ -242,6 +243,23 @@ if (level == CompLevel_none) { return; } + if (level == CompLevel_aot) { + if (mh->has_aot_code()) { + if (PrintTieredEvents) { + print_event(COMPILE, mh, mh, bci, level); + } + MutexLocker ml(Compile_lock); + NoSafepointVerifier nsv; + if (mh->has_aot_code() && mh->code() != mh->aot_code()) { + mh->aot_code()->make_entrant(); + if (mh->has_compiled_code()) { + mh->code()->make_not_entrant(); + } + Method::set_code(mh, mh->aot_code()); + } + } + return; + } // Check if the method can be compiled. If it cannot be compiled with C1, continue profiling // in the interpreter and then compile with C2 (the transition function will request that, @@ -275,6 +293,9 @@ // are passed to common() transition function). 
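The CompLevel_aot arms that this change adds to the tiered predicates (in AdvancedThresholdPolicy above and in the SimpleThresholdPolicy code below) reuse the tier-3 predicate shape, but with the dedicated Tier3AOT* thresholds from globals.hpp (invocation 10000, minimum invocation 1000, combined 15000, backedge 120000 by default): a method running as AOT code graduates to full profiling only once it is demonstrably warm. A standalone rendering of the call predicate (flag defaults hard-coded; the real helper also folds CompileThresholdScaling into its scale argument):

  #include <cstdio>

  // Defaults copied from the Tier3AOT* flags defined in globals.hpp above.
  const int kAOTInvocationThreshold    = 10000;
  const int kAOTMinInvocationThreshold = 1000;
  const int kAOTCompileThreshold       = 15000;

  // Mirrors call_predicate_helper<CompLevel_aot>: i = invocation count,
  // b = backedge count, scale = per-method threshold scaling.
  bool aot_call_predicate(int i, int b, double scale) {
    return (i >= kAOTInvocationThreshold * scale) ||
           (i >= kAOTMinInvocationThreshold * scale &&
            i + b >= kAOTCompileThreshold * scale);
  }

  int main() {
    printf("%d\n", aot_call_predicate(10000, 0, 1.0));    // 1: invocation-hot
    printf("%d\n", aot_call_predicate(500, 20000, 1.0));  // 0: too few invocations
    return 0;
  }

The predicate functions below dispatch to these helpers per current level.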
bool SimpleThresholdPolicy::loop_predicate(int i, int b, CompLevel cur_level, Method* method) { switch(cur_level) { + case CompLevel_aot: { + return loop_predicate_helper<CompLevel_aot>(i, b, 1.0, method); + } case CompLevel_none: case CompLevel_limited_profile: { return loop_predicate_helper<CompLevel_none>(i, b, 1.0, method); @@ -289,6 +310,9 @@ bool SimpleThresholdPolicy::call_predicate(int i, int b, CompLevel cur_level, Method* method) { switch(cur_level) { + case CompLevel_aot: { + return call_predicate_helper<CompLevel_aot>(i, b, 1.0, method); + } case CompLevel_none: case CompLevel_limited_profile: { return call_predicate_helper<CompLevel_none>(i, b, 1.0, method); @@ -321,10 +345,16 @@ int i = method->invocation_count(); int b = method->backedge_count(); - if (is_trivial(method)) { + if (is_trivial(method) && cur_level != CompLevel_aot) { next_level = CompLevel_simple; } else { switch(cur_level) { + case CompLevel_aot: { + if ((this->*p)(i, b, cur_level, method)) { + next_level = CompLevel_full_profile; + } + } + break; case CompLevel_none: // If we were at full profile level, would we switch to full opt? if (common(p, method, CompLevel_full_profile) == CompLevel_full_optimization) { @@ -438,3 +468,5 @@ } } } + +#endif --- old/src/share/vm/runtime/simpleThresholdPolicy.hpp 2016-10-31 17:47:09.000000000 -0700 +++ new/src/share/vm/runtime/simpleThresholdPolicy.hpp 2016-10-31 17:47:09.000000000 -0700 @@ -30,6 +30,8 @@ #include "runtime/compilationPolicy.hpp" #include "utilities/globalDefinitions.hpp" +#ifdef TIERED + class CompileTask; class CompileQueue; @@ -118,4 +120,6 @@ } }; +#endif // TIERED + #endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_HPP --- old/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp 2016-10-31 17:47:10.000000000 -0700 +++ new/src/share/vm/runtime/simpleThresholdPolicy.inline.hpp 2016-10-31 17:47:10.000000000 -0700 @@ -27,6 +27,8 @@ #include "compiler/compilerOracle.hpp" +#ifdef TIERED + template<CompLevel level> bool SimpleThresholdPolicy::call_predicate_helper(int i, int b, double scale, Method* method) { double threshold_scaling; @@ -34,6 +36,9 @@ scale *= threshold_scaling; } switch(level) { + case CompLevel_aot: + return (i >= Tier3AOTInvocationThreshold * scale) || + (i >= Tier3AOTMinInvocationThreshold * scale && i + b >= Tier3AOTCompileThreshold * scale); case CompLevel_none: case CompLevel_limited_profile: return (i >= Tier3InvocationThreshold * scale) || @@ -52,6 +57,8 @@ scale *= threshold_scaling; } switch(level) { + case CompLevel_aot: + return b >= Tier3AOTBackEdgeThreshold * scale; case CompLevel_none: case CompLevel_limited_profile: return b >= Tier3BackEdgeThreshold * scale; @@ -87,4 +94,6 @@ return false; } +#endif // TIERED + #endif // SHARE_VM_RUNTIME_SIMPLETHRESHOLDPOLICY_INLINE_HPP --- old/src/share/vm/runtime/sweeper.cpp 2016-10-31 17:47:11.000000000 -0700 +++ new/src/share/vm/runtime/sweeper.cpp 2016-10-31 17:47:11.000000000 -0700 @@ -213,6 +213,8 @@ if (_current.method() != NULL) { if (_current.method()->is_nmethod()) { assert(CodeCache::find_blob_unsafe(_current.method()) == _current.method(), "Sweeper nmethod cached state invalid"); + } else if (_current.method()->is_aot()) { + assert(CodeCache::find_blob_unsafe(_current.method()->code_begin()) == _current.method(), "Sweeper AOT method cached state invalid"); } else { ShouldNotReachHere(); } @@ -570,7 +572,7 @@ RelocIterator iter(nm); while (iter.next()) { if (iter.type() == relocInfo::virtual_call_type) { - CompiledIC::cleanup_call_site(iter.virtual_call_reloc()); + CompiledIC::cleanup_call_site(iter.virtual_call_reloc(), nm); } } } ---
old/src/share/vm/runtime/vframe_hp.cpp 2016-10-31 17:47:11.000000000 -0700 +++ new/src/share/vm/runtime/vframe_hp.cpp 2016-10-31 17:47:11.000000000 -0700 @@ -198,7 +198,7 @@ if (scope() == NULL) { CompiledMethod* nm = code(); Method* method = nm->method(); - assert(method->is_native(), ""); + assert(method->is_native() || nm->is_aot(), "Expect a native method or precompiled method"); if (!method->is_synchronized()) { return new GrowableArray<MonitorInfo*>(0); } --- old/src/share/vm/runtime/vmStructs.cpp 2016-10-31 17:47:12.000000000 -0700 +++ new/src/share/vm/runtime/vmStructs.cpp 2016-10-31 17:47:12.000000000 -0700 @@ -2537,6 +2537,24 @@ declare_constant(InstanceKlass::fully_initialized) \ declare_constant(InstanceKlass::initialization_error) \ \ + /***************************************/ \ + /* InstanceKlass enums for _misc_flags */ \ + /***************************************/ \ + \ + declare_constant(InstanceKlass::_misc_rewritten) \ + declare_constant(InstanceKlass::_misc_has_nonstatic_fields) \ + declare_constant(InstanceKlass::_misc_should_verify_class) \ + declare_constant(InstanceKlass::_misc_is_anonymous) \ + declare_constant(InstanceKlass::_misc_is_contended) \ + declare_constant(InstanceKlass::_misc_has_nonstatic_concrete_methods) \ + declare_constant(InstanceKlass::_misc_declares_nonstatic_concrete_methods)\ + declare_constant(InstanceKlass::_misc_has_been_redefined) \ + declare_constant(InstanceKlass::_misc_has_passed_fingerprint_check) \ + declare_constant(InstanceKlass::_misc_is_scratch_class) \ + declare_constant(InstanceKlass::_misc_is_shared_boot_class) \ + declare_constant(InstanceKlass::_misc_is_shared_platform_class) \ + declare_constant(InstanceKlass::_misc_is_shared_app_class) \ + \ /*********************************/ \ /* Symbol* - symbol max length */ \ /*********************************/ \ @@ -2685,6 +2703,7 @@ declare_constant(CompLevel_limited_profile) \ declare_constant(CompLevel_full_profile) \ declare_constant(CompLevel_full_optimization) \ + declare_constant(CompLevel_aot) \ \ /***************/ \ /* OopMapValue */ \ --- old/src/share/vm/runtime/vm_version.cpp 2016-10-31 17:47:13.000000000 -0700 +++ new/src/share/vm/runtime/vm_version.cpp 2016-10-31 17:47:13.000000000 -0700 @@ -134,7 +134,19 @@ case Arguments::_int: return UseSharedSpaces ? "interpreted mode, sharing" : "interpreted mode"; case Arguments::_mixed: - return UseSharedSpaces ? "mixed mode, sharing" : "mixed mode"; + if (UseSharedSpaces) { + if (UseAOT) { + return "mixed mode, aot, sharing"; + } else { + return "mixed mode, sharing"; + } + } else { + if (UseAOT) { + return "mixed mode, aot"; + } else { + return "mixed mode"; + } + } case Arguments::_comp: return UseSharedSpaces ?
"compiled mode, sharing" : "compiled mode"; }; --- old/src/share/vm/utilities/debug.cpp 2016-10-31 17:47:13.000000000 -0700 +++ new/src/share/vm/utilities/debug.cpp 2016-10-31 17:47:13.000000000 -0700 @@ -771,7 +771,7 @@ // see if it's a valid frame if (fr.pc()) { - st->print_cr("Native frames: (J=compiled Java code, j=interpreted, Vv=VM code, C=native code)"); + st->print_cr("Native frames: (J=compiled Java code, A=aot compiled Java code, j=interpreted, Vv=VM code, C=native code)"); int count = 0; while (count++ < StackPrintLimit) { --- old/src/share/vm/utilities/growableArray.hpp 2016-10-31 17:47:14.000000000 -0700 +++ new/src/share/vm/utilities/growableArray.hpp 2016-10-31 17:47:14.000000000 -0700 @@ -503,6 +503,7 @@ } public: + GrowableArrayIterator() : _array(NULL), _position(0) { } GrowableArrayIterator& operator++() { ++_position; return *this; } E operator*() { return _array->at(_position); } --- old/src/share/vm/utilities/macros.hpp 2016-10-31 17:47:15.000000000 -0700 +++ new/src/share/vm/utilities/macros.hpp 2016-10-31 17:47:14.000000000 -0700 @@ -180,6 +180,14 @@ #define INCLUDE_JVMCI 1 #endif +#ifdef INCLUDE_AOT +# if INCLUDE_AOT && !(INCLUDE_JVMCI) +# error "Must have JVMCI for AOT" +# endif +#else +# define INCLUDE_AOT 0 +#endif + #if INCLUDE_JVMCI #define JVMCI_ONLY(code) code #define NOT_JVMCI(code) @@ -190,6 +198,16 @@ #define NOT_JVMCI_RETURN {} #endif // INCLUDE_JVMCI +#if INCLUDE_AOT +#define AOT_ONLY(code) code +#define NOT_AOT(code) +#define NOT_AOT_RETURN /* next token must be ; */ +#else +#define AOT_ONLY(code) +#define NOT_AOT(code) code +#define NOT_AOT_RETURN {} +#endif // INCLUDE_AOT + // COMPILER1 variant #ifdef COMPILER1 #ifdef COMPILER2 --- /dev/null 2016-10-31 17:47:15.000000000 -0700 +++ new/make/CompileTools.gmk 2016-10-31 17:47:15.000000000 -0700 @@ -0,0 +1,160 @@ +# +# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + +# This must be the first rule +default: all + +include $(SPEC) +include MakeBase.gmk + +include JavaCompilation.gmk +include SetupJavaCompilers.gmk + +TARGETS := + +# Hook to include the corresponding custom file, if present. 
+$(eval $(call IncludeCustomExtension, hotspot, CompileTools.gmk)) + +ifeq ($(ENABLE_AOT), true) + VM_CI_SRC_DIR := $(HOTSPOT_TOPDIR)/src/jdk.vm.ci/share/classes + + SRC_DIR := $(HOTSPOT_TOPDIR)/src/jdk.vm.compiler/share/classes + + ############################################################################## + # Compile the annotation processors + $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_MATCH_PROCESSOR, \ + SETUP := GENERATE_OLDBYTECODE, \ + SRC := \ + $(SRC_DIR)/com.oracle.graal.compiler/src \ + $(SRC_DIR)/com.oracle.graal.compiler.common/src \ + $(SRC_DIR)/com.oracle.graal.compiler.match.processor/src \ + $(SRC_DIR)/com.oracle.graal.api.collections/src \ + $(SRC_DIR)/com.oracle.graal.api.replacements/src \ + $(SRC_DIR)/com.oracle.graal.asm/src \ + $(SRC_DIR)/com.oracle.graal.bytecode/src \ + $(SRC_DIR)/com.oracle.graal.code/src \ + $(SRC_DIR)/com.oracle.graal.debug/src \ + $(SRC_DIR)/com.oracle.graal.graph/src \ + $(SRC_DIR)/com.oracle.graal.lir/src \ + $(SRC_DIR)/com.oracle.graal.loop/src \ + $(SRC_DIR)/com.oracle.graal.loop.phases/src \ + $(SRC_DIR)/com.oracle.graal.nodeinfo/src \ + $(SRC_DIR)/com.oracle.graal.nodes/src \ + $(SRC_DIR)/com.oracle.graal.options/src \ + $(SRC_DIR)/com.oracle.graal.phases/src \ + $(SRC_DIR)/com.oracle.graal.phases.common/src \ + $(SRC_DIR)/com.oracle.graal.serviceprovider/src \ + $(SRC_DIR)/com.oracle.graal.virtual/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.runtime/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.services/src \ + , \ + EXCLUDE_FILES := $(EXCLUDE_FILES), \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.match.processor, \ + JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.match.processor.jar, \ + )) + + TARGETS += $(BUILD_VM_COMPILER_MATCH_PROCESSOR) + + ############################################################################## + + $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_NODEINFO_PROCESSOR, \ + SETUP := GENERATE_OLDBYTECODE, \ + SRC := \ + $(SRC_DIR)/com.oracle.graal.nodeinfo/src \ + $(SRC_DIR)/com.oracle.graal.nodeinfo.processor/src \ + , \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.nodeinfo.processor, \ + JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.nodeinfo.processor.jar, \ + )) + + TARGETS += $(BUILD_VM_COMPILER_NODEINFO_PROCESSOR) + + ############################################################################## + + $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_OPTIONS_PROCESSOR, \ + SETUP := GENERATE_OLDBYTECODE, \ + SRC := \ + $(SRC_DIR)/com.oracle.graal.options/src \ + $(SRC_DIR)/com.oracle.graal.options.processor/src \ + , \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \ + JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar, \ + )) + + TARGETS += $(BUILD_VM_COMPILER_OPTIONS_PROCESSOR) + + ############################################################################## + + $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \ + SETUP := GENERATE_OLDBYTECODE, \ + SRC := \ + $(SRC_DIR)/com.oracle.graal.replacements.verifier/src \ + $(SRC_DIR)/com.oracle.graal.api.collections/src \ + $(SRC_DIR)/com.oracle.graal.api.replacements/src \ + $(SRC_DIR)/com.oracle.graal.code/src \ + $(SRC_DIR)/com.oracle.graal.compiler.common/src \ + $(SRC_DIR)/com.oracle.graal.debug/src \ + $(SRC_DIR)/com.oracle.graal.graph/src \ + $(SRC_DIR)/com.oracle.graal.nodeinfo/src \ + $(SRC_DIR)/com.oracle.graal.options/src \ + 
$(SRC_DIR)/com.oracle.graal.serviceprovider/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.code/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.common/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.meta/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.runtime/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.services/src \ + , \ + EXCLUDE_FILES := $(EXCLUDE_FILES), \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier, \ + JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar, \ + )) + + TARGETS += $(BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER) + + ############################################################################## + + $(eval $(call SetupJavaCompilation, BUILD_VM_COMPILER_SERVICEPROVIDER_PROCESSOR, \ + SETUP := GENERATE_OLDBYTECODE, \ + SRC := \ + $(SRC_DIR)/com.oracle.graal.serviceprovider/src \ + $(SRC_DIR)/com.oracle.graal.serviceprovider.processor/src \ + $(VM_CI_SRC_DIR)/jdk.vm.ci.services/src \ + , \ + EXCLUDE_FILES := $(EXCLUDE_FILES), \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.serviceprovider.processor, \ + JAR := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.serviceprovider.processor.jar, \ + )) + + TARGETS += $(BUILD_VM_COMPILER_SERVICEPROVIDER_PROCESSOR) + + ############################################################################## +endif + +all: $(TARGETS) + +.PHONY: all --- /dev/null 2016-10-31 17:47:16.000000000 -0700 +++ new/make/Tools.gmk 2016-10-31 17:47:16.000000000 -0700 @@ -0,0 +1,69 @@ +# +# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + +################################################################################ +# This file is meant to be included by makefiles using any of the build tools +# compiled by CompileTools.gmk. It assumes that CompileTools.gmk has already +# been run in a separate top level target. It should contain definitions for +# build tools targets without defining the rules to build them so that usage of +# those tools can define prerequisites to them. + +include JavaCompilation.gmk + +# Hook to include the corresponding custom file, if present. 
+$(eval $(call IncludeCustomExtension, hotspot, Tools.gmk)) + +ifeq ($(ENABLE_AOT), true) + $(eval $(call SetupJavaCompilationCompileTarget, \ + BUILD_VM_COMPILER_MATCH_PROCESSOR, \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.match.processor, \ + SETUP := GENERATE_JDKBYTECODE, \ + )) + + $(eval $(call SetupJavaCompilationCompileTarget, \ + BUILD_VM_COMPILER_NODEINFO_PROCESSOR, \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.nodeinfo.processor, \ + SETUP := GENERATE_JDKBYTECODE, \ + )) + + $(eval $(call SetupJavaCompilationCompileTarget, \ + BUILD_VM_COMPILER_OPTIONS_PROCESSOR, \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor, \ + SETUP := GENERATE_JDKBYTECODE, \ + )) + + $(eval $(call SetupJavaCompilationCompileTarget, \ + BUILD_VM_COMPILER_REPLACEMENTS_VERIFIER, \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier, \ + SETUP := GENERATE_JDKBYTECODE, \ + )) + + $(eval $(call SetupJavaCompilationCompileTarget, \ + BUILD_VM_COMPILER_SERVICEPROVIDER_PROCESSOR, \ + BIN := $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.serviceprovider.processor, \ + SETUP := GENERATE_JDKBYTECODE, \ + )) +endif +################################################################################ --- /dev/null 2016-10-31 17:47:16.000000000 -0700 +++ new/make/gensrc/Gensrc-jdk.vm.compiler.gmk 2016-10-31 17:47:16.000000000 -0700 @@ -0,0 +1,141 @@ +# +# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. 
+# + +default: all + +include $(SPEC) +include MakeBase.gmk +include Tools.gmk + +$(eval $(call IncludeCustomExtension, hotspot, gensrc/Gensrc-jdk.vm.compiler.gmk)) + +GENSRC_DIR := $(SUPPORT_OUTPUTDIR)/gensrc/$(MODULE) +SRC_DIR := $(HOTSPOT_TOPDIR)/src/$(MODULE)/share/classes + +################################################################################ + +PROC_SRC_SUBDIRS := \ + com.oracle.graal.code \ + com.oracle.graal.compiler \ + com.oracle.graal.compiler.aarch64 \ + com.oracle.graal.compiler.amd64 \ + com.oracle.graal.compiler.common \ + com.oracle.graal.compiler.sparc \ + com.oracle.graal.debug \ + com.oracle.graal.hotspot \ + com.oracle.graal.hotspot.aarch64 \ + com.oracle.graal.hotspot.amd64 \ + com.oracle.graal.hotspot.sparc \ + com.oracle.graal.graph \ + com.oracle.graal.java \ + com.oracle.graal.lir \ + com.oracle.graal.lir.amd64 \ + com.oracle.graal.loop \ + com.oracle.graal.loop.phases \ + com.oracle.graal.nodes \ + com.oracle.graal.replacements \ + com.oracle.graal.replacements.aarch64 \ + com.oracle.graal.replacements.amd64 \ + com.oracle.graal.phases \ + com.oracle.graal.phases.common \ + com.oracle.graal.printer \ + com.oracle.graal.virtual \ + # + +PROC_SRC_DIRS := $(patsubst %, $(SRC_DIR)/%/src, $(PROC_SRC_SUBDIRS)) + +PROC_SRCS := $(filter %.java, $(call CacheFind, $(PROC_SRC_DIRS))) + +ALL_SRC_DIRS := $(wildcard $(SRC_DIR)/*/src) +SOURCEPATH := $(call PathList, $(ALL_SRC_DIRS)) + +PROCESSOR_JARS := \ + $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.match.processor.jar \ + $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.nodeinfo.processor.jar \ + $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.options.processor.jar \ + $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.replacements.verifier.jar \ + $(BUILDTOOLS_OUTPUTDIR)/jdk.vm.compiler.serviceprovider.processor.jar \ + # +PROCESSOR_PATH := $(call PathList, $(PROCESSOR_JARS)) + +ADD_EXPORTS := \ + --add-exports jdk.vm.ci/jdk.vm.ci.aarch64=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.amd64=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.code=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.code.site=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.code.stack=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.common=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.hotspot=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.hotspot.aarch64=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.hotspot.amd64=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.hotspot.events=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.hotspot.sparc=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.hotspotvmconfig=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.inittimer=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.meta=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.runtime=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.services=ALL-UNNAMED \ + --add-exports jdk.vm.ci/jdk.vm.ci.sparc=ALL-UNNAMED \ + # + +$(GENSRC_DIR)/_gensrc_proc_done: $(PROC_SRCS) $(PROCESSOR_JARS) + $(call MakeDir, $(@D)) + $(eval $(call ListPathsSafely,PROC_SRCS,$(@D)/_gensrc_proc_files)) + $(JAVA_SMALL) $(NEW_JAVAC) \ + -XDignore.symbol.file \ + --upgrade-module-path $(JDK_OUTPUTDIR)/modules --system none \ + $(ADD_EXPORTS) \ + -sourcepath $(SOURCEPATH) \ + -implicit:none \ + -proc:only \ + -processorpath $(PROCESSOR_PATH) \ + -d $(GENSRC_DIR) \ + -s $(GENSRC_DIR) \ + @$(@D)/_gensrc_proc_files + $(TOUCH) $@ + +TARGETS += $(GENSRC_DIR)/_gensrc_proc_done + +################################################################################ + +$(GENSRC_DIR)/module-info.java.extra: 
$(GENSRC_DIR)/_gensrc_proc_done + ($(CD) $(GENSRC_DIR)/META-INF/providers && \ + for i in $$($(LS)); do \ + c=$$($(CAT) $$i | $(TR) -d '\n\r'); \ + $(ECHO) "provides $$c with $$i;" >> $@; \ + done); \ + $(ECHO) "uses com.oracle.graal.options.OptionDescriptors;" >> $@; \ + for i in $$($(FIND) $(GENSRC_DIR) -name '*_OptionDescriptors.java'); do \ + c=$$($(ECHO) $$i | $(SED) 's:.*/jdk\.vm\.compiler/\(.*\)\.java:\1:' | $(TR) '/' '.'); \ + $(ECHO) "provides com.oracle.graal.options.OptionDescriptors with $$c;" >> $@; \ + done + +TARGETS += $(GENSRC_DIR)/module-info.java.extra + +################################################################################ + +all: $(TARGETS) + +.PHONY: default all --- /dev/null 2016-10-31 17:47:17.000000000 -0700 +++ new/make/lib/Lib-jdk.aot.gmk 2016-10-31 17:47:17.000000000 -0700 @@ -0,0 +1,53 @@ +# +# Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. +# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. +# +# This code is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License version 2 only, as +# published by the Free Software Foundation. Oracle designates this +# particular file as subject to the "Classpath" exception as provided +# by Oracle in the LICENSE file that accompanied this code. +# +# This code is distributed in the hope that it will be useful, but WITHOUT +# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or +# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License +# version 2 for more details (a copy is included in the LICENSE file that +# accompanied this code). +# +# You should have received a copy of the GNU General Public License version +# 2 along with this work; if not, write to the Free Software Foundation, +# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA +# or visit www.oracle.com if you need additional information or have any +# questions. +# + +include $(SPEC) +include NativeCompilation.gmk + +$(eval $(call IncludeCustomExtension, hotspot, lib/Lib-jdk.aot.gmk)) + +################################################################################ + +# Build libjelfshim only on Linux and Solaris and when libelf is available +ifeq ($(NEEDS_LIB_JELFSHIM), true) + JELFSHIM_NAME := jelfshim + + $(eval $(call SetupNativeCompilation, BUILD_LIBJELFSHIM, \ + TOOLCHAIN := TOOLCHAIN_DEFAULT, \ + OPTIMIZATION := LOW, \ + LIBRARY := $(JELFSHIM_NAME), \ + OUTPUT_DIR := $(call FindLibDirForModule, $(MODULE)), \ + SRC := $(HOTSPOT_TOPDIR)/src/jdk.aot/unix/native/libjelfshim, \ + CFLAGS := $(CFLAGS_JDKLIB) $(ELF_CFLAGS) \ + -I$(SUPPORT_OUTPUTDIR)/headers/$(MODULE), \ + LDFLAGS := $(LDFLAGS_JDKLIB), \ + OBJECT_DIR := $(SUPPORT_OUTPUTDIR)/native/$(MODULE)/lib$(JELFSHIM_NAME), \ + LIBS := $(ELF_LIBS) $(LIBS_JDKLIB), \ + )) + + TARGETS += $(BUILD_LIBJELFSHIM) +endif + +################################################################################ --- /dev/null 2016-10-31 17:47:17.000000000 -0700 +++ new/src/cpu/x86/vm/compiledIC_aot_x86_64.cpp 2016-10-31 17:47:17.000000000 -0700 @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. 
+ * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "aot/compiledIC_aot.hpp" +#include "memory/resourceArea.hpp" +#include "code/codeCache.hpp" + +void CompiledDirectStaticCall::set_to_far(methodHandle callee, address entry) { + address stub = find_stub(true /* is_far */); + guarantee(stub != NULL, "stub not found"); + + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_far %s", + p2i(instruction_address()), + callee->name_and_sig_as_C_string()); + } + + // Creation also verifies the object. + // mov rax,imm_aot_addr + // jmp rax + NativeMovConstReg* destination_holder = nativeMovConstReg_at(stub); + +#ifdef ASSERT + // read the value once + intptr_t data = destination_holder->data(); + assert(data == 0 || data == (intptr_t)entry, + "MT-unsafe modification of inline cache"); +#endif + + // Update stub. + destination_holder->set_data((intptr_t)entry); + + // Update jump to call. + set_destination_mt_safe(stub); +} + +void CompiledPltStaticCall::set_to_interpreted(methodHandle callee, address entry) { + address stub = find_stub(); + guarantee(stub != NULL, "stub not found"); + if (TraceICs) { + ResourceMark rm; + tty->print_cr("CompiledPltStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s", + p2i(instruction_address()), + callee->name_and_sig_as_C_string()); + } + + // Creation also verifies the object. + NativeLoadGot* method_loader = nativeLoadGot_at(stub); + NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address()); + + intptr_t data = method_loader->data(); + address destination = jump->destination(); + assert(data == 0 || data == (intptr_t)callee(), + "a) MT-unsafe modification of inline cache"); + assert(destination == (address)-1 || destination == entry, + "b) MT-unsafe modification of inline cache"); + + // Update stub. + method_loader->set_data((intptr_t)callee()); + jump->set_jump_destination(entry); + + // Update jump to call. + set_destination_mt_safe(stub); +} + +#ifdef NEVER_CALLED +void CompiledPltStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) { + assert (CompiledIC_lock->is_locked() || SafepointSynchronize::is_at_safepoint(), "mt unsafe call"); + // Reset stub. + address stub = static_stub->addr(); + assert(stub != NULL, "stub not found"); + // Creation also verifies the object. + NativeLoadGot* method_loader = nativeLoadGot_at(stub); + NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address()); + method_loader->set_data(0); + jump->set_jump_destination((address)-1); +} +#endif + +#ifndef PRODUCT +void CompiledPltStaticCall::verify() { + // Verify call. + _call->verify(); + +#ifdef ASSERT + CodeBlob *cb = CodeCache::find_blob_unsafe((address) _call); + assert(cb && cb->is_aot(), "CompiledPltStaticCall can only be used on AOTCompiledMethod"); +#endif + + // Verify stub. 
+ address stub = find_stub(); + assert(stub != NULL, "no stub found for static call"); + // Creation also verifies the object. + NativeLoadGot* method_loader = nativeLoadGot_at(stub); + NativeGotJump* jump = nativeGotJump_at(method_loader->next_instruction_address()); + // Verify state. + assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted(), "sanity check"); +} +#endif // !PRODUCT --- /dev/null 2016-10-31 17:47:18.000000000 -0700 +++ new/src/share/vm/aot/aotCodeHeap.cpp 2016-10-31 17:47:18.000000000 -0700 @@ -0,0 +1,851 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "gc/g1/heapRegion.hpp" +#include "gc/shared/gcLocker.hpp" +#include "jvmci/compilerRuntime.hpp" +#include "jvmci/jvmciRuntime.hpp" +#include "oops/method.hpp" +#include "aot/aotLoader.hpp" +#include "aot/aotCodeHeap.hpp" + +#include "runtime/os.hpp" +#include "runtime/sharedRuntime.hpp" +#include "runtime/vm_operations.hpp" + +bool AOTCodeHeap::_narrow_oop_shift_initialized = false; +int AOTCodeHeap::_narrow_oop_shift = 0; +int AOTCodeHeap::_narrow_klass_shift = 0; + +address AOTCodeHeap::load_symbol(const char *name) { + address symbol = (address) dlsym(_dl_handle, name); + if (symbol == NULL) { + tty->print_cr("Shared file %s error: missing %s", _name, name); + vm_exit(1); + } + return symbol; +} + +Klass* AOTCodeHeap::get_klass_from_got(const char* klass_name, int klass_len, const Method* method) { + AOTKlassData* klass_data = (AOTKlassData*)load_symbol(klass_name); + Klass* k = (Klass*)_metaspace_got[klass_data->_got_index]; + if (k == NULL) { + Thread* thread = Thread::current(); + k = lookup_klass(klass_name, klass_len, method, thread); + // Note, exceptions are cleared. + if (k == NULL) { + fatal("Shared file %s error: klass %s should be resolved already", _name, klass_name); + vm_exit(1); + } + _metaspace_got[klass_data->_got_index] = k; + } + return k; +} + +Klass* AOTCodeHeap::lookup_klass(const char* name, int len, const Method* method, Thread* thread) { + ResourceMark rm(thread); + assert(method != NULL, "incorrect call parameter"); + methodHandle caller(thread, (Method*)method); + + // Use class loader of aot method. 
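The GOT (global offset table) lookup in get_klass_from_got above is the heart of AOT metadata binding: the library reserves one slot per referenced class, the slot starts out NULL, and the first successful resolution fills it so every later call is a plain load. In essence (a sketch only; idx stands for klass_data->_got_index):

  // Klass* k = (Klass*)_metaspace_got[idx];         // NULL until first use
  // if (k == NULL) {
  //   k = lookup_klass(name, len, method, thread);  // slow path, shown here
  //   _metaspace_got[idx] = k;                      // cache for later calls
  // }

The slow path deliberately resolves against the class loader of the AOT method's holder, so a name binds to the same class the image was compiled against; that is what the Handle lines that follow set up.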
+  Handle loader(thread, caller->method_holder()->class_loader());
+  Handle protection_domain(thread, caller->method_holder()->protection_domain());
+
+  // Ignore wrapping L and ;
+  if (name[0] == 'L') {
+    assert(len > 2, "small name %s", name);
+    name++;
+    len -= 2;
+  }
+  TempNewSymbol sym = SymbolTable::probe(name, len);
+  if (sym == NULL) {
+    log_debug(aotclassresolve)("Probe failed for AOT class %s", name);
+    return NULL;
+  }
+  Klass* k = SystemDictionary::find_instance_or_array_klass(sym, loader, protection_domain, thread);
+  assert(!thread->has_pending_exception(), "should not throw");
+
+  if (k != NULL) {
+    log_info(aotclassresolve)("%s %s (lookup)", caller->method_holder()->external_name(), k->external_name());
+  }
+  return k;
+}
+
+void AOTCodeHeap::handle_config_error(const char* format, ...) {
+  if (PrintAOT) {
+    va_list ap;
+    va_start(ap, format);
+    tty->vprint_cr(format, ap);
+    va_end(ap);
+  }
+  if (UseAOTStrictLoading) {
+    vm_exit(1);
+  }
+  _valid = false;
+}
+
+void AOTCodeHeap::verify_flag(bool aot_flag, bool flag, const char* name) {
+  if (_valid && aot_flag != flag) {
+    handle_config_error("Shared file %s error: %s has different value '%s' from current '%s'", _name, name, (aot_flag ? "true" : "false"), (flag ? "true" : "false"));
+  }
+}
+
+void AOTCodeHeap::verify_flag(int aot_flag, int flag, const char* name) {
+  if (_valid && aot_flag != flag) {
+    handle_config_error("Shared file %s error: %s has different value '%d' from current '%d'", _name, name, aot_flag, flag);
+  }
+}
+
+void AOTCodeHeap::verify_config(const AOTHeader* header) {
+  if (header->_version != AOTHeader::AOT_SHARED_VERSION) {
+    handle_config_error("Invalid version of the shared file %s. Expected %d but was %d", _name, AOTHeader::AOT_SHARED_VERSION, header->_version);
+    return;
+  }
+  // Debug VM has different layout of runtime and metadata structures
+#ifdef ASSERT
+  verify_flag(_config->_debug_VM, true, "Debug VM version");
+#else
+  verify_flag(!(_config->_debug_VM), true, "Product VM version");
+#endif
+  // Check configuration size
+  verify_flag(_config->_config_size, AOTConfiguration::CONFIG_SIZE, "AOT configuration size");
+
+  // Check flags
+  verify_flag(_config->_useCompressedOops, UseCompressedOops, "UseCompressedOops");
+  verify_flag(_config->_useCompressedClassPointers, UseCompressedClassPointers, "UseCompressedClassPointers");
+  verify_flag(_config->_useG1GC, UseG1GC, "UseG1GC");
+  verify_flag(_config->_useCMSGC, UseConcMarkSweepGC, "UseConcMarkSweepGC");
+  verify_flag(_config->_useTLAB, UseTLAB, "UseTLAB");
+  verify_flag(_config->_useBiasedLocking, UseBiasedLocking, "UseBiasedLocking");
+  verify_flag(_config->_objectAlignment, ObjectAlignmentInBytes, "ObjectAlignmentInBytes");
+  verify_flag(_config->_contendedPaddingWidth, ContendedPaddingWidth, "ContendedPaddingWidth");
+  verify_flag(_config->_fieldsAllocationStyle, FieldsAllocationStyle, "FieldsAllocationStyle");
+  verify_flag(_config->_compactFields, CompactFields, "CompactFields");
+  verify_flag(_config->_enableContended, EnableContended, "EnableContended");
+  verify_flag(_config->_restrictContended, RestrictContended, "RestrictContended");
+
+  if (!TieredCompilation && _config->_tieredAOT) {
+    handle_config_error("Shared file %s error: Expected to run with tiered compilation on", _name);
+  }
+
+  // The shift values are static and stay 0 until Java heap initialization.
+  // AOT libs are loaded before the heap is initialized, so the shift values are not set yet.
+  // That is okay, since the ObjectAlignmentInBytes flag, which determines the shift
+  // values, is set before AOT libs are loaded.
+  // Set the shift values based on the first AOT library's config.
+  if (UseCompressedOops && _valid) {
+    if (!_narrow_oop_shift_initialized) {
+      _narrow_oop_shift = _config->_narrowOopShift;
+      if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set
+        _narrow_klass_shift = _config->_narrowKlassShift;
+      }
+      _narrow_oop_shift_initialized = true;
+    } else {
+      verify_flag(_config->_narrowOopShift, _narrow_oop_shift, "aot_config->_narrowOopShift");
+      if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set
+        verify_flag(_config->_narrowKlassShift, _narrow_klass_shift, "aot_config->_narrowKlassShift");
+      }
+    }
+  }
+}
+
+AOTCodeHeap::~AOTCodeHeap() {
+  free((void*) _name);
+  if (_classes != NULL) {
+    FREE_C_HEAP_ARRAY(AOTClass, _classes);
+  }
+  if (_code_to_aot != NULL) {
+    FREE_C_HEAP_ARRAY(CodeToAMethod, _code_to_aot);
+  }
+}
+
+AOTCodeHeap::AOTCodeHeap(void* handle, const char* name, int id) :
+  CodeHeap("CodeHeap 'AOT'", CodeBlobType::AOT), _valid(true), _dl_handle(handle), _dso_id(id), _classes(NULL), _code_to_aot(NULL) {
+  _name = (const char*) strdup(name);
+  _lib_symbols_initialized = false;
+  _aot_id = 0;
+
+  // Verify that the VM runs with the same parameters as the AOT tool.
+  _config = (AOTConfiguration*) load_symbol("JVM.config");
+  const AOTHeader* header = (AOTHeader*) load_symbol("JVM.header");
+
+  verify_config(header);
+
+  if (!_valid) {
+    if (PrintAOT) {
+      tty->print("%7d ", (int) tty->time_stamp().milliseconds());
+      tty->print_cr("%4d skipped %s aot library", _dso_id, _name);
+    }
+    return;
+  }
+
+  _class_count = header->_class_count;
+  _method_count = header->_method_count;
+
+  // Collect metaspace info: names -> address in .got section
+  _metaspace_names = (const char*) load_symbol("JVM.metaspace.names");
+  _method_metadata = (address) load_symbol("JVM.method.metadata");
+  _methods_offsets = (address) load_symbol("JVM.methods.offsets");
+  _klasses_offsets = (address) load_symbol("JVM.klasses.offsets");
+  _dependencies = (address) load_symbol("JVM.klasses.dependencies");
+  _code_space = (address) load_symbol("JVM.text");
+
+  // First cell is number of elements.
+  _metaspace_got = (Metadata**) load_symbol("JVM.metaspace.got");
+  _metaspace_got_size = header->_metaspace_got_size;
+
+  _metadata_got = (Metadata**) load_symbol("JVM.metadata.got");
+  _metadata_got_size = header->_metadata_got_size;
+
+  _oop_got = (oop*) load_symbol("JVM.oop.got");
+  _oop_got_size = header->_oop_got_size;
+
+  // Collect stubs info
+  _stubs_offsets = (int*) load_symbol("JVM.stubs.offsets");
+
+  // code segments table
+  _code_segments = (address) load_symbol("JVM.code.segments");
+
+  // method state
+  _method_state = (jlong*) load_symbol("JVM.method.state");
+
+  // Create a table for mapping classes
+  _classes = NEW_C_HEAP_ARRAY(AOTClass, _class_count, mtCode);
+  memset(_classes, 0, _class_count * sizeof(AOTClass));
+
+  // Create table for searching AOTCompiledMethod based on pc.
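+  // (One entry per AOT method, indexed by code_id; each entry starts in the
+  //  not_set state and transitions at most once, to either in_use or invalid;
+  //  see the CodeState enum in aotCodeHeap.hpp.)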
+ _code_to_aot = NEW_C_HEAP_ARRAY(CodeToAMethod, _method_count, mtCode); + memset(_code_to_aot, 0, _method_count * sizeof(CodeToAMethod)); + + _low_boundary = _code_space; + _memory.set_low_boundary((char *)_code_space); + _memory.set_high_boundary((char *)_code_space); + _memory.set_low((char *)_code_space); + _memory.set_high((char *)_code_space); + + _segmap.set_low_boundary((char *)_code_segments); + _segmap.set_low((char *)_code_segments); + + _log2_segment_size = exact_log2(_config->_codeSegmentSize); + + // Register aot stubs + register_stubs(); + + if (PrintAOT || (PrintCompilation && PrintAOT)) { + tty->print("%7d ", (int) tty->time_stamp().milliseconds()); + tty->print_cr("%4d loaded %s aot library", _dso_id, _name); + } +} + +void AOTCodeHeap::publish_aot(instanceKlassHandle kh, methodHandle mh, AOTMethodData* method_data, int code_id) { + // The method may be explicitly excluded by the user. + if (CompilerOracle::should_exclude(mh)) { + return; + } + + address code = method_data->_code; + const char* name = method_data->_name; + aot_metadata* meta = method_data->_meta; + + if (meta->scopes_pcs_begin() == meta->scopes_pcs_end()) { + // When the AOT compiler compiles something big we fail to generate metadata + // in CodeInstaller::gather_metadata. In that case the scopes_pcs_begin == scopes_pcs_end. + // In all successful cases we always have 2 entries of scope pcs. + return; + } + + jlong* state_adr = &_method_state[code_id]; + address metadata_table = method_data->_metadata_table; + int metadata_size = method_data->_metadata_size; + assert(code_id < _method_count, "sanity"); + _aot_id++; + +#ifdef ASSERT + if (_aot_id > CIStop || _aot_id < CIStart) { + // Skip compilation + return; + } +#endif + // Check one more time. + if (_code_to_aot[code_id]._state == invalid) { + return; + } + AOTCompiledMethod *aot = new AOTCompiledMethod(code, mh(), meta, metadata_table, metadata_size, state_adr, this, name, code_id, _aot_id); + assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized"); + _code_to_aot[code_id]._aot = aot; // Should set this first + if (Atomic::cmpxchg(in_use, (jint*)&_code_to_aot[code_id]._state, not_set) != not_set) { + _code_to_aot[code_id]._aot = NULL; // Clean + } else { // success + // Publish method +#ifdef TIERED + mh->set_aot_code(aot); +#endif + Method::set_code(mh, aot); + if (PrintAOT || (PrintCompilation && PrintAOT)) { + aot->print_on(tty, NULL); + } + // Publish oop only after we are visible to CompiledMethodIterator + aot->set_oop(mh()->method_holder()->klass_holder()); + } +} + +void AOTCodeHeap::link_primitive_array_klasses() { + ResourceMark rm; + for (int i = T_BOOLEAN; i <= T_CONFLICT; i++) { + BasicType t = (BasicType)i; + if (is_java_primitive(t)) { + const Klass* arr_klass = Universe::typeArrayKlassObj(t); + AOTKlassData* klass_data = (AOTKlassData*) dlsym(_dl_handle, arr_klass->signature_name()); + if (klass_data != NULL) { + // Set both GOT cells, resolved and initialized klass pointers. + // _got_index points to second cell - resolved klass pointer. 
+        _metaspace_got[klass_data->_got_index-1] = (Metadata*)arr_klass; // Initialized
+        _metaspace_got[klass_data->_got_index  ] = (Metadata*)arr_klass; // Resolved
+        if (PrintAOT) {
+          tty->print_cr("[Found %s in %s]", arr_klass->internal_name(), _name);
+        }
+      }
+    }
+  }
+}
+
+void AOTCodeHeap::register_stubs() {
+  int stubs_count = _stubs_offsets[0]; // contains number
+  _stubs_offsets++;
+  AOTMethodOffsets* stub_offsets = (AOTMethodOffsets*)_stubs_offsets;
+  for (int i = 0; i < stubs_count; ++i) {
+    const char* stub_name = _metaspace_names + stub_offsets[i]._name_offset;
+    address entry = _code_space + stub_offsets[i]._code_offset;
+    aot_metadata* meta = (aot_metadata *) (_method_metadata + stub_offsets[i]._meta_offset);
+    address metadata_table = (address)_metadata_got + stub_offsets[i]._metadata_got_offset;
+    int metadata_size = stub_offsets[i]._metadata_got_size;
+    int code_id = stub_offsets[i]._code_id;
+    assert(code_id < _method_count, "sanity");
+    jlong* state_adr = &_method_state[code_id];
+    int len = build_u2_from((address)stub_name);
+    stub_name += 2;
+    char* full_name = NEW_C_HEAP_ARRAY(char, len+5, mtCode);
+    memcpy(full_name, "AOT ", 4);
+    memcpy(full_name+4, stub_name, len);
+    full_name[len+4] = 0;
+    guarantee(_code_to_aot[code_id]._state != invalid, "stub %s can't be invalidated", full_name);
+    AOTCompiledMethod* aot = new AOTCompiledMethod(entry, NULL, meta, metadata_table, metadata_size, state_adr, this, full_name, code_id, i);
+    assert(_code_to_aot[code_id]._aot == NULL, "should be not initialized");
+    _code_to_aot[code_id]._aot = aot;
+    if (Atomic::cmpxchg(in_use, (jint*)&_code_to_aot[code_id]._state, not_set) != not_set) {
+      fatal("stub '%s' code state is %d", full_name, _code_to_aot[code_id]._state);
+    }
+    // Adjust code buffer boundaries only for stubs because they are last in the buffer.
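+    // (Illustrative layout of JVM.text: [ method code ... | stub code ];
+    //  stubs are emitted after all methods, so raising the high boundary to
+    //  the end of the last stub covers the whole AOT code space.)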
+ adjust_boundaries(aot); + if (PrintAOT && Verbose) { + aot->print_on(tty, NULL); + } + } +} + +#define SET_AOT_GLOBAL_SYMBOL_VALUE(AOTSYMNAME, AOTSYMTYPE, VMSYMVAL) \ + { \ + char* error; \ + /* Clear any existing error */ \ + dlerror(); \ + AOTSYMTYPE * adr = (AOTSYMTYPE *) dlsym(_dl_handle, AOTSYMNAME); \ + /* Check for any dlsym lookup error */ \ + error = dlerror(); \ + guarantee(error == NULL, "%s", error); \ + *adr = (AOTSYMTYPE) VMSYMVAL; \ + } + +void AOTCodeHeap::link_graal_runtime_symbols() { + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_monitorenter", address, JVMCIRuntime::monitorenter); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_monitorexit", address, JVMCIRuntime::monitorexit); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_log_object", address, JVMCIRuntime::log_object); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_log_printf", address, JVMCIRuntime::log_printf); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_log_primitive", address, JVMCIRuntime::log_primitive); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_instance", address, JVMCIRuntime::new_instance); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_array", address, JVMCIRuntime::new_array); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_new_multi_array", address, JVMCIRuntime::new_multi_array); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_array", address, JVMCIRuntime::dynamic_new_array); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_validate_object", address, JVMCIRuntime::validate_object); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_pre", address, JVMCIRuntime::write_barrier_pre); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_identity_hash_code", address, JVMCIRuntime::identity_hash_code); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_write_barrier_post", address, JVMCIRuntime::write_barrier_post); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_dynamic_new_instance", address, JVMCIRuntime::dynamic_new_instance); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_thread_is_interrupted", address, JVMCIRuntime::thread_is_interrupted); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_exception_handler_for_pc", address, JVMCIRuntime::exception_handler_for_pc); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_test_deoptimize_call_int", address, JVMCIRuntime::test_deoptimize_call_int); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_throw_and_post_jvmti_exception", address, JVMCIRuntime::throw_and_post_jvmti_exception); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_throw_klass_external_name_exception", address, JVMCIRuntime::throw_klass_external_name_exception); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_throw_class_cast_exception", address, JVMCIRuntime::throw_class_cast_exception); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_vm_message", address, JVMCIRuntime::vm_message); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_jvmci_runtime_vm_error", address, JVMCIRuntime::vm_error); +} + +void AOTCodeHeap::link_shared_runtime_symbols() { + SET_AOT_GLOBAL_SYMBOL_VALUE("_resolve_static_entry", address, SharedRuntime::get_resolve_static_call_stub()); + SET_AOT_GLOBAL_SYMBOL_VALUE("_resolve_virtual_entry", address, SharedRuntime::get_resolve_virtual_call_stub()); + SET_AOT_GLOBAL_SYMBOL_VALUE("_resolve_opt_virtual_entry", address, SharedRuntime::get_resolve_opt_virtual_call_stub()); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_deopt_blob_unpack", address, SharedRuntime::deopt_blob()->unpack()); + 
SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_deopt_blob_uncommon_trap", address, SharedRuntime::deopt_blob()->uncommon_trap()); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_ic_miss_stub", address, SharedRuntime::get_ic_miss_stub()); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_handle_wrong_method_stub", address, SharedRuntime::get_handle_wrong_method_stub()); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_exception_handler_for_return_address", address, SharedRuntime::exception_handler_for_return_address); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_register_finalizer", address, SharedRuntime::register_finalizer); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_OSR_migration_end", address, SharedRuntime::OSR_migration_end); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_string_by_symbol", address, CompilerRuntime::resolve_string_by_symbol); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_klass_by_symbol", address, CompilerRuntime::resolve_klass_by_symbol); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_resolve_method_by_symbol_and_load_counters", address, CompilerRuntime::resolve_method_by_symbol_and_load_counters); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_initialize_klass_by_symbol", address, CompilerRuntime::initialize_klass_by_symbol); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_invocation_event", address, CompilerRuntime::invocation_event); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_backedge_event", address, CompilerRuntime::backedge_event); + + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dpow", address, SharedRuntime::dpow); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dexp", address, SharedRuntime::dexp); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dcos", address, SharedRuntime::dcos); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dsin", address, SharedRuntime::dsin); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dtan", address, SharedRuntime::dtan); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dlog", address, SharedRuntime::dlog); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_shared_runtime_dlog10", address, SharedRuntime::dlog10); +} + +void AOTCodeHeap::link_stub_routines_symbols() { + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jbyte_arraycopy", address, StubRoutines::_jbyte_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jshort_arraycopy", address, StubRoutines::_jshort_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jint_arraycopy", address, StubRoutines::_jint_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jlong_arraycopy", address, StubRoutines::_jlong_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_oop_arraycopy", address, StubRoutines::_oop_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_oop_arraycopy_uninit", address, StubRoutines::_oop_arraycopy_uninit); + + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jbyte_disjoint_arraycopy", address, StubRoutines::_jbyte_disjoint_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jshort_disjoint_arraycopy", address, StubRoutines::_jshort_disjoint_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jint_disjoint_arraycopy", address, StubRoutines::_jint_disjoint_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_jlong_disjoint_arraycopy", address, StubRoutines::_jlong_disjoint_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_oop_disjoint_arraycopy", address, StubRoutines::_oop_disjoint_arraycopy); + SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_oop_disjoint_arraycopy_uninit", address, StubRoutines::_oop_disjoint_arraycopy_uninit); + + 
  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jbyte_arraycopy", address, StubRoutines::_arrayof_jbyte_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jshort_arraycopy", address, StubRoutines::_arrayof_jshort_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jint_arraycopy", address, StubRoutines::_arrayof_jint_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jlong_arraycopy", address, StubRoutines::_arrayof_jlong_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_arraycopy", address, StubRoutines::_arrayof_oop_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_arraycopy_uninit", address, StubRoutines::_arrayof_oop_arraycopy_uninit);
+
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jbyte_disjoint_arraycopy", address, StubRoutines::_arrayof_jbyte_disjoint_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jshort_disjoint_arraycopy", address, StubRoutines::_arrayof_jshort_disjoint_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jint_disjoint_arraycopy", address, StubRoutines::_arrayof_jint_disjoint_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_jlong_disjoint_arraycopy", address, StubRoutines::_arrayof_jlong_disjoint_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_disjoint_arraycopy", address, StubRoutines::_arrayof_oop_disjoint_arraycopy);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_arrayof_oop_disjoint_arraycopy_uninit", address, StubRoutines::_arrayof_oop_disjoint_arraycopy_uninit);
+
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_checkcast_arraycopy", address, StubRoutines::_checkcast_arraycopy);
+
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_aescrypt_encryptBlock", address, StubRoutines::_aescrypt_encryptBlock);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_aescrypt_decryptBlock", address, StubRoutines::_aescrypt_decryptBlock);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_cipherBlockChaining_encryptAESCrypt", address, StubRoutines::_cipherBlockChaining_encryptAESCrypt);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_cipherBlockChaining_decryptAESCrypt", address, StubRoutines::_cipherBlockChaining_decryptAESCrypt);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_update_bytes_crc32", address, StubRoutines::_updateBytesCRC32);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_stub_routines_crc_table_adr", address, StubRoutines::_crc_table_adr);
+}
+
+void AOTCodeHeap::link_os_symbols() {
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_os_javaTimeMillis", address, os::javaTimeMillis);
+  SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_os_javaTimeNanos", address, os::javaTimeNanos);
+}
+
+/*
+ * Link the global symbols in the precompiled DSO (opened with dlopen();
+ * the handle is kept in _dl_handle) to the current VM's values.
+ */
+
+void AOTCodeHeap::link_global_lib_symbols() {
+  if (!_lib_symbols_initialized) {
+    _lib_symbols_initialized = true;
+
+    CollectedHeap* heap = Universe::heap();
+    CardTableModRefBS* ct = (CardTableModRefBS*)(heap->barrier_set());
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_card_table_address", address, ct->byte_map_base);
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_top_address", address, (heap->supports_inline_contig_alloc() ? heap->top_addr() : NULL));
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_heap_end_address", address, (heap->supports_inline_contig_alloc() ? heap->end_addr() : NULL));
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_polling_page", address, os::get_polling_page());
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_narrow_klass_base_address", address, Universe::narrow_klass_base());
+    SET_AOT_GLOBAL_SYMBOL_VALUE("_aot_log_of_heap_region_grain_bytes", int, HeapRegion::LogOfHRGrainBytes);
+
+    link_shared_runtime_symbols();
+    link_stub_routines_symbols();
+    link_os_symbols();
+    link_graal_runtime_symbols();
+
+    // Link primitive array klasses.
+    link_primitive_array_klasses();
+  }
+}
+
+#ifndef PRODUCT
+int AOTCodeHeap::klasses_seen = 0;
+int AOTCodeHeap::aot_klasses_found = 0;
+int AOTCodeHeap::aot_klasses_fp_miss = 0;
+int AOTCodeHeap::aot_klasses_cl_miss = 0;
+int AOTCodeHeap::aot_methods_found = 0;
+
+void AOTCodeHeap::print_statistics() {
+  tty->print_cr("Classes seen: %d AOT classes found: %d AOT methods found: %d", klasses_seen, aot_klasses_found, aot_methods_found);
+  tty->print_cr("AOT fingerprint mismatches: %d AOT class loader mismatches: %d", aot_klasses_fp_miss, aot_klasses_cl_miss);
+}
+#endif
+
+Method* AOTCodeHeap::find_method(KlassHandle klass, Thread* thread, const char* method_name) {
+  int method_name_len = build_u2_from((address)method_name);
+  method_name += 2;
+  const char* signature_name = method_name + method_name_len;
+  int signature_name_len = build_u2_from((address)signature_name);
+  signature_name += 2;
+  // The class should have been loaded so the method and signature should already be
+  // in the symbol table. If they're not there, the method doesn't exist.
+  TempNewSymbol name = SymbolTable::probe(method_name, method_name_len);
+  TempNewSymbol signature = SymbolTable::probe(signature_name, signature_name_len);
+
+  Method* m;
+  if (name == NULL || signature == NULL) {
+    m = NULL;
+  } else if (name == vmSymbols::object_initializer_name() ||
+             name == vmSymbols::class_initializer_name()) {
+    // Never search superclasses for constructors
+    if (klass->is_instance_klass()) {
+      m = InstanceKlass::cast(klass())->find_method(name, signature);
+    } else {
+      m = NULL;
+    }
+  } else {
+    m = klass->lookup_method(name, signature);
+    if (m == NULL && klass->is_instance_klass()) {
+      m = InstanceKlass::cast(klass())->lookup_method_in_ordered_interfaces(name, signature);
+    }
+  }
+  if (m == NULL) {
+    // Fatal error, because we assume classes and methods have not changed since AOT compilation.
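+    // (For example, a method that was removed or renamed after the AOT library
+    //  was built would land here: the class still resolves, but the compiled
+    //  entry no longer matches any of its methods.)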
+ const char* klass_name = klass->external_name(); + int klass_len = (int)strlen(klass_name); + char* meta_name = NEW_RESOURCE_ARRAY(char, klass_len + 1 + method_name_len + signature_name_len + 1); + memcpy(meta_name, klass_name, klass_len); + meta_name[klass_len] = '.'; + memcpy(&meta_name[klass_len + 1], method_name, method_name_len); + memcpy(&meta_name[klass_len + 1 + method_name_len], signature_name, signature_name_len); + meta_name[klass_len + 1 + method_name_len + signature_name_len] = '\0'; + Handle exception = Exceptions::new_exception(thread, vmSymbols::java_lang_NoSuchMethodError(), meta_name); + java_lang_Throwable::print(exception, tty); + tty->cr(); + java_lang_Throwable::print_stack_trace(exception(), tty); + tty->cr(); + fatal("Failed to find method '%s'", meta_name); + } + NOT_PRODUCT( aot_methods_found++; ) + return m; +} + +AOTKlassData* AOTCodeHeap::find_klass(InstanceKlass* ik) { + ResourceMark rm; + AOTKlassData* klass_data = (AOTKlassData*) dlsym(_dl_handle, ik->signature_name()); + return klass_data; +} + +bool AOTCodeHeap::is_dependent_method(Klass* dependee, AOTCompiledMethod* aot) { + InstanceKlass *dependee_ik = InstanceKlass::cast(dependee); + AOTKlassData* klass_data = find_klass(dependee_ik); + if (klass_data == NULL) { + return false; // no AOT records for this class - no dependencies + } + if (!dependee_ik->has_passed_fingerprint_check()) { + return false; // different class + } + + int methods_offset = klass_data->_dependent_methods_offset; + if (methods_offset >= 0) { + address methods_cnt_adr = _dependencies + methods_offset; + int methods_cnt = *(int*)methods_cnt_adr; + int* indexes = (int*)(methods_cnt_adr + 4); + for (int i = 0; i < methods_cnt; ++i) { + int code_id = indexes[i]; + if (_code_to_aot[code_id]._aot == aot) { + return true; // found dependent method + } + } + } + return false; +} + +void AOTCodeHeap::sweep_dependent_methods(AOTKlassData* klass_data) { + // Make dependent methods non_entrant forever. + int methods_offset = klass_data->_dependent_methods_offset; + if (methods_offset >= 0) { + int marked = 0; + address methods_cnt_adr = _dependencies + methods_offset; + int methods_cnt = *(int*)methods_cnt_adr; + int* indexes = (int*)(methods_cnt_adr + 4); + for (int i = 0; i < methods_cnt; ++i) { + int code_id = indexes[i]; + // Invalidate aot code. + if (Atomic::cmpxchg(invalid, (jint*)&_code_to_aot[code_id]._state, not_set) != not_set) { + if (_code_to_aot[code_id]._state == in_use) { + AOTCompiledMethod* aot = _code_to_aot[code_id]._aot; + assert(aot != NULL, "aot should be set"); + if (!aot->is_runtime_stub()) { // Something is wrong - should not invalidate stubs. + aot->mark_for_deoptimization(false); + marked++; + } + } + } + } + if (marked > 0) { + VM_Deoptimize op; + VMThread::execute(&op); + } + } +} + +bool AOTCodeHeap::load_klass_data(instanceKlassHandle kh, Thread* thread) { + ResourceMark rm; + + NOT_PRODUCT( klasses_seen++; ) + + AOTKlassData* klass_data = find_klass(kh()); + if (klass_data == NULL) { + return false; + } + + if (!kh->has_passed_fingerprint_check()) { + NOT_PRODUCT( aot_klasses_fp_miss++; ) + log_trace(aotclassfingerprint)("class %s%s has bad fingerprint in %s tid=" INTPTR_FORMAT, + kh->internal_name(), kh->is_shared() ? 
" (shared)" : "", + _name, p2i(thread)); + sweep_dependent_methods(klass_data); + return false; + } + + assert(klass_data->_class_id < _class_count, "invalid class id"); + AOTClass* aot_class = &_classes[klass_data->_class_id]; + if (aot_class->_classloader != NULL && aot_class->_classloader != kh->class_loader_data()) { + log_trace(aotclassload)("class %s in %s already loaded for classloader %p vs %p tid=" INTPTR_FORMAT, + kh->internal_name(), _name, aot_class->_classloader, kh->class_loader_data(), p2i(thread)); + NOT_PRODUCT( aot_klasses_cl_miss++; ) + return false; + } + + NOT_PRODUCT( aot_klasses_found++; ) + + log_trace(aotclassload)("found %s in %s for classloader %p tid=" INTPTR_FORMAT, kh->internal_name(), _name, kh->class_loader_data(), p2i(thread)); + + aot_class->_classloader = kh->class_loader_data(); + // Set klass's Resolve (second) got cell. + _metaspace_got[klass_data->_got_index] = kh(); + + // Initialize global symbols of the DSO to the correspondingVM symbol values. + link_global_lib_symbols(); + + int methods_offset = klass_data->_compiled_methods_offset; + if (methods_offset >= 0) { + address methods_cnt_adr = _methods_offsets + methods_offset; + int methods_cnt = *(int*)methods_cnt_adr; + // Collect data about compiled methods + AOTMethodData* methods_data = NEW_RESOURCE_ARRAY(AOTMethodData, methods_cnt); + AOTMethodOffsets* methods_offsets = (AOTMethodOffsets*)(methods_cnt_adr + 4); + for (int i = 0; i < methods_cnt; ++i) { + AOTMethodOffsets* method_offsets = &methods_offsets[i]; + int code_id = method_offsets->_code_id; + if (_code_to_aot[code_id]._state == invalid) { + continue; // skip AOT methods slots which have been invalidated + } + AOTMethodData* method_data = &methods_data[i]; + const char* aot_name = _metaspace_names + method_offsets->_name_offset; + method_data->_name = aot_name; + method_data->_code = _code_space + method_offsets->_code_offset; + method_data->_meta = (aot_metadata*)(_method_metadata + method_offsets->_meta_offset); + method_data->_metadata_table = (address)_metadata_got + method_offsets->_metadata_got_offset; + method_data->_metadata_size = method_offsets->_metadata_got_size; + // aot_name format: "Ljava/lang/ThreadGroup;addUnstarted()V" + int klass_len = build_u2_from((address)aot_name); + const char* method_name = aot_name + 2 + klass_len; + Method* m = AOTCodeHeap::find_method(kh, thread, method_name); + methodHandle mh(thread, m); + if (mh->code() != NULL) { // Does it have already compiled code? + continue; // Don't overwrite + } + publish_aot(kh, mh, method_data, code_id); + } + } + return true; +} + +AOTCompiledMethod* AOTCodeHeap::next_in_use_at(int start) const { + for (int index = start; index < _method_count; index++) { + if (_code_to_aot[index]._state != in_use) { + continue; // Skip uninitialized entries. 
+ } + AOTCompiledMethod* aot = _code_to_aot[index]._aot; + return aot; + } + return NULL; +} + +void* AOTCodeHeap::first() const { + return next_in_use_at(0); +} + +void* AOTCodeHeap::next(void* p) const { + AOTCompiledMethod *aot = (AOTCompiledMethod *)p; + int next_index = aot->method_index() + 1; + assert(next_index <= _method_count, ""); + if (next_index == _method_count) { + return NULL; + } + return next_in_use_at(next_index); +} + +void* AOTCodeHeap::find_start(void* p) const { + if (!contains(p)) { + return NULL; + } + size_t offset = pointer_delta(p, low_boundary(), 1); + // Use segments table + size_t seg_idx = offset / _config->_codeSegmentSize; + if ((int)(_code_segments[seg_idx]) == 0xff) { + return NULL; + } + while (_code_segments[seg_idx] > 0) { + seg_idx -= (int)_code_segments[seg_idx]; + } + int code_offset = (int)seg_idx * _config->_codeSegmentSize; + int aot_index = *(int*)(_code_space + code_offset); + AOTCompiledMethod* aot = _code_to_aot[aot_index]._aot; + assert(aot != NULL, "should find registered aot method"); + return aot; +} + +AOTCompiledMethod* AOTCodeHeap::find_aot(address p) const { + assert(contains(p), "should be here"); + return (AOTCompiledMethod *)find_start(p); +} + +CodeBlob* AOTCodeHeap::find_blob_unsafe(void* start) const { + return (CodeBlob*)AOTCodeHeap::find_start(start); +} + +void AOTCodeHeap::oops_do(OopClosure* f) { + for (int i = 0; i < _oop_got_size; i++) { + oop* p = &_oop_got[i]; + if (*p == NULL) continue; // skip non-oops + f->do_oop(p); + } + for (int index = 0; index < _method_count; index++) { + if (_code_to_aot[index]._state != in_use) { + continue; // Skip uninitialized entries. + } + AOTCompiledMethod* aot = _code_to_aot[index]._aot; + aot->do_oops(f); + } +} + +// Yes, this is faster than going through the relocations, +// but there are two problems: +// 1) GOT slots are sometimes patched with non-Metadata values +// 2) We don't want to scan metadata for dead methods +// Unfortunately we don't know if the metadata belongs to +// live aot methods or not, so process them all. If this +// is for mark_on_stack, some old methods may stick around +// forever instead of getting cleaned up. +void AOTCodeHeap::got_metadata_do(void f(Metadata*)) { + for (int i = 1; i < _metaspace_got_size; i++) { + Metadata** p = &_metaspace_got[i]; + Metadata* md = *p; + if (md == NULL) continue; // skip non-oops + intptr_t meta = (intptr_t)md; + if (meta == -1) continue; // skip non-oops + if (Metaspace::contains(md)) { + f(md); + } + } + for (int i = 1; i < _metadata_got_size; i++) { + Metadata** p = &_metadata_got[i]; + Metadata* md = *p; + intptr_t meta = (intptr_t)md; + if ((meta & 1) == 1) { + // already resolved + md = (Metadata*)(meta & ~1); + } else { + continue; + } + if (md == NULL) continue; // skip non-oops + if (Metaspace::contains(md)) { + f(md); + } + } +} + +void AOTCodeHeap::cleanup_inline_caches() { + for (int index = 0; index < _method_count; index++) { + if (_code_to_aot[index]._state != in_use) { + continue; // Skip uninitialized entries. + } + AOTCompiledMethod* aot = _code_to_aot[index]._aot; + aot->cleanup_inline_caches(); + } +} + +#ifdef ASSERT +int AOTCodeHeap::verify_icholder_relocations() { + int count = 0; + for (int index = 0; index < _method_count; index++) { + if (_code_to_aot[index]._state != in_use) { + continue; // Skip uninitialized entries. 
+ } + AOTCompiledMethod* aot = _code_to_aot[index]._aot; + count += aot->verify_icholder_relocations(); + } + return count; +} +#endif + +void AOTCodeHeap::flush_evol_dependents_on(instanceKlassHandle dependee) { + for (int index = 0; index < _method_count; index++) { + if (_code_to_aot[index]._state != in_use) { + continue; // Skip uninitialized entries. + } + AOTCompiledMethod* aot = _code_to_aot[index]._aot; + aot->flush_evol_dependents_on(dependee); + } +} + +void AOTCodeHeap::metadata_do(void f(Metadata*)) { + for (int index = 0; index < _method_count; index++) { + if (_code_to_aot[index]._state != in_use) { + continue; // Skip uninitialized entries. + } + AOTCompiledMethod* aot = _code_to_aot[index]._aot; + if (aot->_is_alive()) { + aot->metadata_do(f); + } + } +#if 0 + // With the marking above, this call doesn't seem to be needed + got_metadata_do(f); +#endif +} --- /dev/null 2016-10-31 17:47:19.000000000 -0700 +++ new/src/share/vm/aot/aotCodeHeap.hpp 2016-10-31 17:47:18.000000000 -0700 @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */
+
+#ifndef SHARE_VM_AOT_AOTCODEHEAP_HPP
+#define SHARE_VM_AOT_AOTCODEHEAP_HPP
+
+#include "aot/aotCompiledMethod.hpp"
+#include "classfile/symbolTable.hpp"
+#include "oops/metadata.hpp"
+#include "oops/method.hpp"
+
+enum CodeState {
+  not_set = 0, // _aot field is not set yet
+  in_use  = 1, // _aot field is set to corresponding AOTCompiledMethod
+  invalid = 2  // AOT code is invalidated because dependencies failed
+};
+
+typedef struct {
+  AOTCompiledMethod* _aot;
+  CodeState _state; // State change cases: not_set->in_use, not_set->invalid
+} CodeToAMethod;
+
+class ClassLoaderData;
+
+class AOTClass {
+public:
+  ClassLoaderData* _classloader;
+};
+
+typedef struct {
+  int _name_offset;
+  int _code_offset;
+  int _meta_offset;
+  int _metadata_got_offset;
+  int _metadata_got_size;
+  int _code_id;
+} AOTMethodOffsets;
+
+typedef struct {
+  const char* _name;
+  address _code;
+  aot_metadata* _meta;
+  jlong* _state_adr;
+  address _metadata_table;
+  int _metadata_size;
+} AOTMethodData;
+
+typedef struct {
+  int _got_index;
+  int _class_id;
+  int _compiled_methods_offset;
+  int _dependent_methods_offset;
+  uint64_t _fingerprint;
+} AOTKlassData;
+
+typedef struct {
+  int _version;
+  int _class_count;
+  int _method_count;
+  int _metaspace_got_size;
+  int _metadata_got_size;
+  int _oop_got_size;
+
+  enum {
+    AOT_SHARED_VERSION = 1
+  };
+} AOTHeader;
+
+typedef struct {
+  enum { CONFIG_SIZE = 11 + 7 * 4 };
+  int _config_size;
+  int _narrowOopShift;
+  int _narrowKlassShift;
+  int _contendedPaddingWidth;
+  int _fieldsAllocationStyle;
+  int _objectAlignment;
+  int _codeSegmentSize;
+  // byte[11] array maps to the boolean values here
+  bool _debug_VM;
+  bool _useCompressedOops;
+  bool _useCompressedClassPointers;
+  bool _compactFields;
+  bool _useG1GC;
+  bool _useCMSGC;
+  bool _useTLAB;
+  bool _useBiasedLocking;
+  bool _tieredAOT;
+  bool _enableContended;
+  bool _restrictContended;
+} AOTConfiguration;
+
+class AOTCodeHeap : public CodeHeap {
+private:
+  bool _valid;
+  void* _dl_handle;
+  const char* _name;
+
+  const int _dso_id;
+  int _aot_id;
+
+  int _class_count;
+  int _method_count;
+  AOTClass* _classes;
+  CodeToAMethod* _code_to_aot;
+
+  address _code_space;
+  address _code_segments;
+  jlong* _method_state;
+
+  // VM configuration during AOT compilation
+  AOTConfiguration* _config;
+
+  // Collect metaspace info: names -> address in .got section
+  const char* _metaspace_names;
+  address _method_metadata;
+
+  address _methods_offsets;
+  address _klasses_offsets;
+  address _dependencies;
+
+  Metadata** _metaspace_got;
+  Metadata** _metadata_got;
+  oop* _oop_got;
+
+  int _metaspace_got_size;
+  int _metadata_got_size;
+  int _oop_got_size;
+
+  // Collect stubs info
+  int* _stubs_offsets;
+
+  address _low_boundary;
+
+  bool _lib_symbols_initialized;
+
+  static bool _narrow_oop_shift_initialized;
+  static int _narrow_oop_shift;
+  static int _narrow_klass_shift;
+
+  void adjust_boundaries(AOTCompiledMethod* method) {
+    address low = _low_boundary;
+    if (method->code_begin() < low) {
+      low = method->code_begin();
+    }
+    address high = high_boundary();
+    if (method->code_end() > high) {
+      high = method->code_end();
+    }
+    assert(_method_count > 0, "methods count should be set already");
+
+    _low_boundary = low;
+    _memory.set_high_boundary((char *)high);
+    _memory.set_high((char *)high);
+  }
+
+  void handle_config_error(const char* format, ...)
ATTRIBUTE_PRINTF(2, 3); + void verify_flag(bool aot_flag, bool flag, const char* name); + void verify_config(const AOTHeader* header); + void register_stubs(); + + void link_shared_runtime_symbols(); + void link_stub_routines_symbols(); + void link_os_symbols(); + void link_graal_runtime_symbols(); + + void link_global_lib_symbols(); + void link_primitive_array_klasses(); + void publish_aot(instanceKlassHandle kh, methodHandle mh, AOTMethodData* method_data, int code_id); + + address load_symbol(const char *name); + + AOTCompiledMethod* next_in_use_at(int index) const; + + // Find klass in SystemDictionary for aot metadata. + static Klass* lookup_klass(const char* name, int len, const Method* method, Thread* THREAD); +public: + AOTCodeHeap(void* handle, const char* name, int id); + virtual ~AOTCodeHeap(); + + address low_boundary() const { return _low_boundary; } + address high_boundary() const { return (address)CodeHeap::high(); } + + bool contains(const void* p) const { + bool result = (low_boundary() <= p) && (p < high_boundary()); + assert(!result || (_method_count > 0), ""); + assert(result == CodeHeap::contains(p), ""); + return result; + } + AOTCompiledMethod* find_aot(address p) const; + + virtual void* find_start(void* p) const; + virtual CodeBlob* find_blob_unsafe(void* start) const; + virtual void* first() const; + virtual void* next(void *p) const; + + AOTKlassData* find_klass(InstanceKlass* ik); + bool load_klass_data(instanceKlassHandle kh, Thread* thread); + Klass* get_klass_from_got(const char* klass_name, int klass_len, const Method* method); + void sweep_dependent_methods(AOTKlassData* klass_data); + bool is_dependent_method(Klass* dependee, AOTCompiledMethod* aot); + + const char* get_name_at(int offset) { + return _metaspace_names + offset; + } + + void oops_do(OopClosure* f); + void metadata_do(void f(Metadata*)); + void got_metadata_do(void f(Metadata*)); + +#ifdef ASSERT + bool got_contains(Metadata **p) { + return (p >= &_metadata_got[0] && p < &_metadata_got[_metadata_got_size]) || + (p >= &_metaspace_got[0] && p < &_metaspace_got[_metaspace_got_size]); + } +#endif + + int dso_id() const { return _dso_id; } + int aot_id() const { return _aot_id; } + bool is_valid() const { return _valid; } + + int method_count() { return _method_count; } + + AOTCompiledMethod* get_code_desc_at_index(int index) { + if (index < _method_count && _code_to_aot[index]._state == in_use) { + AOTCompiledMethod* m = _code_to_aot[index]._aot; + assert(m != NULL, "AOT method should be set"); + if (!m->is_runtime_stub()) { + return m; + } + } + return NULL; + } + + static int narrow_oop_shift() { return _narrow_oop_shift; } + static int narrow_klass_shift() { return _narrow_klass_shift; } + static bool narrow_oop_shift_initialized() { return _narrow_oop_shift_initialized; } + static Method* find_method(KlassHandle klass, Thread* thread, const char* method_name); + + void verify_flag(int aot_flag, int flag, const char* name); + + void cleanup_inline_caches(); + + DEBUG_ONLY( int verify_icholder_relocations(); ) + + void flush_evol_dependents_on(instanceKlassHandle dependee); + + void alive_methods_do(void f(CompiledMethod* nm)); + +#ifndef PRODUCT + static int klasses_seen; + static int aot_klasses_found; + static int aot_klasses_fp_miss; + static int aot_klasses_cl_miss; + static int aot_methods_found; + + static void print_statistics(); +#endif +}; + +#endif // SHARE_VM_AOT_AOTCODEHEAP_HPP --- /dev/null 2016-10-31 17:47:19.000000000 -0700 +++ new/src/share/vm/aot/aotCompiledMethod.cpp 
2016-10-31 17:47:19.000000000 -0700
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+
+#include "code/codeCache.hpp"
+#include "code/compiledIC.hpp"
+#include "code/nativeInst.hpp"
+#include "compiler/compilerOracle.hpp"
+#include "gc/shared/cardTableModRefBS.hpp"
+#include "gc/shared/collectedHeap.hpp"
+#include "gc/shared/gcLocker.hpp"
+#include "jvmci/compilerRuntime.hpp"
+#include "jvmci/jvmciRuntime.hpp"
+#include "oops/method.hpp"
+#include "aot/aotCompiledMethod.hpp"
+#include "aot/aotLoader.hpp"
+#include "aot/aotCodeHeap.hpp"
+#include "aot/compiledIC_aot.hpp"
+#include "runtime/java.hpp"
+#include "runtime/os.hpp"
+#include "runtime/sharedRuntime.hpp"
+#include "utilities/array.hpp"
+#include "utilities/xmlstream.hpp"
+
+#include <dlfcn.h>
+#include <stdio.h>
+
+#if 0
+static void metadata_oops_do(Metadata** metadata_begin, Metadata **metadata_end, OopClosure* f) {
+  // Visit the metadata/oops section
+  for (Metadata** p = metadata_begin; p < metadata_end; p++) {
+    Metadata* m = *p;
+
+    intptr_t meta = (intptr_t)m;
+    if ((meta & 1) == 1) {
+      // already resolved
+      m = (Metadata*)(meta & ~1);
+    } else {
+      continue;
+    }
+    assert(Metaspace::contains(m), "");
+    if (m->is_method()) {
+      m = ((Method*)m)->method_holder();
+    }
+    assert(m->is_klass(), "must be");
+    oop o = ((Klass*)m)->klass_holder();
+    if (o != NULL) {
+      f->do_oop(&o);
+    }
+  }
+}
+#endif
+
+void AOTCompiledMethod::oops_do(OopClosure* f) {
+  if (_oop != NULL) {
+    f->do_oop(&_oop);
+  }
+#if 0
+  metadata_oops_do(metadata_begin(), metadata_end(), f);
+#endif
+}
+
+bool AOTCompiledMethod::do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) {
+  return false;
+}
+
+oop AOTCompiledMethod::oop_at(int index) const {
+  if (index == 0) { // 0 is reserved
+    return NULL;
+  }
+  Metadata** entry = _metadata_got + (index - 1);
+  intptr_t meta = (intptr_t)*entry;
+  if ((meta & 1) == 1) {
+    // already resolved
+    Klass* k = (Klass*)(meta & ~1);
+    return k->java_mirror();
+  }
+  // The entry is a string which we need to resolve.
+  const char* meta_name = _heap->get_name_at((int)meta);
+  int klass_len = build_u2_from((address)meta_name);
+  const char* klass_name = meta_name + 2;
+  // Quick check the current method's holder.
+  Klass* k = _method->method_holder();
+
+  ResourceMark rm; // for signature_name()
+  if (strncmp(k->signature_name(), klass_name, klass_len) != 0) { // Does not match?
+    // Search for the klass in the GOT cells of the DSO that contains this compiled method.
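+    // (The dlsym() lookup inside get_klass_from_got() is keyed by the klass's
+    //  signature name, e.g. "Ljava/lang/String;"; if the GOT cell is still NULL,
+    //  the klass is resolved through the SystemDictionary and cached there.)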
+    k = _heap->get_klass_from_got(klass_name, klass_len, _method);
+  }
+  int method_name_len = build_u2_from((address)klass_name + klass_len);
+  guarantee(method_name_len == 0, "only klass is expected here");
+  meta = ((intptr_t)k) | 1;
+  *entry = (Metadata*)meta; // Should be atomic on x64
+  return k->java_mirror();
+}
+
+Metadata* AOTCompiledMethod::metadata_at(int index) const {
+  if (index == 0) { // 0 is reserved
+    return NULL;
+  }
+  assert(index - 1 < _metadata_size, "");
+  {
+    Metadata** entry = _metadata_got + (index - 1);
+    intptr_t meta = (intptr_t)*entry;
+    if ((meta & 1) == 1) {
+      // already resolved
+      Metadata *m = (Metadata*)(meta & ~1);
+      return m;
+    }
+    // The entry is a string which we need to resolve.
+    const char* meta_name = _heap->get_name_at((int)meta);
+    int klass_len = build_u2_from((address)meta_name);
+    const char* klass_name = meta_name + 2;
+    // Quick check the current method's holder.
+    Klass* k = _method->method_holder();
+    bool klass_matched = true;
+
+    ResourceMark rm; // for signature_name() and find_method()
+    if (strncmp(k->signature_name(), klass_name, klass_len) != 0) { // Does not match?
+      // Search for the klass in the GOT cells of the DSO that contains this compiled method.
+      k = _heap->get_klass_from_got(klass_name, klass_len, _method);
+      klass_matched = false;
+    }
+    int method_name_len = build_u2_from((address)klass_name + klass_len);
+    if (method_name_len == 0) { // Array or Klass name only?
+      meta = ((intptr_t)k) | 1;
+      *entry = (Metadata*)meta; // Should be atomic on x64
+      return (Metadata*)k;
+    } else { // Method
+      // Quick check the current method's name.
+      Method* m = _method;
+      int signature_len = build_u2_from((address)klass_name + klass_len + 2 + method_name_len);
+      int full_len = 2 + klass_len + 2 + method_name_len + 2 + signature_len;
+      if (!klass_matched || memcmp(_name, meta_name, full_len) != 0) { // Does not match?
+        Thread* thread = Thread::current();
+        KlassHandle klass = KlassHandle(thread, k);
+        const char* method_name = klass_name + klass_len;
+        m = AOTCodeHeap::find_method(klass, thread, method_name);
+      }
+      meta = ((intptr_t)m) | 1;
+      *entry = (Metadata*)meta; // Should be atomic on x64
+      return (Metadata*)m;
+    }
+    // Otherwise we would need to resolve it here; patching of the GOT needs to be a CAS or atomic operation.
+    // FIXIT: need methods for debuginfo.
+    // return _method;
+  }
+  ShouldNotReachHere(); return NULL;
+}
+
+bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
+  // Make sure the method is not flushed in case of a safepoint in code below.
+  methodHandle the_method(method());
+  NoSafepointVerifier nsv;
+
+  {
+    // Enter critical section. Does not block for safepoint.
+    MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag);
+
+    if (*_state_adr == new_state) {
+      // Another thread already performed this transition, so there is nothing
+      // to do, but return false to indicate this.
+      return false;
+    }
+
+    // Change state
+    OrderAccess::storestore();
+    *_state_adr = new_state;
+
+    // Log the transition once
+    log_state_change();
+
+#ifdef TIERED
+    // Remain non-entrant forever
+    if (new_state == not_entrant && method() != NULL) {
+      method()->set_aot_code(NULL);
+    }
+#endif
+
+    // Remove AOTCompiledMethod from method.
+ if (method() != NULL && (method()->code() == this || + method()->from_compiled_entry() == verified_entry_point())) { + HandleMark hm; + method()->clear_code(false /* already owns Patching_lock */); + } + } // leave critical region under Patching_lock + + + if (TraceCreateZombies) { + ResourceMark m; + const char *new_state_str = (new_state == not_entrant) ? "not entrant" : "not used"; + tty->print_cr("aot method <" INTPTR_FORMAT "> %s code made %s", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null", new_state_str); + } + + return true; +} + +bool AOTCompiledMethod::make_entrant() { + assert(!method()->is_old(), "reviving evolved method!"); + assert(*_state_adr != not_entrant, "%s", method()->has_aot_code() ? "has_aot_code() not cleared" : "caller didn't check has_aot_code()"); + + // Make sure the method is not flushed in case of a safepoint in code below. + methodHandle the_method(method()); + NoSafepointVerifier nsv; + + { + // Enter critical section. Does not block for safepoint. + MutexLockerEx pl(Patching_lock, Mutex::_no_safepoint_check_flag); + + if (*_state_adr == in_use) { + // another thread already performed this transition so nothing + // to do, but return false to indicate this. + return false; + } + + // Change state + OrderAccess::storestore(); + *_state_adr = in_use; + + // Log the transition once + log_state_change(); + } // leave critical region under Patching_lock + + + if (TraceCreateZombies) { + ResourceMark m; + tty->print_cr("aot method <" INTPTR_FORMAT "> %s code made entrant", p2i(this), this->method() ? this->method()->name_and_sig_as_C_string() : "null"); + } + + return true; +} + +// We don't have full dependencies for AOT methods, so flushing is +// more conservative than for nmethods. +void AOTCompiledMethod::flush_evol_dependents_on(instanceKlassHandle dependee) { + if (is_java_method()) { + cleanup_inline_caches(); + mark_for_deoptimization(); + make_not_entrant(); + } +} + +// Iterate over metadata calling this function. Used by RedefineClasses +// Copied from nmethod::metadata_do +void AOTCompiledMethod::metadata_do(void f(Metadata*)) { + address low_boundary = verified_entry_point(); + { + // Visit all immediate references that are embedded in the instruction stream. + RelocIterator iter(this, low_boundary); + while (iter.next()) { + if (iter.type() == relocInfo::metadata_type ) { + metadata_Relocation* r = iter.metadata_reloc(); + // In this metadata, we must only follow those metadatas directly embedded in + // the code. Other metadatas (oop_index>0) are seen as part of + // the metadata section below. 
+ assert(1 == (r->metadata_is_immediate()) + + (r->metadata_addr() >= metadata_begin() && r->metadata_addr() < metadata_end()), + "metadata must be found in exactly one place"); + if (r->metadata_is_immediate() && r->metadata_value() != NULL) { + Metadata* md = r->metadata_value(); + if (md != _method) f(md); + } + } else if (iter.type() == relocInfo::virtual_call_type) { + // Check compiledIC holders associated with this nmethod + CompiledIC *ic = CompiledIC_at(&iter); + if (ic->is_icholder_call()) { + CompiledICHolder* cichk = ic->cached_icholder(); + f(cichk->holder_method()); + f(cichk->holder_klass()); + } else { + Metadata* ic_oop = ic->cached_metadata(); + if (ic_oop != NULL) { + f(ic_oop); + } + } + } + } + } + + // Visit the metadata section + for (Metadata** p = metadata_begin(); p < metadata_end(); p++) { + Metadata* m = *p; + + intptr_t meta = (intptr_t)m; + if ((meta & 1) == 1) { + // already resolved + m = (Metadata*)(meta & ~1); + } else { + continue; + } + assert(Metaspace::contains(m), ""); + f(m); + } + + // Visit metadata not embedded in the other places. + if (_method != NULL) f(_method); +} + +void AOTCompiledMethod::print() const { + print_on(tty, "AOTCompiledMethod"); +} + +void AOTCompiledMethod::print_on(outputStream* st) const { + print_on(st, "AOTCompiledMethod"); +} + +// Print out more verbose output usually for a newly created aot method. +void AOTCompiledMethod::print_on(outputStream* st, const char* msg) const { + if (st != NULL) { + ttyLocker ttyl; + st->print("%7d ", (int) st->time_stamp().milliseconds()); + st->print("%4d ", _aot_id); // print compilation number + st->print(" aot[%2d]", _heap->dso_id()); + // Stubs have _method == NULL + st->print(" %s", (_method == NULL ? _name : _method->name_and_sig_as_C_string())); + if (Verbose) { + st->print(" entry at " INTPTR_FORMAT, p2i(_code)); + } + if (msg != NULL) { + st->print(" %s", msg); + } + st->cr(); + } +} + +void AOTCompiledMethod::print_value_on(outputStream* st) const { + st->print("AOTCompiledMethod "); + print_on(st, NULL); +} + +// Print a short set of xml attributes to identify this aot method. The +// output should be embedded in some other element. 
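+// Example output (illustrative):  aot_id='57' aot=' 2'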
+void AOTCompiledMethod::log_identity(xmlStream* log) const { + log->print(" aot_id='%d'", _aot_id); + log->print(" aot='%2d'", _heap->dso_id()); +} + +void AOTCompiledMethod::log_state_change() const { + if (LogCompilation) { + ResourceMark m; + if (xtty != NULL) { + ttyLocker ttyl; // keep the following output all in one block + if (*_state_adr == not_entrant) { + xtty->begin_elem("make_not_entrant thread='" UINTX_FORMAT "'", + os::current_thread_id()); + } else if (*_state_adr == not_used) { + xtty->begin_elem("make_not_used thread='" UINTX_FORMAT "'", + os::current_thread_id()); + } else if (*_state_adr == in_use) { + xtty->begin_elem("make_entrant thread='" UINTX_FORMAT "'", + os::current_thread_id()); + } + log_identity(xtty); + xtty->stamp(); + xtty->end_elem(); + } + } + if (PrintCompilation) { + ResourceMark m; + if (*_state_adr == not_entrant) { + print_on(tty, "made not entrant"); + } else if (*_state_adr == not_used) { + print_on(tty, "made not used"); + } else if (*_state_adr == in_use) { + print_on(tty, "made entrant"); + } + } +} + + +NativeInstruction* PltNativeCallWrapper::get_load_instruction(virtual_call_Relocation* r) const { + return nativeLoadGot_at(_call->plt_load_got()); +} + +void PltNativeCallWrapper::verify_resolve_call(address dest) const { + CodeBlob* db = CodeCache::find_blob_unsafe(dest); + if (db == NULL) { + assert(dest == _call->plt_resolve_call(), "sanity"); + } +} + +void PltNativeCallWrapper::set_to_interpreted(methodHandle method, CompiledICInfo& info) { + assert(!info.to_aot(), "only for nmethod"); + CompiledPltStaticCall* csc = CompiledPltStaticCall::at(instruction_address()); + csc->set_to_interpreted(method, info.entry()); +} + +NativeCallWrapper* AOTCompiledMethod::call_wrapper_at(address call) const { + return new PltNativeCallWrapper((NativePltCall*) call); +} + +NativeCallWrapper* AOTCompiledMethod::call_wrapper_before(address return_pc) const { + return new PltNativeCallWrapper(nativePltCall_before(return_pc)); +} + +CompiledStaticCall* AOTCompiledMethod::compiledStaticCall_at(Relocation* call_site) const { + return CompiledPltStaticCall::at(call_site); +} + +CompiledStaticCall* AOTCompiledMethod::compiledStaticCall_at(address call_site) const { + return CompiledPltStaticCall::at(call_site); +} + +CompiledStaticCall* AOTCompiledMethod::compiledStaticCall_before(address return_addr) const { + return CompiledPltStaticCall::before(return_addr); +} + +address AOTCompiledMethod::call_instruction_address(address pc) const { + NativePltCall* pltcall = nativePltCall_before(pc); + return pltcall->instruction_address(); +} + +bool AOTCompiledMethod::is_evol_dependent_on(Klass* dependee) { + return !is_aot_runtime_stub() && _heap->is_dependent_method(dependee, this); +} + +void AOTCompiledMethod::clear_inline_caches() { + assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint"); + if (is_zombie()) { + return; + } + + RelocIterator iter(this); + while (iter.next()) { + iter.reloc()->clear_inline_cache(); + if (iter.type() == relocInfo::opt_virtual_call_type) { + CompiledIC* cic = CompiledIC_at(&iter); + assert(cic->is_clean(), "!"); + nativePltCall_at(iter.addr())->set_stub_to_clean(); + } + } +} + --- /dev/null 2016-10-31 17:47:20.000000000 -0700 +++ new/src/share/vm/aot/aotCompiledMethod.hpp 2016-10-31 17:47:19.000000000 -0700 @@ -0,0 +1,324 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_VM_AOT_AOTCOMPILEDMETHOD_HPP +#define SHARE_VM_AOT_AOTCOMPILEDMETHOD_HPP + +#include "code/pcDesc.hpp" +#include "code/relocInfo.hpp" +#include "code/compiledIC.hpp" + +class AOTCodeHeap; + +class aot_metadata { +private: + int _size; + int _code_size; + int _entry; + int _verified_entry; + int _exception_handler_offset; + int _deopt_handler_offset; + int _stubs_offset; + int _frame_size; + // Location in the frame (offset from sp) where deopt can store the original + // pc during a deopt. + int _orig_pc_offset; + int _unsafe_access; + + int _pc_desc_begin; + int _scopes_begin; + int _reloc_begin; + int _exception_table_begin; + int _oopmap_begin; + address at_offset(size_t offset) const { return ((address) this) + offset; } +public: + int code_size() const { return _code_size; } + int frame_size() const { return _frame_size / HeapWordSize; } + PcDesc *scopes_pcs_begin() const { return (PcDesc *) at_offset(_pc_desc_begin); } + PcDesc *scopes_pcs_end() const { return (PcDesc *) at_offset(_scopes_begin); } + address scopes_data_begin() const { return at_offset(_scopes_begin); } + address scopes_data_end() const { return at_offset(_reloc_begin); } + relocInfo* relocation_begin() const { return (relocInfo*) at_offset(_reloc_begin); } + relocInfo* relocation_end() const { return (relocInfo*) at_offset(_exception_table_begin); } + address handler_table_begin () const { return at_offset(_exception_table_begin); } + address handler_table_end() const { return at_offset(_oopmap_begin); } + + address nul_chk_table_begin() const { return at_offset(_oopmap_begin); } + address nul_chk_table_end() const { return at_offset(_oopmap_begin); } + + ImmutableOopMapSet* oopmap_set() const { return (ImmutableOopMapSet*) at_offset(_oopmap_begin); } + + address consts_begin() const { return at_offset(_size); } + address consts_end() const { return at_offset(_size); } + int stub_offset() const { return _stubs_offset; } + int entry_offset() const { return _entry; } + int verified_entry_offset() const { return _verified_entry; } + int exception_handler_offset() const { return _exception_handler_offset; } + int deopt_handler_offset() const { return _deopt_handler_offset; } + int orig_pc_offset() const { return _orig_pc_offset; } + + int handler_table_size() const { return handler_table_end() - handler_table_begin(); } + int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); } + bool has_unsafe_access() const { return _unsafe_access != 0; } + +}; + +/* + * Use this for AOTCompiledMethods since a lot of the fields in CodeBlob get the same + * value when they come from
AOT. code_begin == content_begin, etc... */ +class AOTCompiledMethodLayout : public CodeBlobLayout { +public: + AOTCompiledMethodLayout(address code_begin, address code_end, address relocation_begin, address relocation_end) : + CodeBlobLayout( + code_begin, // code_begin + code_end, // code_end + code_begin, // content_begin + code_end, // content_end + code_end, // data_end + relocation_begin, // relocation_begin + relocation_end + ) { + } +}; + +class AOTCompiledMethod : public CompiledMethod, public CHeapObj<mtCode> { +private: + address _code; + aot_metadata* _meta; + Metadata** _metadata_got; + jlong* _state_adr; // Address of cell to indicate aot method state (in_use or not_entrant) + AOTCodeHeap* _heap; // code heap which has this method + const char* _name; // For stub: "AOT Stub"; for nmethod: "Ljava/lang/ThreadGroup;addUnstarted()V" + const int _metadata_size; // size of _metadata_got + const int _aot_id; + const int _method_index; + oop _oop; // method()->method_holder()->klass_holder() + + address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _meta->orig_pc_offset()); } + bool make_not_entrant_helper(int new_state); + + public: + using CHeapObj<mtCode>::operator new; + using CHeapObj<mtCode>::operator delete; + + int method_index() const { return _method_index; } + void set_oop(oop o) { _oop = o; } + + AOTCompiledMethod(address code, Method* method, aot_metadata* meta, address metadata_got, int metadata_size, jlong* state_adr, AOTCodeHeap* heap, const char* name, int method_index, int aot_id) : + CompiledMethod(method, name, compiler_jvmci, // AOT code is generated by JVMCI compiler + AOTCompiledMethodLayout(code, code + meta->code_size(), (address) meta->relocation_begin(), (address) meta->relocation_end()), + 0 /* frame_complete_offset */, meta->frame_size() /* frame_size */, meta->oopmap_set(), false /* caller_must_gc_arguments */), + _code(code), + _meta(meta), + _metadata_got((Metadata**) metadata_got), + _state_adr(state_adr), + _heap(heap), + _name(name), + _metadata_size(metadata_size), + _method_index(method_index), + _aot_id(aot_id) { + + _is_far_code = CodeCache::is_far_target(code) || + CodeCache::is_far_target(code + meta->code_size()); + _exception_cache = NULL; + + _scopes_data_begin = (address) _meta->scopes_data_begin(); + _deopt_handler_begin = (address) _code + _meta->deopt_handler_offset(); + _deopt_mh_handler_begin = (address) this; + + _pc_desc_container.reset_to(scopes_pcs_begin()); + + // Mark the AOTCompiledMethod as in_use + *_state_adr = nmethod::in_use; + set_has_unsafe_access(_meta->has_unsafe_access()); + _oop = NULL; + } + + virtual bool is_aot() const { return true; } + virtual bool is_runtime_stub() const { return is_aot_runtime_stub(); } + + virtual bool is_compiled() const { return !is_aot_runtime_stub(); } + + virtual bool is_locked_by_vm() const { return false; } + + int state() const { return *_state_adr; } + + // Non-virtual for speed + bool _is_alive() const { return state() < zombie; } + + virtual bool is_zombie() const { return state() == zombie; } + virtual bool is_unloaded() const { return state() == unloaded; } + virtual bool is_not_entrant() const { return state() == not_entrant || + state() == not_used; } + virtual bool is_alive() const { return _is_alive(); } + virtual bool is_in_use() const { return state() == in_use; } + + address exception_begin() { return (address) _code + _meta->exception_handler_offset(); } + + virtual const char* name() const { return _name; } + + virtual int compile_id() const {
return _aot_id; } + + void print_on(outputStream* st) const; + void print_on(outputStream* st, const char* msg) const; + void print() const; + + virtual void print_value_on(outputStream *stream) const; + virtual void print_block_comment(outputStream *stream, address block_begin) const { } + virtual void verify() {} + + virtual int comp_level() const { return CompLevel_aot; } + virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); } + virtual void log_identity(xmlStream* stream) const; + virtual void log_state_change() const; + virtual bool make_entrant(); + virtual bool make_not_entrant() { return make_not_entrant_helper(not_entrant); } + virtual bool make_not_used() { return make_not_entrant_helper(not_used); } + virtual address entry_point() const { return _code + _meta->entry_offset(); } + virtual bool make_zombie() { ShouldNotReachHere(); return false; } + virtual bool is_osr_method() const { return false; } + virtual int osr_entry_bci() const { ShouldNotReachHere(); return -1; } + // AOT compiled methods do not get into zombie state + virtual bool can_convert_to_zombie() { return false; } + + virtual bool is_evol_dependent_on(Klass* dependee); + virtual bool is_dependent_on_method(Method* dependee) { return true; } + + virtual void clear_inline_caches(); + + virtual void print_pcs() {} + + virtual address scopes_data_end() const { return _meta->scopes_data_end(); } + + virtual oop oop_at(int index) const; + virtual Metadata* metadata_at(int index) const; + + virtual PcDesc* scopes_pcs_begin() const { return _meta->scopes_pcs_begin(); } + virtual PcDesc* scopes_pcs_end() const { return _meta->scopes_pcs_end(); } + + virtual address handler_table_begin() const { return _meta->handler_table_begin(); } + virtual address handler_table_end() const { return _meta->handler_table_end(); } + + virtual address nul_chk_table_begin() const { return _meta->nul_chk_table_begin(); } + virtual address nul_chk_table_end() const { return _meta->nul_chk_table_end(); } + + virtual address consts_begin() const { return _meta->consts_begin(); } + virtual address consts_end() const { return _meta->consts_end(); } + + virtual address stub_begin() const { return code_begin() + _meta->stub_offset(); } + virtual address stub_end() const { return code_end(); } + + virtual oop* oop_addr_at(int index) const { ShouldNotReachHere(); return NULL; } + virtual Metadata** metadata_addr_at(int index) const { ShouldNotReachHere(); return NULL; } + + // Accessor/mutator for the original pc of a frame before a frame was deopted. 
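// The saved pc lives in the frame itself, at _meta->orig_pc_offset() bytes
// from the frame's unextended sp (see orig_pc_addr() above).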
+ address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); } + void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; } + +#ifdef HOTSWAP + // Flushing and deoptimization in case of evolution + void flush_evol_dependents_on(instanceKlassHandle dependee); +#endif // HOTSWAP + + virtual void metadata_do(void f(Metadata*)); + + bool metadata_got_contains(Metadata **p) { + return p >= &_metadata_got[0] && p < &_metadata_got[_metadata_size]; + } + + Metadata** metadata_begin() const { return &_metadata_got[0] ; } + Metadata** metadata_end() const { return &_metadata_got[_metadata_size] ; } + const char* compile_kind() const { return "AOT"; } + + int get_state() const { + return (int) (*_state_adr); + } + + virtual void oops_do(OopClosure* f); + + // inlined and non-virtual for AOTCodeHeap::oops_do + void do_oops(OopClosure* f) { + assert(_is_alive(), ""); + if (_oop != NULL) { + f->do_oop(&_oop); + } +#if 0 + metadata_oops_do(metadata_begin(), metadata_end(), f); +#endif + } + + +protected: + // AOT compiled methods are not flushed + void flush() {}; + + NativeCallWrapper* call_wrapper_at(address call) const; + NativeCallWrapper* call_wrapper_before(address return_pc) const; + address call_instruction_address(address pc) const; + + CompiledStaticCall* compiledStaticCall_at(Relocation* call_site) const; + CompiledStaticCall* compiledStaticCall_at(address addr) const; + CompiledStaticCall* compiledStaticCall_before(address addr) const; +private: + bool is_aot_runtime_stub() const { return _method == NULL; } + +protected: + virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred); + virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) { return false; } + +}; + +class PltNativeCallWrapper: public NativeCallWrapper { +private: + NativePltCall* _call; + +public: + PltNativeCallWrapper(NativePltCall* call) : _call(call) {} + + virtual address destination() const { return _call->destination(); } + virtual address instruction_address() const { return _call->instruction_address(); } + virtual address next_instruction_address() const { return _call->next_instruction_address(); } + virtual address return_address() const { return _call->return_address(); } + virtual address get_resolve_call_stub(bool is_optimized) const { return _call->plt_resolve_call(); } + virtual void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); } + virtual void set_to_interpreted(methodHandle method, CompiledICInfo& info); + virtual void verify() const { _call->verify(); } + virtual void verify_resolve_call(address dest) const; + + virtual bool is_call_to_interpreted(address dest) const { return (dest == _call->plt_c2i_stub()); } + // TODO: assume for now that patching of aot code (got cell) is safe. + virtual bool is_safe_for_patching() const { return true; } + + virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const; + + virtual void *get_data(NativeInstruction* instruction) const { + return (void*)((NativeLoadGot*) instruction)->data(); + } + + virtual void set_data(NativeInstruction* instruction, intptr_t data) { + ((NativeLoadGot*) instruction)->set_data(data); + } +}; + +#endif //SHARE_VM_AOT_AOTCOMPILEDMETHOD_HPP --- /dev/null 2016-10-31 17:47:20.000000000 -0700 +++ new/src/share/vm/aot/aotLoader.cpp 2016-10-31 17:47:20.000000000 -0700 @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" + +#include "jvmci/jvmciRuntime.hpp" +#include "oops/method.hpp" +#include "aot/aotLoader.hpp" +#include "aot/aotCodeHeap.hpp" + +#include <dlfcn.h> + +GrowableArray<AOTCodeHeap*>* AOTLoader::_heaps = new(ResourceObj::C_HEAP, mtCode) GrowableArray<AOTCodeHeap*> (2, true); + +// Iterate over all AOT CodeHeaps +#define FOR_ALL_AOT_HEAPS(heap) for (GrowableArrayIterator<AOTCodeHeap*> heap = heaps()->begin(); heap != heaps()->end(); ++heap) + +void AOTLoader::load_for_klass(instanceKlassHandle kh, Thread* thread) { + FOR_ALL_AOT_HEAPS(heap) { + (*heap)->load_klass_data(kh, thread); + } +} + +uint64_t AOTLoader::get_saved_fingerprint(InstanceKlass* ik) { + FOR_ALL_AOT_HEAPS(heap) { + AOTKlassData* klass_data = (*heap)->find_klass(ik); + if (klass_data != NULL) { + return klass_data->_fingerprint; + } + } + return 0; +} + +bool AOTLoader::find_klass(InstanceKlass* ik) { + FOR_ALL_AOT_HEAPS(heap) { + if ((*heap)->find_klass(ik) != NULL) { + return true; + } + } + return false; +} + +bool AOTLoader::contains(address p) { + FOR_ALL_AOT_HEAPS(heap) { + if ((*heap)->contains(p)) { + return true; + } + } + return false; +} + +AOTCompiledMethod* AOTLoader::find_aot(address p) { + // NMT can walk the stack before heap array is created + if (heaps() != NULL) { + FOR_ALL_AOT_HEAPS(heap) { + if ((*heap)->contains(p)) { + return (*heap)->find_aot(p); + } + } + } + return NULL; +} + +void AOTLoader::oops_do(OopClosure* f) { + FOR_ALL_AOT_HEAPS(heap) { + (*heap)->oops_do(f); + } +} + +void AOTLoader::metadata_do(void f(Metadata*)) { + FOR_ALL_AOT_HEAPS(heap) { + (*heap)->metadata_do(f); + } +} + +// Flushing and deoptimization in case of evolution +void AOTLoader::flush_evol_dependents_on(instanceKlassHandle dependee) { + // make non entrant and mark for deoptimization + FOR_ALL_AOT_HEAPS(heap) { + (*heap)->flush_evol_dependents_on(dependee); + } + Deoptimization::deoptimize_dependents(); +} + +/** + * List of core modules for which we search for shared libraries. + */ +static const char* modules[] = { + "java.base", + "java.logging", + "jdk.compiler", + "jdk.scripting.nashorn", + "jdk.vm.ci", + "jdk.vm.compiler" +}; + +void AOTLoader::initialize() { + // Probe if we have libraries for core modules and load them if available.
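// For example (illustrative path), with UseCompressedOops and UseG1GC on,
// the loop below probes for $JAVA_HOME/lib/libjava.base-coop.so; with G1
// off the name would be libjava.base-coop-nong1.so.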
+ bool libraries_found = false; + + if (FLAG_IS_DEFAULT(UseAOT) && AOTLibrary != NULL) { + // No need to set UseAOT on the command line when AOTLibrary is specified. + FLAG_SET_DEFAULT(UseAOT, true); + } + if (UseAOT) { + // EagerInitialization is not compatible with AOT + if (EagerInitialization) { + warning("EagerInitialization is not compatible with AOT (switching AOT off)"); + FLAG_SET_DEFAULT(UseAOT, false); + return; + } + + // -Xint is not compatible with AOT + if (Arguments::is_interpreter_only()) { + warning("-Xint is not compatible with AOT (switching AOT off)"); + FLAG_SET_DEFAULT(UseAOT, false); + return; + } + + const char* home = Arguments::get_java_home(); + const char* file_separator = os::file_separator(); + + for (int i = 0; i < (int) (sizeof(modules) / sizeof(const char*)); i++) { + char library[JVM_MAXPATHLEN]; + jio_snprintf(library, sizeof(library), "%s%slib%slib%s%s%s.so", home, file_separator, file_separator, modules[i], UseCompressedOops ? "-coop" : "", UseG1GC ? "" : "-nong1"); + bool found = load_library(library, false); + if (found) { + libraries_found = true; + } + } + + // Scan the AOTLibrary option. + if (AOTLibrary != NULL) { + const int len = (int) strlen(AOTLibrary); + char* cp = NEW_C_HEAP_ARRAY(char, len+1, mtCode); + memcpy(cp, AOTLibrary, len); + cp[len] = '\0'; + char* end = cp + len; + while (cp < end) { + const char* name = cp; + while ((*cp) != '\0' && (*cp) != '\n' && (*cp) != ',' && (*cp) != ':' && (*cp) != ';') cp++; + cp[0] = '\0'; // Terminate name + cp++; + bool found = load_library(name, true); + if (found) { + libraries_found = true; + } + } + } + + // If we did not find any AOT libraries, turn AOT off unless it was explicitly requested. + if (!libraries_found) { + if (FLAG_IS_DEFAULT(UseAOT)) { + FLAG_SET_DEFAULT(UseAOT, false); + } + } + } +} + +void AOTLoader::universe_init() { + if (UseAOT && heaps_count() > 0) { + // The shifts are static values which are initialized to 0 until the java heap is initialized. + // AOT libraries are loaded before the heap is initialized, so the shift values are not set yet. + // That is okay since the ObjectAlignmentInBytes flag, which determines the shift value, is set before AOT libraries are loaded. + // Set the shift values based on the first AOT library's configuration. + if (UseCompressedOops && AOTCodeHeap::narrow_oop_shift_initialized()) { + int oop_shift = Universe::narrow_oop_shift(); + if (oop_shift == 0) { + Universe::set_narrow_oop_shift(AOTCodeHeap::narrow_oop_shift()); + } else { + FOR_ALL_AOT_HEAPS(heap) { + (*heap)->verify_flag(AOTCodeHeap::narrow_oop_shift(), oop_shift, "Universe::narrow_oop_shift"); + } + } + if (UseCompressedClassPointers) { // It is set only if UseCompressedOops is set + int klass_shift = Universe::narrow_klass_shift(); + if (klass_shift == 0) { + Universe::set_narrow_klass_shift(AOTCodeHeap::narrow_klass_shift()); + } else { + FOR_ALL_AOT_HEAPS(heap) { + (*heap)->verify_flag(AOTCodeHeap::narrow_klass_shift(), klass_shift, "Universe::narrow_klass_shift"); + } + } + } + } + } +} + +void AOTLoader::set_narrow_klass_shift() { + // This method could be called from Metaspace::set_narrow_klass_base_and_shift(). + // If it is not called (during a CDS dump, for example), the corresponding code in + // AOTLoader::universe_init(), which is called later, will set the shift value.
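// Whichever of the two paths runs first with AOT libraries loaded
// establishes the klass shift; the oop shift is set in universe_init() only.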
+ if (UseAOT && heaps_count() > 0 && + UseCompressedOops && AOTCodeHeap::narrow_oop_shift_initialized() && + UseCompressedClassPointers) { + int klass_shift = Universe::narrow_klass_shift(); + if (klass_shift == 0) { + Universe::set_narrow_klass_shift(AOTCodeHeap::narrow_klass_shift()); + } else { + FOR_ALL_AOT_HEAPS(heap) { + (*heap)->verify_flag(AOTCodeHeap::narrow_klass_shift(), klass_shift, "Universe::narrow_klass_shift"); + } + } + } +} + +bool AOTLoader::load_library(const char* name, bool exit_on_error) { + void* handle = dlopen(name, RTLD_LAZY); + if (handle == NULL) { + if (exit_on_error) { + tty->print_cr("error opening file: %s", dlerror()); + vm_exit(1); + } + return false; + } + const int dso_id = heaps_count() + 1; + AOTCodeHeap* heap = new AOTCodeHeap(handle, name, dso_id); + if (!heap->is_valid()) { + delete heap; + dlclose(handle); + return false; + } + { + MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag); + add_heap(heap); + CodeCache::add_heap(heap); + } + return true; +} + +#ifndef PRODUCT +void AOTLoader::print_statistics() { + { ttyLocker ttyl; + tty->print_cr("--- AOT Statistics ---"); + tty->print_cr("AOT libraries loaded: %d", heaps_count()); + AOTCodeHeap::print_statistics(); + } +} +#endif --- /dev/null 2016-10-31 17:47:21.000000000 -0700 +++ new/src/share/vm/aot/aotLoader.hpp 2016-10-31 17:47:21.000000000 -0700 @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_VM_AOT_AOTLOADER_HPP +#define SHARE_VM_AOT_AOTLOADER_HPP + + +#include "classfile/classFileParser.hpp" +#include "memory/iterator.hpp" +#include "oops/metadata.hpp" +#if INCLUDE_AOT +#include "aot/aotCompiledMethod.hpp" +#endif + +class AOTLoader { +private: +#if INCLUDE_AOT + static GrowableArray<AOTCodeHeap*>* _heaps; +#endif + static bool load_library(const char* name, bool exit_on_error); + +public: +#if INCLUDE_AOT + static GrowableArray<AOTCodeHeap*>* heaps() { return _heaps; } + static int heaps_count() { return heaps()->length(); } + static void add_heap(AOTCodeHeap *heap) { heaps()->append(heap); } +#endif + static void initialize() NOT_AOT({ FLAG_SET_ERGO(bool, UseAOT, false); }); + + static void universe_init() NOT_AOT_RETURN; + static void set_narrow_klass_shift() NOT_AOT_RETURN; + static bool contains(address p) NOT_AOT({ return false; }); +#if INCLUDE_AOT + static AOTCompiledMethod* find_aot(address p); +#endif + static void load_for_klass(instanceKlassHandle kh, Thread* thread) NOT_AOT_RETURN; + static bool find_klass(InstanceKlass* ik) NOT_AOT({ return false; }); + static uint64_t get_saved_fingerprint(InstanceKlass* ik) NOT_AOT({ return 0; }); + static void oops_do(OopClosure* f) NOT_AOT_RETURN; + static void metadata_do(void f(Metadata*)) NOT_AOT_RETURN; + + NOT_PRODUCT( static void print_statistics() NOT_AOT_RETURN; ) + +#ifdef HOTSWAP + // Flushing and deoptimization in case of evolution + static void flush_evol_dependents_on(instanceKlassHandle dependee) NOT_AOT_RETURN; +#endif // HOTSWAP + +}; + +#endif // SHARE_VM_AOT_AOTLOADER_HPP --- /dev/null 2016-10-31 17:47:21.000000000 -0700 +++ new/src/share/vm/aot/compiledIC_aot.cpp 2016-10-31 17:47:21.000000000 -0700 @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "aot/compiledIC_aot.hpp" + +bool CompiledPltStaticCall::is_call_to_interpreted() const { + // It is a call to interpreted code if it calls a stub. Hence, the destination + // must be in the stub part of the nmethod that contains the call. + return destination() == _call->plt_c2i_stub(); +} + +address CompiledPltStaticCall::find_stub() { + // This is a static NativePltCall; return the c2i stub address. + return _call->plt_c2i_stub(); +} --- /dev/null 2016-10-31 17:47:22.000000000 -0700 +++ new/src/share/vm/aot/compiledIC_aot.hpp 2016-10-31 17:47:22.000000000 -0700 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_VM_AOT_COMPILEDIC_AOT_HPP +#define SHARE_VM_AOT_COMPILEDIC_AOT_HPP + +#include "code/nativeInst.hpp" +#include "interpreter/linkResolver.hpp" +#include "oops/compiledICHolder.hpp" +#include "code/compiledIC.hpp" + +class CompiledPltStaticCall: public CompiledStaticCall { + friend class CompiledIC; + friend class PltNativeCallWrapper; + + // Also used by CompiledIC + void set_to_interpreted(methodHandle callee, address entry); + + address instruction_address() const { return _call->instruction_address(); } + void set_destination_mt_safe(address dest) { _call->set_destination_mt_safe(dest); } + + NativePltCall* _call; + + CompiledPltStaticCall(NativePltCall* call) : _call(call) {} + + public: + + inline static CompiledPltStaticCall* before(address return_addr) { + CompiledPltStaticCall* st = new CompiledPltStaticCall(nativePltCall_before(return_addr)); + st->verify(); + return st; + } + + static inline CompiledPltStaticCall* at(address native_call) { + CompiledPltStaticCall* st = new CompiledPltStaticCall(nativePltCall_at(native_call)); + st->verify(); + return st; + } + + static inline CompiledPltStaticCall* at(Relocation* call_site) { + return at(call_site->addr()); + } + + // Delegation + address destination() const { return _call->destination(); } + + virtual bool is_call_to_interpreted() const; + + // Stub support + address find_stub(); + static void set_stub_to_clean(static_stub_Relocation* static_stub); + + // Misc. + void print() PRODUCT_RETURN; + void verify() PRODUCT_RETURN; + + protected: + virtual address resolve_call_stub() const { return _call->plt_resolve_call(); } + virtual void set_to_far(methodHandle callee, address entry) { set_to_compiled(entry); } + virtual const char* name() const { return "CompiledPltStaticCall"; } +}; + +#endif // SHARE_VM_AOT_COMPILEDIC_AOT_HPP --- /dev/null 2016-10-31 17:47:22.000000000 -0700 +++ new/src/share/vm/jvmci/compilerRuntime.cpp 2016-10-31 17:47:22.000000000 -0700 @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#include "precompiled.hpp" +#include "classfile/stringTable.hpp" +#include "classfile/symbolTable.hpp" +#include "jvmci/compilerRuntime.hpp" +#include "runtime/interfaceSupport.hpp" +#include "runtime/compilationPolicy.hpp" +#include "runtime/deoptimization.hpp" + +// Resolve and allocate String +JRT_BLOCK_ENTRY(void, CompilerRuntime::resolve_string_by_symbol(JavaThread *thread, void* string_result, const char* name)) + JRT_BLOCK + oop str = *(oop*)string_result; // Is it resolved already? + if (str == NULL) { // Do resolution + // First 2 bytes of name contain the length (number of bytes). + int len = build_u2_from((address)name); + name += 2; + TempNewSymbol sym = SymbolTable::new_symbol(name, len, CHECK); + str = StringTable::intern(sym, CHECK); + assert(java_lang_String::is_instance(str), "must be string"); + *(oop*)string_result = str; // Store result + } + assert(str != NULL, "Should be allocated!"); + thread->set_vm_result(str); + JRT_BLOCK_END +JRT_END + + + +Klass* CompilerRuntime::resolve_klass_helper(JavaThread *thread, const char* name, int len, TRAPS) { + ResourceMark rm(THREAD); + // Last java frame on the stack (which includes native call frames). + RegisterMap cbl_map(thread, false); + // Skip stub + frame caller_frame = thread->last_frame().sender(&cbl_map); + CodeBlob* caller_cb = caller_frame.cb(); + guarantee(caller_cb != NULL && caller_cb->is_compiled(), "must be called from compiled method"); + CompiledMethod* caller_nm = caller_cb->as_compiled_method_or_null(); + methodHandle caller(THREAD, caller_nm->method()); + + // Use the class loader of the aot method. + Handle loader(THREAD, caller->method_holder()->class_loader()); + Handle protection_domain(THREAD, caller->method_holder()->protection_domain()); + + // Strip the wrapping 'L' and ';'. + if (name[0] == 'L') { + assert(len > 2, "small name %s", name); + name++; + len -= 2; + } + TempNewSymbol sym = SymbolTable::new_symbol(name, len, CHECK_NULL); + if (sym == NULL) { + return NULL; + } + Klass* k = SystemDictionary::resolve_or_fail(sym, loader, protection_domain, true, CHECK_NULL); + + return k; +} + +// Resolve Klass +JRT_BLOCK_ENTRY(Klass*, CompilerRuntime::resolve_klass_by_symbol(JavaThread *thread, Klass** klass_result, const char* name)) + Klass* k = NULL; + JRT_BLOCK + k = *klass_result; // Is it resolved already? + if (k == NULL) { // Do resolution + // First 2 bytes of name contain the length (number of bytes).
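// Illustrative encoding: for "Ljava/lang/String;" the blob is the two bytes
// 0x00 0x12 (byte count 18, big-endian as read by build_u2_from) followed by
// the 18 UTF-8 bytes of the name.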
+ int len = build_u2_from((address)name); + name += 2; + k = CompilerRuntime::resolve_klass_helper(thread, name, len, CHECK_NULL); + *klass_result = k; // Store result + } + JRT_BLOCK_END + assert(k != NULL, "Should be loaded!"); + return k; +JRT_END + + +Method* CompilerRuntime::resolve_method_helper(Klass* klass, const char* method_name, int method_name_len, + const char* signature_name, int signature_name_len) { + Method* m = NULL; + TempNewSymbol name_symbol = SymbolTable::probe(method_name, method_name_len); + TempNewSymbol signature_symbol = SymbolTable::probe(signature_name, signature_name_len); + if (name_symbol != NULL && signature_symbol != NULL) { + if (name_symbol == vmSymbols::object_initializer_name() || + name_symbol == vmSymbols::class_initializer_name()) { + // Never search superclasses for constructors + if (klass->is_instance_klass()) { + m = InstanceKlass::cast(klass)->find_method(name_symbol, signature_symbol); + } + } else { + m = klass->lookup_method(name_symbol, signature_symbol); + if (m == NULL && klass->is_instance_klass()) { + m = InstanceKlass::cast(klass)->lookup_method_in_ordered_interfaces(name_symbol, signature_symbol); + } + } + } + return m; +} + +JRT_BLOCK_ENTRY(MethodCounters*, CompilerRuntime::resolve_method_by_symbol_and_load_counters(JavaThread *thread, MethodCounters** counters_result, Klass* klass, const char* data)) + MethodCounters* c = *counters_result; // Is it resolved already? + JRT_BLOCK + if (c == NULL) { // Do resolution + // Get method name and its length + int method_name_len = build_u2_from((address)data); + data += sizeof(u2); + const char* method_name = data; + data += method_name_len; + + // Get signature and its length + int signature_name_len = build_u2_from((address)data); + data += sizeof(u2); + const char* signature_name = data; + + assert(klass != NULL, "Klass parameter must not be null"); + Method* m = resolve_method_helper(klass, method_name, method_name_len, signature_name, signature_name_len); + assert(m != NULL, "Method must resolve successfully"); + + // Create method counters immediately to avoid a check at runtime. + c = m->get_method_counters(thread); + if (c == NULL) { + THROW_MSG_NULL(vmSymbols::java_lang_OutOfMemoryError(), "Cannot allocate method counters"); + } + + *counters_result = c; + } + JRT_BLOCK_END + return c; +JRT_END + +// Resolve and initialize Klass +JRT_BLOCK_ENTRY(Klass*, CompilerRuntime::initialize_klass_by_symbol(JavaThread *thread, Klass** klass_result, const char* name)) + Klass* k = NULL; + JRT_BLOCK + k = klass_result[0]; // Is it initialized already? + if (k == NULL) { // Do initialization + k = klass_result[1]; // Is it resolved already? + if (k == NULL) { // Do resolution + // First 2 bytes of name contain the length (number of bytes). + int len = build_u2_from((address)name); + const char *cname = name + 2; + k = CompilerRuntime::resolve_klass_helper(thread, cname, len, CHECK_NULL); + klass_result[1] = k; // Store resolved result + } + Klass* k0 = klass_result[0]; // Is it initialized already? + if (k0 == NULL && k != NULL && k->is_instance_klass()) { + // Force initialization of instance class + InstanceKlass::cast(k)->initialize(CHECK_NULL); + // Double-check that it was really initialized, + // because we could be doing a recursive call + // from inside <clinit>.
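// (During such a recursive call is_initialized() is still false -- the class
// is only marked as being initialized -- so klass_result[0] stays NULL and a
// later call stores it once initialization has really completed.)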
+ if (InstanceKlass::cast(k)->is_initialized()) { + klass_result[0] = k; // Store initialized result + } + } + } + JRT_BLOCK_END + assert(k != NULL, " Should be loaded!"); + return k; +JRT_END + + +JRT_BLOCK_ENTRY(void, CompilerRuntime::invocation_event(JavaThread *thread, MethodCounters* counters)) + if (!TieredCompilation) { + // Ignore the event if tiered is off + return; + } + JRT_BLOCK + methodHandle mh(THREAD, counters->method()); + RegisterMap map(thread, false); + + // Compute the enclosing method + frame fr = thread->last_frame().sender(&map); + CompiledMethod* cm = fr.cb()->as_compiled_method_or_null(); + assert(cm != NULL && cm->is_compiled(), "Sanity check"); + methodHandle emh(THREAD, cm->method()); + + assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending"); + CompilationPolicy::policy()->event(emh, mh, InvocationEntryBci, InvocationEntryBci, CompLevel_aot, cm, thread); + assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions"); + JRT_BLOCK_END +JRT_END + +JRT_BLOCK_ENTRY(void, CompilerRuntime::backedge_event(JavaThread *thread, MethodCounters* counters, int branch_bci, int target_bci)) + if (!TieredCompilation) { + // Ignore the event if tiered is off + return; + } + assert(branch_bci != InvocationEntryBci && target_bci != InvocationEntryBci, "Wrong bci"); + assert(target_bci <= branch_bci, "Expected a back edge"); + JRT_BLOCK + methodHandle mh(THREAD, counters->method()); + RegisterMap map(thread, false); + + // Compute the enclosing method + frame fr = thread->last_frame().sender(&map); + CompiledMethod* cm = fr.cb()->as_compiled_method_or_null(); + assert(cm != NULL && cm->is_compiled(), "Sanity check"); + methodHandle emh(THREAD, cm->method()); + assert(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending"); + nmethod* osr_nm = CompilationPolicy::policy()->event(emh, mh, branch_bci, target_bci, CompLevel_aot, cm, thread); + assert(!HAS_PENDING_EXCEPTION, "Event handler should not throw any exceptions"); + if (osr_nm != NULL) { + Deoptimization::deoptimize_frame(thread, fr.id()); + } + JRT_BLOCK_END +JRT_END --- /dev/null 2016-10-31 17:47:23.000000000 -0700 +++ new/src/share/vm/jvmci/compilerRuntime.hpp 2016-10-31 17:47:23.000000000 -0700 @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ */ + +#ifndef SHARE_VM_RUNTIME_COMPILERRUNTIME_HPP +#define SHARE_VM_RUNTIME_COMPILERRUNTIME_HPP + +#include "memory/allocation.hpp" +#include "memory/resourceArea.hpp" +#include "oops/klass.hpp" +#include "oops/method.hpp" +#include "utilities/exceptions.hpp" + +class CompilerRuntime : AllStatic { + public: + // Resolves klass for aot compiled method. + static Klass* resolve_klass_helper(JavaThread *thread, const char* name, int len, TRAPS); + // Resolves method for aot compiled method. + static Method* resolve_method_helper(Klass* klass, const char* method_name, int method_name_len, + const char* signature_name, int signature_name_len); + // Resolution methods for aot compiled code. + static void resolve_string_by_symbol(JavaThread *thread, void* string_result, const char* name); + static Klass* resolve_klass_by_symbol(JavaThread *thread, Klass** klass_result, const char* name); + static Klass* initialize_klass_by_symbol(JavaThread *thread, Klass** klass_result, const char* name); + static MethodCounters* resolve_method_by_symbol_and_load_counters(JavaThread *thread, MethodCounters** counters_result, Klass* klass_hint, const char* data); + static void invocation_event(JavaThread *thread, MethodCounters* counters); + static void backedge_event(JavaThread *thread, MethodCounters* counters, int branch_bci, int target_bci); +}; + +#endif // SHARE_VM_RUNTIME_COMPILERRUNTIME_HPP --- /dev/null 2016-10-31 17:47:23.000000000 -0700 +++ new/src/share/vm/jvmci/vmStructs_compiler_runtime.hpp 2016-10-31 17:47:23.000000000 -0700 @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + */ + +#ifndef SHARE_VM_JVMCI_VMSTRUCTS_COMPILER_RUNTIME_HPP +#define SHARE_VM_JVMCI_VMSTRUCTS_COMPILER_RUNTIME_HPP + +#include "jvmci/compilerRuntime.hpp" + +#define VM_ADDRESSES_COMPILER_RUNTIME(declare_address, declare_preprocessor_address, declare_function) \ + declare_function(CompilerRuntime::resolve_string_by_symbol) \ + declare_function(CompilerRuntime::resolve_klass_by_symbol) \ + declare_function(CompilerRuntime::resolve_method_by_symbol_and_load_counters) \ + declare_function(CompilerRuntime::initialize_klass_by_symbol) \ + declare_function(CompilerRuntime::invocation_event) \ + declare_function(CompilerRuntime::backedge_event) + +#endif // SHARE_VM_JVMCI_VMSTRUCTS_COMPILER_RUNTIME_HPP
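To make the calling convention concrete, here is a minimal standalone sketch (not part of the change) of how AOT-generated code is expected to use CompilerRuntime::resolve_klass_by_symbol: a got cell caches the resolved Klass*, and the name is a u2 byte count followed by the UTF-8 bytes of the symbol. The names encode_symbol, load_klass, and resolve_klass_by_symbol_stub, as well as the mock resolver body, are hypothetical illustrations of the contract, not APIs from this patch.

// Hypothetical sketch; only the cell-caching and name-encoding behavior
// mirrors the runtime entry points above.
#include <cstdint>
#include <cstring>

struct Klass;  // opaque stand-in for the VM type

// Mock resolver so the sketch is self-contained; the real entry point does
// SystemDictionary::resolve_or_fail() and stores the result into the cell.
static Klass* resolve_klass_by_symbol_stub(Klass** klass_result, const char* name) {
  (void) name;
  static int dummy_storage;
  Klass* k = reinterpret_cast<Klass*>(&dummy_storage);  // placeholder value
  *klass_result = k;
  return k;
}

// Build the blob the runtime expects: a big-endian u2 byte count (as read
// by build_u2_from) followed by the UTF-8 bytes of the symbol.
static size_t encode_symbol(char* buf, const char* sym) {
  uint16_t len = (uint16_t) strlen(sym);
  buf[0] = (char) (len >> 8);
  buf[1] = (char) (len & 0xff);
  memcpy(buf + 2, sym, len);
  return 2u + len;
}

// Fast path: the got cell caches the resolved Klass*, so the runtime call
// is taken at most once per cell (mirrors "Is it resolved already?" above).
static Klass* load_klass(Klass** got_cell, const char* encoded_name) {
  Klass* k = *got_cell;
  if (k == nullptr) {
    k = resolve_klass_by_symbol_stub(got_cell, encoded_name);  // slow path
  }
  return k;
}

int main() {
  char buf[64];
  size_t n = encode_symbol(buf, "Ljava/lang/String;");  // 2 + 18 bytes
  Klass* cell = nullptr;                                // simulated got cell
  Klass* k1 = load_klass(&cell, buf);                   // slow path resolves
  Klass* k2 = load_klass(&cell, buf);                   // fast path hits cell
  return (k1 == k2 && n == 20u) ? 0 : 1;
}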