src/share/vm/code/nmethod.cpp

*** 24,33 **** --- 24,34 ----
    #include "precompiled.hpp"
    #include "code/codeCache.hpp"
    #include "code/compiledIC.hpp"
    #include "code/dependencies.hpp"
+ #include "code/nativeInst.hpp"
    #include "code/nmethod.hpp"
    #include "code/scopeDesc.hpp"
    #include "compiler/abstractCompiler.hpp"
    #include "compiler/compileBroker.hpp"
    #include "compiler/compileLog.hpp"
*** 44,56 **** --- 45,75 ----
    #include "runtime/sweeper.hpp"
    #include "utilities/resourceHash.hpp"
    #include "utilities/dtrace.hpp"
    #include "utilities/events.hpp"
    #include "utilities/xmlstream.hpp"
+ #ifdef TARGET_ARCH_x86
+ # include "nativeInst_x86.hpp"
+ #endif
+ #ifdef TARGET_ARCH_sparc
+ # include "nativeInst_sparc.hpp"
+ #endif
+ #ifdef TARGET_ARCH_zero
+ # include "nativeInst_zero.hpp"
+ #endif
+ #ifdef TARGET_ARCH_arm
+ # include "nativeInst_arm.hpp"
+ #endif
+ #ifdef TARGET_ARCH_ppc
+ # include "nativeInst_ppc.hpp"
+ #endif
    #ifdef SHARK
    #include "shark/sharkCompiler.hpp"
    #endif
+ #if INCLUDE_JVMCI
+ #include "jvmci/jvmciJavaClasses.hpp"
+ #endif
  
    PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
  
    unsigned char nmethod::_global_unloading_clock = 0;
*** 82,91 **** --- 101,115 ----
      if (compiler() == NULL) {
        return false;
      }
      return compiler()->is_c1();
    }
  
+ bool nmethod::is_compiled_by_jvmci() const {
+   if (compiler() == NULL || method() == NULL)  return false;  // can happen during debug printing
+   if (is_native_method())  return false;
+   return compiler()->is_jvmci();
+ }
    bool nmethod::is_compiled_by_c2() const {
      if (compiler() == NULL) {
        return false;
      }
      return compiler()->is_c2();
*** 106,117 ****
    // (In the latter two cases, they like other stats are printed to the log only.)
  
    #ifndef PRODUCT
    // These variables are put into one block to reduce relocations
    // and make it simpler to print from the debugger.
! static
! struct nmethod_stats_struct {
      int nmethod_count;
      int total_size;
      int relocation_size;
      int consts_size;
      int insts_size;
--- 130,140 ----
    // (In the latter two cases, they like other stats are printed to the log only.)
  
    #ifndef PRODUCT
    // These variables are put into one block to reduce relocations
    // and make it simpler to print from the debugger.
! struct java_nmethod_stats_struct {
      int nmethod_count;
      int total_size;
      int relocation_size;
      int consts_size;
      int insts_size;
*** 120,181 ****
      int scopes_pcs_size;
      int dependencies_size;
      int handler_table_size;
      int nul_chk_table_size;
      int oops_size;
  
      void note_nmethod(nmethod* nm) {
        nmethod_count += 1;
        total_size          += nm->size();
        relocation_size     += nm->relocation_size();
        consts_size         += nm->consts_size();
        insts_size          += nm->insts_size();
        stub_size           += nm->stub_size();
        oops_size           += nm->oops_size();
        scopes_data_size    += nm->scopes_data_size();
        scopes_pcs_size     += nm->scopes_pcs_size();
        dependencies_size   += nm->dependencies_size();
        handler_table_size  += nm->handler_table_size();
        nul_chk_table_size  += nm->nul_chk_table_size();
      }
!   void print_nmethod_stats() {
        if (nmethod_count == 0)  return;
!     tty->print_cr("Statistics for %d bytecoded nmethods:", nmethod_count);
        if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
        if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
        if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
        if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
        if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
        if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
        if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
        if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
        if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
        if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
        if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
      }
  
      int native_nmethod_count;
      int native_total_size;
      int native_relocation_size;
      int native_insts_size;
      int native_oops_size;
      void note_native_nmethod(nmethod* nm) {
        native_nmethod_count += 1;
        native_total_size       += nm->size();
        native_relocation_size  += nm->relocation_size();
        native_insts_size       += nm->insts_size();
        native_oops_size        += nm->oops_size();
      }
      void print_native_nmethod_stats() {
        if (native_nmethod_count == 0)  return;
        tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
        if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
        if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
        if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
        if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
      }
  
      int pc_desc_resets;   // number of resets (= number of caches)
      int pc_desc_queries;  // queries to nmethod::find_pc_desc
      int pc_desc_approx;   // number of those which have approximate true
      int pc_desc_repeats;  // number of _pc_descs[0] hits
      int pc_desc_hits;     // number of LRU cache hits
--- 143,215 ----
      int scopes_pcs_size;
      int dependencies_size;
      int handler_table_size;
      int nul_chk_table_size;
      int oops_size;
+   int metadata_size;
  
      void note_nmethod(nmethod* nm) {
        nmethod_count += 1;
        total_size          += nm->size();
        relocation_size     += nm->relocation_size();
        consts_size         += nm->consts_size();
        insts_size          += nm->insts_size();
        stub_size           += nm->stub_size();
        oops_size           += nm->oops_size();
+     metadata_size       += nm->metadata_size();
        scopes_data_size    += nm->scopes_data_size();
        scopes_pcs_size     += nm->scopes_pcs_size();
        dependencies_size   += nm->dependencies_size();
        handler_table_size  += nm->handler_table_size();
        nul_chk_table_size  += nm->nul_chk_table_size();
      }
!   void print_nmethod_stats(const char* name) {
        if (nmethod_count == 0)  return;
!     tty->print_cr("Statistics for %d bytecoded nmethods for %s:", nmethod_count, name);
        if (total_size != 0)          tty->print_cr(" total in heap  = %d", total_size);
+     if (nmethod_count != 0)       tty->print_cr(" header         = %d", nmethod_count * sizeof(nmethod));
        if (relocation_size != 0)     tty->print_cr(" relocation     = %d", relocation_size);
        if (consts_size != 0)         tty->print_cr(" constants      = %d", consts_size);
        if (insts_size != 0)          tty->print_cr(" main code      = %d", insts_size);
        if (stub_size != 0)           tty->print_cr(" stub code      = %d", stub_size);
        if (oops_size != 0)           tty->print_cr(" oops           = %d", oops_size);
+     if (metadata_size != 0)       tty->print_cr(" metadata       = %d", metadata_size);
        if (scopes_data_size != 0)    tty->print_cr(" scopes data    = %d", scopes_data_size);
        if (scopes_pcs_size != 0)     tty->print_cr(" scopes pcs     = %d", scopes_pcs_size);
        if (dependencies_size != 0)   tty->print_cr(" dependencies   = %d", dependencies_size);
        if (handler_table_size != 0)  tty->print_cr(" handler table  = %d", handler_table_size);
        if (nul_chk_table_size != 0)  tty->print_cr(" nul chk table  = %d", nul_chk_table_size);
      }
+ };
  
+ struct native_nmethod_stats_struct {
      int native_nmethod_count;
      int native_total_size;
      int native_relocation_size;
      int native_insts_size;
      int native_oops_size;
+   int native_metadata_size;
      void note_native_nmethod(nmethod* nm) {
        native_nmethod_count += 1;
        native_total_size       += nm->size();
        native_relocation_size  += nm->relocation_size();
        native_insts_size       += nm->insts_size();
        native_oops_size        += nm->oops_size();
+     native_metadata_size    += nm->metadata_size();
      }
      void print_native_nmethod_stats() {
        if (native_nmethod_count == 0)  return;
        tty->print_cr("Statistics for %d native nmethods:", native_nmethod_count);
        if (native_total_size != 0)       tty->print_cr(" N. total size  = %d", native_total_size);
        if (native_relocation_size != 0)  tty->print_cr(" N. relocation  = %d", native_relocation_size);
        if (native_insts_size != 0)       tty->print_cr(" N. main code   = %d", native_insts_size);
        if (native_oops_size != 0)        tty->print_cr(" N. oops        = %d", native_oops_size);
+     if (native_metadata_size != 0)    tty->print_cr(" N. metadata    = %d", native_metadata_size);
      }
+ };
  
+ struct pc_nmethod_stats_struct {
      int pc_desc_resets;   // number of resets (= number of caches)
      int pc_desc_queries;  // queries to nmethod::find_pc_desc
      int pc_desc_approx;   // number of those which have approximate true
      int pc_desc_repeats;  // number of _pc_descs[0] hits
      int pc_desc_hits;     // number of LRU cache hits
*** 192,204 ****
                    pc_desc_resets,
                    pc_desc_queries, pc_desc_approx,
                    pc_desc_repeats, pc_desc_hits,
                    pc_desc_tests, pc_desc_searches, pc_desc_adds);
      }
! } nmethod_stats;
! #endif //PRODUCT
  
  
    //---------------------------------------------------------------------------------
  
  
    ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
--- 226,280 ----
                    pc_desc_resets,
                    pc_desc_queries, pc_desc_approx,
                    pc_desc_repeats, pc_desc_hits,
                    pc_desc_tests, pc_desc_searches, pc_desc_adds);
      }
! };
  
+ #ifdef COMPILER1
+ static java_nmethod_stats_struct c1_java_nmethod_stats;
+ #endif
+ #ifdef COMPILER2
+ static java_nmethod_stats_struct c2_java_nmethod_stats;
+ #endif
+ #if INCLUDE_JVMCI
+ static java_nmethod_stats_struct jvmci_java_nmethod_stats;
+ #endif
+ #ifdef SHARK
+ static java_nmethod_stats_struct shark_java_nmethod_stats;
+ #endif
+ static java_nmethod_stats_struct unknown_java_nmethod_stats;
+ 
+ static native_nmethod_stats_struct native_nmethod_stats;
+ static pc_nmethod_stats_struct pc_nmethod_stats;
+ 
+ static void note_java_nmethod(nmethod* nm) {
+ #ifdef COMPILER1
+   if (nm->is_compiled_by_c1()) {
+     c1_java_nmethod_stats.note_nmethod(nm);
+   } else
+ #endif
+ #ifdef COMPILER2
+   if (nm->is_compiled_by_c2()) {
+     c2_java_nmethod_stats.note_nmethod(nm);
+   } else
+ #endif
+ #if INCLUDE_JVMCI
+   if (nm->is_compiled_by_jvmci()) {
+     jvmci_java_nmethod_stats.note_nmethod(nm);
+   } else
+ #endif
+ #ifdef SHARK
+   if (nm->is_compiled_by_shark()) {
+     shark_java_nmethod_stats.note_nmethod(nm);
+   } else
+ #endif
+   {
+     unknown_java_nmethod_stats.note_nmethod(nm);
+   }
+ }
+ #endif // !PRODUCT
  
    //---------------------------------------------------------------------------------
  
    ExceptionCache::ExceptionCache(Handle exception, address pc, address handler) {
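Aside on the hunk above (editorial sketch, not part of the webrev): once the preprocessor resolves the #ifdef guards, note_java_nmethod() collapses into an ordinary if/else ladder; the trailing "} else" before each #endif is what chains the branches. A minimal standalone model of the resulting shape for a build with only C1 and C2 enabled, with invented names standing in for the nmethod API:

    #include <cstdio>

    struct Stats { int count; void note() { count++; } };
    static Stats c1_stats, c2_stats, unknown_stats;

    struct NM { bool c1, c2; };  // stand-in for nmethod::is_compiled_by_*()

    static void note_java_nm(NM* nm) {
      if (nm->c1) {
        c1_stats.note();
      } else if (nm->c2) {
        c2_stats.note();
      } else {
        unknown_stats.note();    // neither compiler claimed it
      }
    }

    int main() {
      NM nm = { false, true };
      note_java_nm(&nm);
      printf("c2 count = %d\n", c2_stats.count);  // prints: c2 count = 1
      return 0;
    }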
*** 274,284 ****
  
    //-----------------------------------------------------------------------------
  
    // Helper used by both find_pc_desc methods.
    static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
!   NOT_PRODUCT(++nmethod_stats.pc_desc_tests);
      if (!approximate)
        return pc->pc_offset() == pc_offset;
      else
        return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
    }
--- 350,360 ----
  
    //-----------------------------------------------------------------------------
  
    // Helper used by both find_pc_desc methods.
    static inline bool match_desc(PcDesc* pc, int pc_offset, bool approximate) {
!   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_tests);
      if (!approximate)
        return pc->pc_offset() == pc_offset;
      else
        return (pc-1)->pc_offset() < pc_offset && pc_offset <= pc->pc_offset();
    }
*** 286,305 ****
    void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
      if (initial_pc_desc == NULL) {
        _pc_descs[0] = NULL; // native method; no PcDescs at all
        return;
      }
!   NOT_PRODUCT(++nmethod_stats.pc_desc_resets);
      // reset the cache by filling it with benign (non-null) values
      assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
      for (int i = 0; i < cache_size; i++)
        _pc_descs[i] = initial_pc_desc;
    }
  
    PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
!   NOT_PRODUCT(++nmethod_stats.pc_desc_queries);
!   NOT_PRODUCT(if (approximate) ++nmethod_stats.pc_desc_approx);
      // Note: one might think that caching the most recently
      // read value separately would be a win, but one would be
      // wrong.  When many threads are updating it, the cache
      // line it's in would bounce between caches, negating
--- 362,381 ----
    void PcDescCache::reset_to(PcDesc* initial_pc_desc) {
      if (initial_pc_desc == NULL) {
        _pc_descs[0] = NULL; // native method; no PcDescs at all
        return;
      }
!   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_resets);
      // reset the cache by filling it with benign (non-null) values
      assert(initial_pc_desc->pc_offset() < 0, "must be sentinel");
      for (int i = 0; i < cache_size; i++)
        _pc_descs[i] = initial_pc_desc;
    }
  
    PcDesc* PcDescCache::find_pc_desc(int pc_offset, bool approximate) {
!   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_queries);
!   NOT_PRODUCT(if (approximate) ++pc_nmethod_stats.pc_desc_approx);
      // Note: one might think that caching the most recently
      // read value separately would be a win, but one would be
      // wrong.  When many threads are updating it, the cache
      // line it's in would bounce between caches, negating
*** 311,340 ****
      // Step one:  Check the most recently added value.
      res = _pc_descs[0];
      if (res == NULL) return NULL;  // native method; no PcDescs at all
      if (match_desc(res, pc_offset, approximate)) {
!     NOT_PRODUCT(++nmethod_stats.pc_desc_repeats);
        return res;
      }
  
      // Step two:  Check the rest of the LRU cache.
      for (int i = 1; i < cache_size; ++i) {
        res = _pc_descs[i];
        if (res->pc_offset() < 0) break;  // optimization: skip empty cache
        if (match_desc(res, pc_offset, approximate)) {
!       NOT_PRODUCT(++nmethod_stats.pc_desc_hits);
          return res;
        }
      }
  
      // Report failure.
      return NULL;
    }
  
    void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
!   NOT_PRODUCT(++nmethod_stats.pc_desc_adds);
      // Update the LRU cache by shifting pc_desc forward.
      for (int i = 0; i < cache_size; i++) {
        PcDesc* next = _pc_descs[i];
        _pc_descs[i] = pc_desc;
        pc_desc = next;
--- 387,416 ----
      // Step one:  Check the most recently added value.
      res = _pc_descs[0];
      if (res == NULL) return NULL;  // native method; no PcDescs at all
      if (match_desc(res, pc_offset, approximate)) {
!     NOT_PRODUCT(++pc_nmethod_stats.pc_desc_repeats);
        return res;
      }
  
      // Step two:  Check the rest of the LRU cache.
      for (int i = 1; i < cache_size; ++i) {
        res = _pc_descs[i];
        if (res->pc_offset() < 0) break;  // optimization: skip empty cache
        if (match_desc(res, pc_offset, approximate)) {
!       NOT_PRODUCT(++pc_nmethod_stats.pc_desc_hits);
          return res;
        }
      }
  
      // Report failure.
      return NULL;
    }
  
    void PcDescCache::add_pc_desc(PcDesc* pc_desc) {
!   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_adds);
      // Update the LRU cache by shifting pc_desc forward.
      for (int i = 0; i < cache_size; i++) {
        PcDesc* next = _pc_descs[i];
        _pc_descs[i] = pc_desc;
        pc_desc = next;
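Aside on the cache logic above (editorial sketch, not part of the webrev): add_pc_desc() implements a small most-recently-used cache by rippling entries one slot toward the back, so _pc_descs[0] always holds the newest descriptor and the oldest entry falls off the end. A self-contained model, with ints standing in for PcDesc* slots:

    #include <cstdio>

    static const int cache_size = 4;
    static int descs[cache_size];            // stand-in for PcDesc* slots

    static void add_desc(int d) {
      for (int i = 0; i < cache_size; i++) {
        int next = descs[i];                 // previous occupant...
        descs[i] = d;
        d = next;                            // ...moves one slot down
      }
    }

    int main() {
      for (int d = 1; d <= 5; d++) add_desc(d);
      for (int i = 0; i < cache_size; i++) printf("%d ", descs[i]);
      // prints: 5 4 3 2  -- the oldest entry (1) has been shifted out
      return 0;
    }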
*** 476,485 **** --- 552,565 ----
      _scavenge_root_state     = 0;
      _compiler                = NULL;
    #if INCLUDE_RTM_OPT
      _rtm_state               = NoRTM;
    #endif
+ #if INCLUDE_JVMCI
+   _jvmci_installed_code    = NULL;
+   _speculation_log         = NULL;
+ #endif
    }
  
    nmethod* nmethod::new_native_nmethod(methodHandle method,
      int compile_id,
      CodeBuffer *code_buffer,
*** 501,511 ****
      nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
                                                              compile_id, &offsets,
                                                              code_buffer, frame_size,
                                                              basic_lock_owner_sp_offset,
                                                              basic_lock_sp_offset, oop_maps);
!   NOT_PRODUCT(if (nm != NULL)  nmethod_stats.note_native_nmethod(nm));
      if ((PrintAssembly || CompilerOracle::should_print(method)) && nm != NULL) {
        Disassembler::decode(nm);
      }
    }
    // verify nmethod
--- 581,591 ----
      nm = new (native_nmethod_size, CompLevel_none) nmethod(method(), native_nmethod_size,
                                                              compile_id, &offsets,
                                                              code_buffer, frame_size,
                                                              basic_lock_owner_sp_offset,
                                                              basic_lock_sp_offset, oop_maps);
!   NOT_PRODUCT(if (nm != NULL)  native_nmethod_stats.note_native_nmethod(nm));
      if ((PrintAssembly || CompilerOracle::should_print(method)) && nm != NULL) {
        Disassembler::decode(nm);
      }
    }
    // verify nmethod
*** 529,538 **** --- 609,622 ----
      OopMapSet* oop_maps,
      ExceptionHandlerTable* handler_table,
      ImplicitExceptionTable* nul_chk_table,
      AbstractCompiler* compiler,
      int comp_level
+ #if INCLUDE_JVMCI
+   , Handle installed_code,
+   Handle speculationLog
+ #endif
    )
    {
      assert(debug_info->oop_recorder() == code_buffer->oop_recorder(), "shared OR");
      code_buffer->finalize_oop_references(method);
      // create nmethod
*** 551,561 ****
              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
              oop_maps,
              handler_table,
              nul_chk_table,
              compiler,
!             comp_level);
  
      if (nm != NULL) {
        // To make dependency checking during class loading fast, record
        // the nmethod dependencies in the classes it is dependent on.
        // This allows the dependency checking code to simply walk the
--- 635,650 ----
              orig_pc_offset, debug_info, dependencies, code_buffer, frame_size,
              oop_maps,
              handler_table,
              nul_chk_table,
              compiler,
!             comp_level
! #if INCLUDE_JVMCI
!             , installed_code,
!             speculationLog
! #endif
!             );
  
      if (nm != NULL) {
        // To make dependency checking during class loading fast, record
        // the nmethod dependencies in the classes it is dependent on.
        // This allows the dependency checking code to simply walk the
*** 576,586 ****
          }
          // record this nmethod as dependent on this klass
          InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
        }
      }
!   NOT_PRODUCT(nmethod_stats.note_nmethod(nm));
      if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
        Disassembler::decode(nm);
      }
    }
  }
--- 665,675 ----
          }
          // record this nmethod as dependent on this klass
          InstanceKlass::cast(klass)->add_dependent_nmethod(nm);
        }
      }
!   NOT_PRODUCT(if (nm != NULL) note_java_nmethod(nm));
      if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) {
        Disassembler::decode(nm);
      }
    }
  }
*** 591,601 ****
      nm->log_new_nmethod();
    }
    return nm;
  }
  
! 
  // For native wrappers
  nmethod::nmethod(
    Method* method,
    int nmethod_size,
    int compile_id,
--- 680,693 ----
      nm->log_new_nmethod();
    }
    return nm;
  }
  
! #ifdef _MSC_VER
! #pragma warning(push)
! #pragma warning(disable:4355) // warning C4355: 'this' : used in base member initializer list
! #endif
  // For native wrappers
  nmethod::nmethod(
    Method* method,
    int nmethod_size,
    int compile_id,
*** 681,690 **** --- 773,786 ----
          xtty->tail("print_native_nmethod");
        }
      }
    }
  
+ #ifdef _MSC_VER
+ #pragma warning(pop)
+ #endif
+ 
    void* nmethod::operator new(size_t size, int nmethod_size, int comp_level) throw () {
      return CodeCache::allocate(nmethod_size, CodeCache::get_code_blob_type(comp_level));
    }
  
    nmethod::nmethod(
*** 701,710 **** --- 797,810 ----
      OopMapSet* oop_maps,
      ExceptionHandlerTable* handler_table,
      ImplicitExceptionTable* nul_chk_table,
      AbstractCompiler* compiler,
      int comp_level
+ #if INCLUDE_JVMCI
+   , Handle installed_code,
+   Handle speculation_log
+ #endif
      )
      : CodeBlob("nmethod", code_buffer, sizeof(nmethod),
                 nmethod_size, offsets->value(CodeOffsets::Frame_Complete), frame_size, oop_maps),
      _native_receiver_sp_offset(in_ByteSize(-1)),
      _native_basic_lock_sp_offset(in_ByteSize(-1))
*** 725,743 **** --- 825,870 ----
      // Section offsets
      _consts_offset = content_offset() + code_buffer->total_offset_of(code_buffer->consts());
      _stub_offset   = content_offset() + code_buffer->total_offset_of(code_buffer->stubs());
+ #if INCLUDE_JVMCI
+     _jvmci_installed_code = installed_code();
+     _speculation_log = (instanceOop)speculation_log();
+ 
+     if (compiler->is_jvmci()) {
+       // JVMCI might not produce any stub sections
+       if (offsets->value(CodeOffsets::Exceptions) != -1) {
+         _exception_offset = code_offset() + offsets->value(CodeOffsets::Exceptions);
+       } else {
+         _exception_offset = -1;
+       }
+       if (offsets->value(CodeOffsets::Deopt) != -1) {
+         _deoptimize_offset = code_offset() + offsets->value(CodeOffsets::Deopt);
+       } else {
+         _deoptimize_offset = -1;
+       }
+       if (offsets->value(CodeOffsets::DeoptMH) != -1) {
+         _deoptimize_mh_offset = code_offset() + offsets->value(CodeOffsets::DeoptMH);
+       } else {
+         _deoptimize_mh_offset = -1;
+       }
+     } else {
+ #endif
      // Exception handler and deopt handler are in the stub section
      assert(offsets->value(CodeOffsets::Exceptions) != -1, "must be set");
      assert(offsets->value(CodeOffsets::Deopt     ) != -1, "must be set");
+ 
      _exception_offset        = _stub_offset          + offsets->value(CodeOffsets::Exceptions);
      _deoptimize_offset       = _stub_offset          + offsets->value(CodeOffsets::Deopt);
      if (offsets->value(CodeOffsets::DeoptMH) != -1) {
        _deoptimize_mh_offset  = _stub_offset          + offsets->value(CodeOffsets::DeoptMH);
      } else {
        _deoptimize_mh_offset  = -1;
+ #if INCLUDE_JVMCI
+     }
+ #endif
      }
      if (offsets->value(CodeOffsets::UnwindHandler) != -1) {
        _unwind_handler_offset = code_offset() + offsets->value(CodeOffsets::UnwindHandler);
      } else {
        _unwind_handler_offset = -1;
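Aside on the hunk above (editorial sketch, not part of the webrev): JVMCI-compiled nmethods may omit the stub section, so each handler offset is taken from the recorded CodeOffsets value relative to code_offset() when present, and stored as -1 to mean "no handler"; the C1/C2 path keeps asserting that the stub-section offsets exist. A one-function restatement of the sentinel convention, with invented names:

    // Returns base + recorded when the handler was emitted, -1 (absent) otherwise.
    static int handler_offset(int recorded, int base) {
      return (recorded != -1) ? base + recorded : -1;
    }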
*** 777,800 ****
      handler_table->copy_to(this);
      nul_chk_table->copy_to(this);
  
      // we use the information of entry points to find out if a method is
      // static or non static
!     assert(compiler->is_c2() ||
             _method->is_static() == (entry_point() == _verified_entry_point),
             " entry points must be same for static methods and vice versa");
    }
  
!   bool printnmethods = PrintNMethods
      || CompilerOracle::should_print(_method)
      || CompilerOracle::has_option_string(_method, "PrintNMethods");
    if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
      print_nmethod(printnmethods);
    }
  }
  
- 
  // Print a short set of xml attributes to identify this nmethod. The
  // output should be embedded in some other element.
  void nmethod::log_identity(xmlStream* log) const {
    log->print(" compile_id='%d'", compile_id());
    const char* nm_kind = compile_kind();
--- 904,926 ----
      handler_table->copy_to(this);
      nul_chk_table->copy_to(this);
  
      // we use the information of entry points to find out if a method is
      // static or non static
!     assert(compiler->is_c2() || compiler->is_jvmci() ||
             _method->is_static() == (entry_point() == _verified_entry_point),
             " entry points must be same for static methods and vice versa");
    }
  
!   bool printnmethods = PrintNMethods || PrintNMethodsAtLevel == _comp_level
      || CompilerOracle::should_print(_method)
      || CompilerOracle::has_option_string(_method, "PrintNMethods");
    if (printnmethods || PrintDebugInfo || PrintRelocations || PrintDependencies || PrintExceptionHandlers) {
      print_nmethod(printnmethods);
    }
  }
  
  // Print a short set of xml attributes to identify this nmethod. The
  // output should be embedded in some other element.
  void nmethod::log_identity(xmlStream* log) const {
    log->print(" compile_id='%d'", compile_id());
    const char* nm_kind = compile_kind();
*** 831,840 **** --- 957,967 ----
      LOG_OFFSET(xtty, scopes_pcs);
      LOG_OFFSET(xtty, dependencies);
      LOG_OFFSET(xtty, handler_table);
      LOG_OFFSET(xtty, nul_chk_table);
      LOG_OFFSET(xtty, oops);
+     LOG_OFFSET(xtty, metadata);
  
      xtty->method(method());
      xtty->stamp();
      xtty->end_elem();
    }
*** 872,888 ****
        print_pcs();
        if (oop_maps()) {
          oop_maps()->print();
        }
      }
!   if (PrintDebugInfo) {
        print_scopes();
      }
!   if (PrintRelocations) {
        print_relocations();
      }
!   if (PrintDependencies) {
        print_dependencies();
      }
      if (PrintExceptionHandlers) {
        print_handler_table();
        print_nul_chk_table();
--- 999,1015 ----
        print_pcs();
        if (oop_maps()) {
          oop_maps()->print();
        }
      }
!   if (PrintDebugInfo || CompilerOracle::has_option_string(_method, "PrintDebugInfo")) {
        print_scopes();
      }
!   if (PrintRelocations || CompilerOracle::has_option_string(_method, "PrintRelocations")) {
        print_relocations();
      }
!   if (PrintDependencies || CompilerOracle::has_option_string(_method, "PrintDependencies")) {
        print_dependencies();
      }
      if (PrintExceptionHandlers) {
        print_handler_table();
        print_nul_chk_table();
*** 988,998 ****
  
    ScopeDesc* nmethod::scope_desc_at(address pc) {
      PcDesc* pd = pc_desc_at(pc);
      guarantee(pd != NULL, "scope must be present");
      return new ScopeDesc(this, pd->scope_decode_offset(),
!                          pd->obj_decode_offset(), pd->should_reexecute(),
                           pd->return_oop());
    }
  
    void nmethod::clear_inline_caches() {
--- 1115,1125 ----
  
    ScopeDesc* nmethod::scope_desc_at(address pc) {
      PcDesc* pd = pc_desc_at(pc);
      guarantee(pd != NULL, "scope must be present");
      return new ScopeDesc(this, pd->scope_decode_offset(),
!                          pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                           pd->return_oop());
    }
  
    void nmethod::clear_inline_caches() {
*** 1159,1169 ****
      return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
             !is_locked_by_vm();
    }
  
    void nmethod::inc_decompile_count() {
!   if (!is_compiled_by_c2()) return;
      // Could be gated by ProfileTraps, but do not bother...
      Method* m = method();
      if (m == NULL)  return;
      MethodData* mdo = m->method_data();
      if (mdo == NULL)  return;
--- 1286,1296 ----
      return stack_traversal_mark()+1 < NMethodSweeper::traversal_count() &&
             !is_locked_by_vm();
    }
  
    void nmethod::inc_decompile_count() {
!   if (!is_compiled_by_c2() && !is_compiled_by_jvmci()) return;
      // Could be gated by ProfileTraps, but do not bother...
      Method* m = method();
      if (m == NULL)  return;
      MethodData* mdo = m->method_data();
      if (mdo == NULL)  return;
*** 1223,1232 **** --- 1350,1360 ----
      if (_method->code() == this) {
        _method->clear_code(); // Break a cycle
      }
      _method = NULL;            // Clear the method of this dead nmethod
    }
+ 
    // Make the class unloaded - i.e., change state and notify sweeper
    assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
    if (is_in_use()) {
      // Transitioning directly from live to unloaded -- so
      // we need to force a cache clean-up; remember this
*** 1235,1244 **** --- 1363,1384 ----
    }
  
    // Unregister must be done before the state change
    Universe::heap()->unregister_nmethod(this);
  
+ #if INCLUDE_JVMCI
+   // The method can only be unloaded after the pointer to the installed code
+   // Java wrapper is no longer alive. Here we need to clear out this weak
+   // reference to the dead object. Nulling out the reference has to happen
+   // after the method is unregistered since the original value may be still
+   // tracked by the rset.
+   if (_jvmci_installed_code != NULL) {
+     InstalledCode::set_address(_jvmci_installed_code, 0);
+     _jvmci_installed_code = NULL;
+   }
+ #endif
+ 
    _state = unloaded;
  
    // Log the unloading.
    log_state_change();
*** 1398,1410 ****
      // nmethod is in zombie state
      set_method(NULL);
    } else {
      assert(state == not_entrant, "other cases may need to be handled differently");
    }
  
    if (TraceCreateZombies) {
!     tty->print_cr("nmethod <" INTPTR_FORMAT "> code made %s", this, (state == not_entrant) ? "not entrant" : "zombie");
    }
  
    NMethodSweeper::report_state_change(this);
    return true;
  }
--- 1538,1557 ----
      // nmethod is in zombie state
      set_method(NULL);
    } else {
      assert(state == not_entrant, "other cases may need to be handled differently");
    }
+ #if INCLUDE_JVMCI
+   if (_jvmci_installed_code != NULL) {
+     // Break the link between nmethod and InstalledCode such that the nmethod can subsequently be flushed safely.
+     InstalledCode::set_address(_jvmci_installed_code, 0);
+   }
+ #endif
  
    if (TraceCreateZombies) {
!     ResourceMark m;
!     tty->print_cr("nmethod <" INTPTR_FORMAT "> %s code made %s", this, this->method() ? this->method()->name_and_sig_as_C_string() : "null", (state == not_entrant) ? "not entrant" : "zombie");
    }
  
    NMethodSweeper::report_state_change(this);
    return true;
  }
*** 1688,1697 **** --- 1835,1871 ----
      if (can_unload(is_alive, p, unloading_occurred)) {
        return;
      }
    }
  }
  
+ #if INCLUDE_JVMCI
+   // Follow JVMCI method
+   BarrierSet* bs = Universe::heap()->barrier_set();
+   if (_jvmci_installed_code != NULL) {
+     if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
+       if (!is_alive->do_object_b(_jvmci_installed_code)) {
+         bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
+         _jvmci_installed_code = NULL;
+         bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
+       }
+     } else {
+       if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
+         return;
+       }
+     }
+   }
+ 
+   if (_speculation_log != NULL) {
+     if (!is_alive->do_object_b(_speculation_log)) {
+       bs->write_ref_nmethod_pre(&_speculation_log, this);
+       _speculation_log = NULL;
+       bs->write_ref_nmethod_post(&_speculation_log, this);
+     }
+   }
+ #endif
+ 
+   // Ensure that all metadata is still alive
    verify_metadata_loaders(low_boundary, is_alive);
  }
  
  template <class CompiledICorStaticCall>
*** 1770,1779 **** --- 1944,1974 ----
      // call to post_compiled_method_unload() so that the unloading
      // of this nmethod is reported.
      unloading_occurred = true;
    }
  
+ #if INCLUDE_JVMCI
+   // Follow JVMCI method
+   if (_jvmci_installed_code != NULL) {
+     if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
+       if (!is_alive->do_object_b(_jvmci_installed_code)) {
+         _jvmci_installed_code = NULL;
+       }
+     } else {
+       if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
+         return false;
+       }
+     }
+   }
+ 
+   if (_speculation_log != NULL) {
+     if (!is_alive->do_object_b(_speculation_log)) {
+       _speculation_log = NULL;
+     }
+   }
+ #endif
+ 
    // Exception cache
    clean_exception_cache(is_alive);
  
    bool is_unloaded = false;
    bool postponed = false;
*** 1827,1836 **** --- 2022,2057 ----
    if (is_unloaded) {
      return postponed;
    }
  
+ #if INCLUDE_JVMCI
+   // Follow JVMCI method
+   BarrierSet* bs = Universe::heap()->barrier_set();
+   if (_jvmci_installed_code != NULL) {
+     if (_jvmci_installed_code->is_a(HotSpotNmethod::klass()) && HotSpotNmethod::isDefault(_jvmci_installed_code)) {
+       if (!is_alive->do_object_b(_jvmci_installed_code)) {
+         bs->write_ref_nmethod_pre(&_jvmci_installed_code, this);
+         _jvmci_installed_code = NULL;
+         bs->write_ref_nmethod_post(&_jvmci_installed_code, this);
+       }
+     } else {
+       if (can_unload(is_alive, (oop*)&_jvmci_installed_code, unloading_occurred)) {
+         is_unloaded = true;
+       }
+     }
+   }
+ 
+   if (_speculation_log != NULL) {
+     if (!is_alive->do_object_b(_speculation_log)) {
+       bs->write_ref_nmethod_pre(&_speculation_log, this);
+       _speculation_log = NULL;
+       bs->write_ref_nmethod_post(&_speculation_log, this);
+     }
+   }
+ #endif
+ 
    // Ensure that all metadata is still alive
    verify_metadata_loaders(low_boundary, is_alive);
  
    return postponed;
  }
*** 2011,2020 **** --- 2232,2250 ----
      low_boundary += NativeJump::instruction_size;
      // %%% Note:  On SPARC we patch only a 4-byte trap, not a full NativeJump.
      // (See comment above.)
    }
  
+ #if INCLUDE_JVMCI
+   if (_jvmci_installed_code != NULL) {
+     f->do_oop((oop*) &_jvmci_installed_code);
+   }
+   if (_speculation_log != NULL) {
+     f->do_oop((oop*) &_speculation_log);
+   }
+ #endif
+ 
    RelocIterator iter(this, low_boundary);
    while (iter.next()) {
      if (iter.type() == relocInfo::oop_type ) {
        oop_Relocation* r = iter.oop_reloc();
*** 2135,2145 ****
  
  // Method that knows how to preserve outgoing arguments at call. This method must be
  // called with a frame corresponding to a Java invoke
  void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  #ifndef SHARK
!   if (!method()->is_native()) {
      SimpleScopeDesc ssd(this, fr.pc());
      Bytecode_invoke call(ssd.method(), ssd.bci());
      bool has_receiver = call.has_receiver();
      bool has_appendix = call.has_appendix();
      Symbol* signature = call.signature();
--- 2365,2375 ----
  
  // Method that knows how to preserve outgoing arguments at call. This method must be
  // called with a frame corresponding to a Java invoke
  void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
  #ifndef SHARK
!   if (method() != NULL && !method()->is_native()) {
      SimpleScopeDesc ssd(this, fr.pc());
      Bytecode_invoke call(ssd.method(), ssd.bci());
      bool has_receiver = call.has_receiver();
      bool has_appendix = call.has_appendix();
      Symbol* signature = call.signature();
*** 2201,2219 ****
  void nmethod::copy_scopes_data(u_char* buffer, int size) {
    assert(scopes_data_size() >= size, "oob");
    memcpy(scopes_data_begin(), buffer, size);
  }
  
  #ifdef ASSERT
  static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
    PcDesc* lower = nm->scopes_pcs_begin();
    PcDesc* upper = nm->scopes_pcs_end();
    lower += 1; // exclude initial sentinel
    PcDesc* res = NULL;
    for (PcDesc* p = lower; p < upper; p++) {
!     NOT_PRODUCT(--nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
      if (match_desc(p, pc_offset, approximate)) {
        if (res == NULL)
          res = p;
        else
          res = (PcDesc*) badAddress;
--- 2431,2457 ----
  void nmethod::copy_scopes_data(u_char* buffer, int size) {
    assert(scopes_data_size() >= size, "oob");
    memcpy(scopes_data_begin(), buffer, size);
  }
  
+ // When using JVMCI the address might be off by the size of a call instruction.
+ bool nmethod::is_deopt_entry(address pc) {
+   return pc == deopt_handler_begin()
+ #if INCLUDE_JVMCI
+     || pc == (deopt_handler_begin() + NativeCall::instruction_size)
+ #endif
+     ;
+ }
+ 
  #ifdef ASSERT
  static PcDesc* linear_search(nmethod* nm, int pc_offset, bool approximate) {
    PcDesc* lower = nm->scopes_pcs_begin();
    PcDesc* upper = nm->scopes_pcs_end();
    lower += 1; // exclude initial sentinel
    PcDesc* res = NULL;
    for (PcDesc* p = lower; p < upper; p++) {
!     NOT_PRODUCT(--pc_nmethod_stats.pc_desc_tests);  // don't count this call to match_desc
      if (match_desc(p, pc_offset, approximate)) {
        if (res == NULL)
          res = p;
        else
          res = (PcDesc*) badAddress;
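Aside on is_deopt_entry() above (editorial sketch, not part of the webrev): with JVMCI the recorded pc can point just past the call into the deopt handler, so the check accepts both the handler entry and entry plus one call-instruction size. A standalone sketch, assuming an x86-style 5-byte call purely for illustration (the real code takes the size from NativeCall::instruction_size):

    typedef unsigned char* address;

    static const int kCallInsnSize = 5;   // assumed size, illustration only

    static bool is_deopt_entry_sketch(address pc, address deopt_begin) {
      return pc == deopt_begin
          || pc == deopt_begin + kCallInsnSize;   // JVMCI: pc after the call
    }

    int main() {
      unsigned char code[16];
      address deopt = code + 4;
      return is_deopt_entry_sketch(deopt + kCallInsnSize, deopt) ? 0 : 1;  // exits 0
    }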
*** 2256,2266 ****
    assert(upper->pc_offset() >= pc_offset, "sanity")
    assert_LU_OK;
  
    // Use the last successful return as a split point.
    PcDesc* mid = _pc_desc_cache.last_pc_desc();
!   NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
    if (mid->pc_offset() < pc_offset) {
      lower = mid;
    } else {
      upper = mid;
    }
--- 2494,2504 ----
    assert(upper->pc_offset() >= pc_offset, "sanity")
    assert_LU_OK;
  
    // Use the last successful return as a split point.
    PcDesc* mid = _pc_desc_cache.last_pc_desc();
!   NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
    if (mid->pc_offset() < pc_offset) {
      lower = mid;
    } else {
      upper = mid;
    }
*** 2269,2279 ****
    const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
    const int RADIX = (1 << LOG2_RADIX);
    for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
      while ((mid = lower + step) < upper) {
        assert_LU_OK;
!       NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
        if (mid->pc_offset() < pc_offset) {
          lower = mid;
        } else {
          upper = mid;
          break;
--- 2507,2517 ----
    const int LOG2_RADIX = 4 /*smaller steps in debug mode:*/ debug_only(-1);
    const int RADIX = (1 << LOG2_RADIX);
    for (int step = (1 << (LOG2_RADIX*3)); step > 1; step >>= LOG2_RADIX) {
      while ((mid = lower + step) < upper) {
        assert_LU_OK;
!       NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
        if (mid->pc_offset() < pc_offset) {
          lower = mid;
        } else {
          upper = mid;
          break;
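Aside on the search above (editorial sketch, not part of the webrev): step starts at 1 << (LOG2_RADIX*3) and is divided by RADIX each round, so the giant steps are 4096, 256, 16 in product builds (LOG2_RADIX == 4) and 512, 64, 8 in debug builds (LOG2_RADIX == 3) before the final linear sweep. A two-line check of that schedule:

    #include <cstdio>

    int main() {
      const int LOG2_RADIX = 4;                              // 3 in debug builds
      for (int step = 1 << (LOG2_RADIX * 3); step > 1; step >>= LOG2_RADIX)
        printf("%d ", step);                                 // prints: 4096 256 16
      return 0;
    }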
*** 2284,2294 ****
  
    // Sneak up on the value with a linear search of length ~16.
    while (true) {
      assert_LU_OK;
      mid = lower + 1;
!     NOT_PRODUCT(++nmethod_stats.pc_desc_searches);
      if (mid->pc_offset() < pc_offset) {
        lower = mid;
      } else {
        upper = mid;
        break;
--- 2522,2532 ----
  
    // Sneak up on the value with a linear search of length ~16.
    while (true) {
      assert_LU_OK;
      mid = lower + 1;
!     NOT_PRODUCT(++pc_nmethod_stats.pc_desc_searches);
      if (mid->pc_offset() < pc_offset) {
        lower = mid;
      } else {
        upper = mid;
        break;
*** 2471,2481 ****
    if (nm == NULL)  return;
    Atomic::dec(&nm->_lock_count);
    assert(nm->_lock_count >= 0, "unmatched nmethod lock/unlock");
  }
  
- 
  // -----------------------------------------------------------------------------
  // nmethod::get_deopt_original_pc
  //
  // Return the original PC for the given PC if:
  // (a) the given PC belongs to a nmethod and
--- 2709,2718 ----
*** 2585,2595 ****
    }
  
    PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
    assert(pd != NULL, "PcDesc must exist");
    for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
!                                      pd->obj_decode_offset(), pd->should_reexecute(),
                                       pd->return_oop());
         !sd->is_top(); sd = sd->sender()) {
      sd->verify();
    }
  }
--- 2822,2832 ----
    }
  
    PcDesc* pd = pc_desc_at(nativeCall_at(call_site)->return_address());
    assert(pd != NULL, "PcDesc must exist");
    for (ScopeDesc* sd = new ScopeDesc(this, pd->scope_decode_offset(),
!                                      pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
                                       pd->return_oop());
         !sd->is_top(); sd = sd->sender()) {
      sd->verify();
    }
  }
*** 2678,2687 **** --- 2915,2926 ----
      tty->print("(c1) ");
    } else if (is_compiled_by_c2()) {
      tty->print("(c2) ");
    } else if (is_compiled_by_shark()) {
      tty->print("(shark) ");
+   } else if (is_compiled_by_jvmci()) {
+     tty->print("(JVMCI) ");
    } else {
      tty->print("(nm) ");
    }
    print_on(tty, NULL);
*** 2762,2772 **** --- 3001,3014 ----
  
    for (PcDesc* p = scopes_pcs_begin(); p < scopes_pcs_end(); p++) {
      if (p->scope_decode_offset() == DebugInformationRecorder::serialized_null)
        continue;
  
      ScopeDesc* sd = scope_desc_at(p->real_pc(this));
+     while (sd != NULL) {
        sd->print_on(tty, p);
+       sd = sd->sender();
+     }
    }
  }
  
  void nmethod::print_dependencies() {
    ResourceMark rm;
*** 2879,2900 ****
  
  // Return a the last scope in (begin..end]
  ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
    PcDesc* p = pc_desc_near(begin+1);
    if (p != NULL && p->real_pc(this) <= end) {
      return new ScopeDesc(this, p->scope_decode_offset(),
!                          p->obj_decode_offset(), p->should_reexecute(),
                           p->return_oop());
    }
    return NULL;
  }
  
  void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
    if (block_begin == entry_point())              stream->print_cr("[Entry Point]");
    if (block_begin == verified_entry_point())     stream->print_cr("[Verified Entry Point]");
!   if (block_begin == exception_begin())          stream->print_cr("[Exception Handler]");
    if (block_begin == stub_begin())               stream->print_cr("[Stub Code]");
!   if (block_begin == deopt_handler_begin())      stream->print_cr("[Deopt Handler Code]");
  
    if (has_method_handle_invokes())
      if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]");
  
    if (block_begin == consts_begin())             stream->print_cr("[Constants]");
--- 3121,3142 ----
  
  // Return a the last scope in (begin..end]
  ScopeDesc* nmethod::scope_desc_in(address begin, address end) {
    PcDesc* p = pc_desc_near(begin+1);
    if (p != NULL && p->real_pc(this) <= end) {
      return new ScopeDesc(this, p->scope_decode_offset(),
!                          p->obj_decode_offset(), p->should_reexecute(), p->rethrow_exception(),
                           p->return_oop());
    }
    return NULL;
  }
  
  void nmethod::print_nmethod_labels(outputStream* stream, address block_begin) const {
    if (block_begin == entry_point())              stream->print_cr("[Entry Point]");
    if (block_begin == verified_entry_point())     stream->print_cr("[Verified Entry Point]");
!   if (JVMCI_ONLY(_exception_offset >= 0 &&) block_begin == exception_begin())          stream->print_cr("[Exception Handler]");
    if (block_begin == stub_begin())               stream->print_cr("[Stub Code]");
!   if (JVMCI_ONLY(_deoptimize_offset >= 0 &&) block_begin == deopt_handler_begin())      stream->print_cr("[Deopt Handler Code]");
  
    if (has_method_handle_invokes())
      if (block_begin == deopt_mh_handler_begin()) stream->print_cr("[Deopt MH Handler Code]");
  
    if (block_begin == consts_begin())             stream->print_cr("[Constants]");
*** 3056,3065 **** --- 3298,3308 ----
          else
            st->print("<UNKNOWN>");
        }
      }
    }
+   st->print(" {reexecute=%d rethrow=%d return_oop=%d}", sd->should_reexecute(), sd->rethrow_exception(), sd->return_oop());
  }
  
  // Print all scopes
  for (;sd != NULL; sd = sd->sender()) {
    st->move_to(column);
*** 3128,3141 ****
  }
  
  void nmethod::print_statistics() {
    ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='nmethod'");
!   nmethod_stats.print_native_nmethod_stats();
!   nmethod_stats.print_nmethod_stats();
    DebugInformationRecorder::print_statistics();
!   nmethod_stats.print_pc_stats();
    Dependencies::print_statistics();
    if (xtty != NULL)  xtty->tail("statistics");
  }
! #endif // PRODUCT
--- 3371,3421 ----
  }
  
  void nmethod::print_statistics() {
    ttyLocker ttyl;
    if (xtty != NULL)  xtty->head("statistics type='nmethod'");
!   native_nmethod_stats.print_native_nmethod_stats();
! #ifdef COMPILER1
!   c1_java_nmethod_stats.print_nmethod_stats("C1");
! #endif
! #ifdef COMPILER2
!   c2_java_nmethod_stats.print_nmethod_stats("C2");
! #endif
! #if INCLUDE_JVMCI
!   jvmci_java_nmethod_stats.print_nmethod_stats("JVMCI");
! #endif
! #ifdef SHARK
!   shark_java_nmethod_stats.print_nmethod_stats("Shark");
! #endif
!   unknown_java_nmethod_stats.print_nmethod_stats("Unknown");
    DebugInformationRecorder::print_statistics();
! #ifndef PRODUCT
!   pc_nmethod_stats.print_pc_stats();
! #endif
    Dependencies::print_statistics();
    if (xtty != NULL)  xtty->tail("statistics");
  }
! #endif // !PRODUCT
! 
! #if INCLUDE_JVMCI
! char* nmethod::jvmci_installed_code_name(char* buf, size_t buflen) {
!   if (!this->is_compiled_by_jvmci()) {
!     return NULL;
!   }
!   oop installedCode = this->jvmci_installed_code();
!   if (installedCode != NULL) {
!     oop installedCodeName = NULL;
!     if (installedCode->is_a(InstalledCode::klass())) {
!       installedCodeName = InstalledCode::name(installedCode);
!     }
!     if (installedCodeName != NULL) {
!       return java_lang_String::as_utf8_string(installedCodeName, buf, (int)buflen);
!     } else {
!       jio_snprintf(buf, buflen, "null");
!       return buf;
!     }
!   }
!   jio_snprintf(buf, buflen, "noInstalledCode");
!   return buf;
! }
! #endif
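Aside on jvmci_installed_code_name() above (editorial sketch, not part of the webrev): it writes into a caller-supplied buffer and returns NULL for non-JVMCI nmethods, so callers guard on the return value before printing. A hypothetical caller, with the buffer size chosen arbitrarily rather than taken from the changeset:

    char buf[1024];
    char* name = nm->jvmci_installed_code_name(buf, sizeof(buf));
    if (name != NULL) {
      tty->print(" (%s)", name);   // the InstalledCode's name, "null", or "noInstalledCode"
    }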