--- old/make/bsd/makefiles/gcc.make 2014-05-06 15:12:56.000000000 -0700 +++ new/make/bsd/makefiles/gcc.make 2014-05-06 15:12:56.000000000 -0700 @@ -253,7 +253,7 @@ ifeq ($(USE_CLANG), true) # However we need to clean the code up before we can unrestrictedly enable this option with Clang WARNINGS_ARE_ERRORS += -Wno-logical-op-parentheses -Wno-parentheses-equality -Wno-parentheses - WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare + WARNINGS_ARE_ERRORS += -Wno-switch -Wno-tautological-compare -Wno-string-plus-int # Not yet supported by clang in Xcode 4.6.2 # WARNINGS_ARE_ERRORS += -Wno-tautological-constant-out-of-range-compare WARNINGS_ARE_ERRORS += -Wno-delete-non-virtual-dtor -Wno-deprecated -Wno-format -Wno-dynamic-class-memaccess @@ -262,6 +262,10 @@ WARNING_FLAGS = -Wpointer-arith -Wsign-compare -Wundef -Wunused-function -Wformat=2 +ifeq ($(USE_CLANG), true) + WARNING_FLAGS += -Wno-format-nonliteral +endif + ifeq ($(USE_CLANG),) # Since GCC 4.3, -Wconversion has changed its meanings to warn these implicit # conversions which might affect the values. Only enable it in earlier versions. @@ -313,10 +317,21 @@ # Work around some compiler bugs. ifeq ($(USE_CLANG), true) + # Clang 4.2 ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 2), 1) OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT) OPT_CFLAGS/unsafe.o += -O1 endif + # Clang 5.0 + ifeq ($(shell expr $(CC_VER_MAJOR) = 5 \& $(CC_VER_MINOR) = 0), 1) + OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT) + OPT_CFLAGS/unsafe.o += -O1 + endif + # Clang 5.1 + ifeq ($(shell expr $(CC_VER_MAJOR) = 5 \& $(CC_VER_MINOR) = 1), 1) + OPT_CFLAGS/loopTransform.o += $(OPT_CFLAGS/NOOPT) + OPT_CFLAGS/unsafe.o += -O1 + endif else # 6835796. Problem in GCC 4.3.0 with mulnode.o optimized compilation. 
ifeq ($(shell expr $(CC_VER_MAJOR) = 4 \& $(CC_VER_MINOR) = 3), 1) @@ -324,6 +339,15 @@ endif endif +# We want to use libc++ on Clang 5.0 +ifeq ($(USE_CLANG), true) + # Clang 5.x + ifeq ($(shell expr $(CC_VER_MAJOR) = 5), 1) + CFLAGS += -stdlib=libc++ + LFLAGS += -stdlib=libc++ + endif +endif + # Flags for generating make dependency flags. DEPFLAGS = -MMD -MP -MF $(DEP_DIR)/$(@:%=%.d) ifeq ($(USE_CLANG),) @@ -364,11 +388,6 @@ # statically link libstdc++.so, work with gcc but ignored by g++ STATIC_STDCXX = -Wl,-Bstatic -lstdc++ -Wl,-Bdynamic -# Ensure use libstdc++ on clang, not libc++ -ifeq ($(USE_CLANG), true) - LFLAGS += -stdlib=libstdc++ -endif - ifeq ($(USE_CLANG),) # statically link libgcc and/or libgcc_s, libgcc does not exist before gcc-3.x. ifneq ("${CC_VER_MAJOR}", "2") --- old/make/bsd/makefiles/saproc.make 2014-05-06 15:12:56.000000000 -0700 +++ new/make/bsd/makefiles/saproc.make 2014-05-06 15:12:56.000000000 -0700 @@ -64,7 +64,8 @@ else ifeq ($(OS_VENDOR), Darwin) SASRCFILES = $(DARWIN_NON_STUB_SASRCFILES) - SALIBS = -g -framework Foundation -F/System/Library/Frameworks/JavaVM.framework/Frameworks -framework JavaNativeFoundation -framework Security -framework CoreFoundation + #SALIBS = -g -framework Foundation -F/System/Library/Frameworks/JavaVM.framework/Frameworks -framework JavaNativeFoundation -framework Security -framework CoreFoundation + SALIBS = -g -framework Foundation -F/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.9.sdk/System/Library/Frameworks/JavaVM.framework/Frameworks -framework JavaNativeFoundation -framework Security -framework CoreFoundation #objc compiler blows up on -march=i586, perhaps it should not be included in the macosx intel 32-bit C++ compiles? 
SAARCH = $(subst -march=i586,,$(ARCHFLAG)) else --- old/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp 2014-05-06 15:12:57.000000000 -0700 +++ new/src/cpu/sparc/vm/c1_CodeStubs_sparc.cpp 2014-05-06 15:12:57.000000000 -0700 @@ -414,6 +414,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); + __ set(_trap_request, G4); __ call(Runtime1::entry_for(Runtime1::deoptimize_id), relocInfo::runtime_call_type); __ delayed()->nop(); ce->add_call_info_here(_info); --- old/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp 2014-05-06 15:12:57.000000000 -0700 +++ new/src/cpu/sparc/vm/c1_Runtime1_sparc.cpp 2014-05-06 15:12:57.000000000 -0700 @@ -781,7 +781,7 @@ { __ set_info("deoptimize", dont_gc_arguments); OopMap* oop_map = save_live_registers(sasm); - int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize)); + int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), G4); oop_maps = new OopMapSet(); oop_maps->add_gc_map(call_offset, oop_map); restore_live_registers(sasm); --- old/src/cpu/x86/vm/c1_CodeStubs_x86.cpp 2014-05-06 15:12:57.000000000 -0700 +++ new/src/cpu/x86/vm/c1_CodeStubs_x86.cpp 2014-05-06 15:12:57.000000000 -0700 @@ -430,6 +430,7 @@ void DeoptimizeStub::emit_code(LIR_Assembler* ce) { __ bind(_entry); + ce->store_parameter(_trap_request, 0); __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::deoptimize_id))); ce->add_call_info_here(_info); DEBUG_ONLY(__ should_not_reach_here()); --- old/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2014-05-06 15:12:58.000000000 -0700 +++ new/src/cpu/x86/vm/c1_Runtime1_x86.cpp 2014-05-06 15:12:58.000000000 -0700 @@ -1468,9 +1468,10 @@ case deoptimize_id: { StubFrame f(sasm, "deoptimize", dont_gc_arguments); - const int num_rt_args = 1; // thread + const int num_rt_args = 2; // thread, trap_request OopMap* oop_map = save_live_registers(sasm, num_rt_args); - int call_offset = __ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize)); + f.load_argument(0, rax); + int call_offset = __ 
call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, deoptimize), rax); oop_maps = new OopMapSet(); oop_maps->add_gc_map(call_offset, oop_map); restore_live_registers(sasm); --- old/src/share/vm/c1/c1_CodeStubs.hpp 2014-05-06 15:12:58.000000000 -0700 +++ new/src/share/vm/c1/c1_CodeStubs.hpp 2014-05-06 15:12:58.000000000 -0700 @@ -450,9 +450,11 @@ class DeoptimizeStub : public CodeStub { private: CodeEmitInfo* _info; + jint _trap_request; public: - DeoptimizeStub(CodeEmitInfo* info) : _info(new CodeEmitInfo(info)) {} + DeoptimizeStub(CodeEmitInfo* info, Deoptimization::DeoptReason reason, Deoptimization::DeoptAction action) : + _info(new CodeEmitInfo(info)), _trap_request(Deoptimization::make_trap_request(reason, action)) {} virtual void emit_code(LIR_Assembler* e); virtual CodeEmitInfo* info() const { return _info; } --- old/src/share/vm/c1/c1_Compilation.cpp 2014-05-06 15:12:58.000000000 -0700 +++ new/src/share/vm/c1/c1_Compilation.cpp 2014-05-06 15:12:58.000000000 -0700 @@ -556,6 +556,7 @@ _env->set_compiler_data(this); _exception_info_list = new ExceptionInfoList(); _implicit_exception_table.set_size(0); + _method->ensure_method_counters(); compile_method(); if (bailed_out()) { _env->record_method_not_compilable(bailout_msg(), !TieredCompilation); --- old/src/share/vm/c1/c1_Compilation.hpp 2014-05-06 15:12:59.000000000 -0700 +++ new/src/share/vm/c1/c1_Compilation.hpp 2014-05-06 15:12:59.000000000 -0700 @@ -251,6 +251,10 @@ return env()->comp_level() == CompLevel_full_profile && C1UpdateMethodData && MethodData::profile_return(); } + bool age_code() const { + return MethodCounters::should_nmethod_age(_method->nmethod_age()); + } + // will compilation make optimistic assumptions that might lead to // deoptimization and that the runtime will account for? 
bool is_optimistic() const { --- old/src/share/vm/c1/c1_LIRGenerator.cpp 2014-05-06 15:12:59.000000000 -0700 +++ new/src/share/vm/c1/c1_LIRGenerator.cpp 2014-05-06 15:12:59.000000000 -0700 @@ -2782,11 +2782,14 @@ __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); } } - + if (compilation()->age_code()) { + CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false); + decrement_code_age(info); + } // increment invocation counters if needed if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. profile_parameters(x); - CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false); + CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false); increment_invocation_counter(info); } @@ -3328,6 +3331,26 @@ increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true); } +void LIRGenerator::decrement_code_age(CodeEmitInfo* info) { + ciMethod* method = info->scope()->method(); + MethodCounters* mc_adr = method->ensure_method_counters(); + if (mc_adr != NULL) { + LIR_Opr mc = new_pointer_register(); + __ move(LIR_OprFact::intptrConst(mc_adr), mc); + int offset = in_bytes(MethodCounters::nmethod_age_offset()); + LIR_Address* counter = new LIR_Address(mc, offset, T_INT); + LIR_Opr result = new_register(T_INT); + __ load(counter, result); + __ sub(result, LIR_OprFact::intConst(1), result); + __ store(result, counter); + // DeoptimizeStub will reexecute from the current state in code info. 
+ CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_age, Deoptimization::Action_make_not_entrant); + __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0)); + __ branch(lir_cond_lessEqual, T_INT, deopt); + } +} + + void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, ciMethod *method, int frequency, int bci, bool backedge, bool notify) { --- old/src/share/vm/c1/c1_LIRGenerator.hpp 2014-05-06 15:12:59.000000000 -0700 +++ new/src/share/vm/c1/c1_LIRGenerator.hpp 2014-05-06 15:12:59.000000000 -0700 @@ -372,7 +372,7 @@ increment_event_counter(info, bci, true); } } - + void decrement_code_age(CodeEmitInfo* info); CodeEmitInfo* state_for(Instruction* x, ValueStack* state, bool ignore_xhandler = false); CodeEmitInfo* state_for(Instruction* x); --- old/src/share/vm/c1/c1_Runtime1.cpp 2014-05-06 15:13:00.000000000 -0700 +++ new/src/share/vm/c1/c1_Runtime1.cpp 2014-05-06 15:12:59.000000000 -0700 @@ -685,7 +685,7 @@ JRT_END // Cf. OptoRuntime::deoptimize_caller_frame -JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread)) +JRT_ENTRY(void, Runtime1::deoptimize(JavaThread* thread, jint trap_request)) // Called from within the owner thread, so no need for safepoint RegisterMap reg_map(thread, false); frame stub_frame = thread->last_frame(); @@ -694,10 +694,17 @@ // We are coming from a compiled method; check this is true. assert(CodeCache::find_nmethod(caller_frame.pc()) != NULL, "sanity"); + Deoptimization::DeoptAction action = Deoptimization::trap_request_action(trap_request); + + if (action == Deoptimization::Action_make_not_entrant) { + nmethod* nm = CodeCache::find_nmethod(caller_frame.pc()); + if (nm != NULL) { + nm->make_not_entrant(); + } + } // Deoptimize the caller frame. Deoptimization::deoptimize_frame(thread, caller_frame.id()); - // Return to the now deoptimized frame. 
JRT_END --- old/src/share/vm/c1/c1_Runtime1.hpp 2014-05-06 15:13:00.000000000 -0700 +++ new/src/share/vm/c1/c1_Runtime1.hpp 2014-05-06 15:13:00.000000000 -0700 @@ -156,7 +156,7 @@ static void monitorenter(JavaThread* thread, oopDesc* obj, BasicObjectLock* lock); static void monitorexit (JavaThread* thread, BasicObjectLock* lock); - static void deoptimize(JavaThread* thread); + static void deoptimize(JavaThread* thread, jint trap_request); static int access_field_patching(JavaThread* thread); static int move_klass_patching(JavaThread* thread); --- old/src/share/vm/ci/ciMethod.cpp 2014-05-06 15:13:00.000000000 -0700 +++ new/src/share/vm/ci/ciMethod.cpp 2014-05-06 15:13:00.000000000 -0700 @@ -129,6 +129,7 @@ constantPoolHandle cpool = h_m()->constants(); _signature = new (env->arena()) ciSignature(_holder, cpool, sig_symbol); _method_data = NULL; + _nmethod_age = h_m()->nmethod_age(); // Take a snapshot of these values, so they will be commensurate with the MDO. if (ProfileInterpreter || TieredCompilation) { int invcnt = h_m()->interpreter_invocation_count(); --- old/src/share/vm/ci/ciMethod.hpp 2014-05-06 15:13:00.000000000 -0700 +++ new/src/share/vm/ci/ciMethod.hpp 2014-05-06 15:13:00.000000000 -0700 @@ -68,6 +68,7 @@ int _max_locals; vmIntrinsics::ID _intrinsic_id; int _handler_count; + int _nmethod_age; int _interpreter_invocation_count; int _interpreter_throwout_count; int _instructions_size; @@ -168,6 +169,8 @@ int interpreter_invocation_count() const { check_is_loaded(); return _interpreter_invocation_count; } int interpreter_throwout_count() const { check_is_loaded(); return _interpreter_throwout_count; } int size_of_parameters() const { check_is_loaded(); return _size_of_parameters; } + int nmethod_age() const { check_is_loaded(); return _nmethod_age; } + // Code size for inlining decisions. 
int code_size_for_inlining(); --- old/src/share/vm/code/nmethod.cpp 2014-05-06 15:13:01.000000000 -0700 +++ new/src/share/vm/code/nmethod.cpp 2014-05-06 15:13:01.000000000 -0700 @@ -1179,7 +1179,7 @@ } void nmethod::inc_decompile_count() { - if (!is_compiled_by_c2()) return; + if (!is_compiled_by_c2() && !is_compiled_by_c1()) return; // Could be gated by ProfileTraps, but do not bother... Method* m = method(); if (m == NULL) return; --- old/src/share/vm/oops/method.hpp 2014-05-06 15:13:01.000000000 -0700 +++ new/src/share/vm/oops/method.hpp 2014-05-06 15:13:01.000000000 -0700 @@ -371,6 +371,13 @@ } } #endif + int nmethod_age() const { + if (method_counters() == NULL) { + return INT_MAX; + } else { + return method_counters()->nmethod_age(); + } + } int invocation_count(); int backedge_count(); --- old/src/share/vm/oops/methodCounters.cpp 2014-05-06 15:13:01.000000000 -0700 +++ new/src/share/vm/oops/methodCounters.cpp 2014-05-06 15:13:01.000000000 -0700 @@ -34,4 +34,10 @@ backedge_counter()->reset(); set_interpreter_throwout_count(0); set_interpreter_invocation_count(0); + set_nmethod_age(INT_MAX); +} + +bool MethodCounters::should_nmethod_age(int age) { + return UseCodeAging && (!MethodCounters::is_nmethod_hot(age) && + !MethodCounters::is_nmethod_age_unset(age)); } --- old/src/share/vm/oops/methodCounters.hpp 2014-05-06 15:13:02.000000000 -0700 +++ new/src/share/vm/oops/methodCounters.hpp 2014-05-06 15:13:02.000000000 -0700 @@ -31,6 +31,15 @@ class MethodCounters: public MetaspaceObj { friend class VMStructs; private: + // NMethod age is a counter for warm methods detection in the code cache sweeper. + // The counter is reset by the sweeper and is decremented by some of the compiled + // code. The counter values are interpreted as follows: + // 1. (HotMethodDetectionLimit..INT_MAX] - initial value, no counters inserted + // 2. (1..HotMethodDetectionLimit) - the method is warm, the counter is used + // to figure out which methods can be flushed. + // 3. 
(INT_MIN..0] - method is hot and will deopt and get + // recompiled without the counters + int _nmethod_age; int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered) u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting u2 _number_of_breakpoints; // fullspeed debugging support @@ -42,7 +51,8 @@ jlong _prev_time; // Previous time the rate was acquired #endif - MethodCounters() : _interpreter_invocation_count(0), + MethodCounters() : _nmethod_age(INT_MAX), + _interpreter_invocation_count(0), _interpreter_throwout_count(0), _number_of_breakpoints(0) #ifdef TIERED @@ -52,6 +62,10 @@ { invocation_counter()->init(); backedge_counter()->init(); + + if (StressCodeAging) { + set_nmethod_age(HotMethodDetectionLimit); + } } public: @@ -104,6 +118,24 @@ InvocationCounter* invocation_counter() { return &_invocation_counter; } InvocationCounter* backedge_counter() { return &_backedge_counter; } + int nmethod_age() { + return _nmethod_age; + } + void set_nmethod_age(int age) { + _nmethod_age = age; + } + void reset_nmethod_age() { + set_nmethod_age(HotMethodDetectionLimit); + } + + static bool is_nmethod_hot(int age) { return age <= 0; } + static bool is_nmethod_warm(int age) { return age < HotMethodDetectionLimit; } + static bool is_nmethod_age_unset(int age) { return age > HotMethodDetectionLimit; } + static bool should_nmethod_age(int age); + static ByteSize nmethod_age_offset() { + return byte_offset_of(MethodCounters, _nmethod_age); + } + static ByteSize interpreter_invocation_counter_offset() { return byte_offset_of(MethodCounters, _interpreter_invocation_count); } --- old/src/share/vm/oops/methodData.hpp 2014-05-06 15:13:02.000000000 -0700 +++ new/src/share/vm/oops/methodData.hpp 2014-05-06 15:13:02.000000000 -0700 @@ -2395,7 +2395,7 @@ void inc_decompile_count() { _nof_decompiles += 1; if (decompile_count() > (uint)PerMethodRecompilationCutoff) { - 
method()->set_not_compilable(CompLevel_full_optimization, true, "decompile_count > PerMethodRecompilationCutoff"); + method()->set_not_compilable(CompLevel_all, true, "decompile_count > PerMethodRecompilationCutoff"); } } --- old/src/share/vm/opto/compile.cpp 2014-05-06 15:13:02.000000000 -0700 +++ new/src/share/vm/opto/compile.cpp 2014-05-06 15:13:02.000000000 -0700 @@ -1089,6 +1089,7 @@ set_do_scheduling(OptoScheduling); set_do_count_invocations(false); set_do_method_data_update(false); + set_age_code(has_method() && MethodCounters::should_nmethod_age(method()->nmethod_age())); set_rtm_state(NoRTM); // No RTM lock eliding by default #if INCLUDE_RTM_OPT if (UseRTMLocking && has_method() && (method()->method_data_or_null() != NULL)) { --- old/src/share/vm/opto/compile.hpp 2014-05-06 15:13:03.000000000 -0700 +++ new/src/share/vm/opto/compile.hpp 2014-05-06 15:13:03.000000000 -0700 @@ -311,6 +311,7 @@ bool _do_freq_based_layout; // True if we intend to do frequency based block layout bool _do_count_invocations; // True if we generate code to count invocations bool _do_method_data_update; // True if we generate code to update MethodData*s + bool _age_code; int _AliasLevel; // Locally-adjusted version of AliasLevel flag. 
bool _print_assembly; // True if we should dump assembly code for this compilation bool _print_inlining; // True if we should print inlining for this compilation @@ -584,7 +585,9 @@ void set_do_count_invocations(bool z){ _do_count_invocations = z; } bool do_method_data_update() const { return _do_method_data_update; } void set_do_method_data_update(bool z) { _do_method_data_update = z; } - int AliasLevel() const { return _AliasLevel; } + bool age_code() const { return _age_code; } + void set_age_code(bool z) { _age_code = z; } + int AliasLevel() const { return _AliasLevel; } bool print_assembly() const { return _print_assembly; } void set_print_assembly(bool z) { _print_assembly = z; } bool print_inlining() const { return _print_inlining; } --- old/src/share/vm/opto/parse.hpp 2014-05-06 15:13:03.000000000 -0700 +++ new/src/share/vm/opto/parse.hpp 2014-05-06 15:13:03.000000000 -0700 @@ -581,6 +581,7 @@ void jump_switch_ranges(Node* a, SwitchRange* lo, SwitchRange* hi, int depth = 0); bool create_jump_tables(Node* a, SwitchRange* lo, SwitchRange* hi); + void decrement_code_age(); // helper functions for methodData style profiling void test_counter_against_threshold(Node* cnt, int limit); void increment_and_test_invocation_counter(int limit); --- old/src/share/vm/opto/parse1.cpp 2014-05-06 15:13:03.000000000 -0700 +++ new/src/share/vm/opto/parse1.cpp 2014-05-06 15:13:03.000000000 -0700 @@ -568,6 +568,9 @@ } else { set_map(entry_map); do_method_entry(); + if (depth() == 1 && C->age_code()) { + decrement_code_age(); + } } if (depth() == 1) { // Add check to deoptimize the nmethod if RTM state was changed @@ -2048,6 +2051,32 @@ #endif } +void Parse::decrement_code_age() { + // Get the Method* node. + MethodCounters* mc = method()->ensure_method_counters(); + if (mc == NULL) { + C->record_failure("Must have MCs"); + return; + } + assert(!is_osr_parse(), "Not doing this for OSRs"); + + // Set starting bci for uncommon trap. 
+ set_parse_bci(0); + + const TypePtr* adr_type = TypeRawPtr::make((address)mc); + Node* mc_adr = makecon(adr_type); + Node* cnt_adr = basic_plus_adr(mc_adr, mc_adr, in_bytes(MethodCounters::nmethod_age_offset())); + Node* cnt = make_load(control(), cnt_adr, TypeInt::INT, T_INT, adr_type, MemNode::unordered); + Node* decr = _gvn.transform(new (C) SubINode(cnt, makecon(TypeInt::ONE))); + store_to_memory(control(), cnt_adr, decr, T_INT, adr_type, MemNode::unordered); + Node *chk = _gvn.transform(new (C) CmpINode(decr, makecon(TypeInt::ZERO))); + Node* tst = _gvn.transform(new (C) BoolNode(chk, BoolTest::gt)); + { BuildCutout unless(this, tst, PROB_ALWAYS); + uncommon_trap(Deoptimization::Reason_age, + Deoptimization::Action_make_not_entrant); + } +} + //------------------------------return_current--------------------------------- // Append current _map to _exit_return void Parse::return_current(Node* value) { --- old/src/share/vm/runtime/deoptimization.cpp 2014-05-06 15:13:04.000000000 -0700 +++ new/src/share/vm/runtime/deoptimization.cpp 2014-05-06 15:13:04.000000000 -0700 @@ -742,6 +742,8 @@ return 0; } +Deoptimization::DeoptAction Deoptimization::_unloaded_action + = Deoptimization::Action_reinterpret; #ifdef COMPILER2 bool Deoptimization::realloc_objects(JavaThread* thread, frame* fr, GrowableArray* objects, TRAPS) { @@ -1811,8 +1813,6 @@ //--------------------------------statics-------------------------------------- -Deoptimization::DeoptAction Deoptimization::_unloaded_action - = Deoptimization::Action_reinterpret; const char* Deoptimization::_trap_reason_name[Reason_LIMIT] = { // Note: Keep this in sync. with enum DeoptReason. 
"none", --- old/src/share/vm/runtime/globals.hpp 2014-05-06 15:13:04.000000000 -0700 +++ new/src/share/vm/runtime/globals.hpp 2014-05-06 15:13:04.000000000 -0700 @@ -2562,6 +2562,20 @@ diagnostic(bool, PrintMethodFlushingStatistics, false, \ "print statistics about method flushing") \ \ + diagnostic(intx, HotMethodDetectionLimit, 100000, \ + "Number of compiled code invocations after which " \ + "the method is considered as hot by the flusher") \ + \ + diagnostic(intx, MinPassesBeforeFlush, 10, \ + "Minimum number of sweeper passes before an nmethod " \ + "can be flushed") \ + \ + product(bool, UseCodeAging, true, \ + "Insert counter to detect warm methods") \ + \ + diagnostic(bool, StressCodeAging, false, \ + "Start with counters compiled in") \ + \ develop(bool, UseRelocIndex, false, \ "Use an index to speed random access to relocations") \ \ --- old/src/share/vm/runtime/sweeper.cpp 2014-05-06 15:13:04.000000000 -0700 +++ new/src/share/vm/runtime/sweeper.cpp 2014-05-06 15:13:04.000000000 -0700 @@ -569,37 +569,7 @@ SWEEP(nm); } } else { - if (UseCodeCacheFlushing) { - if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) { - // Do not make native methods and OSR-methods not-entrant - nm->dec_hotness_counter(); - // Get the initial value of the hotness counter. This value depends on the - // ReservedCodeCacheSize - int reset_val = hotness_counter_reset_val(); - int time_since_reset = reset_val - nm->hotness_counter(); - double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity); - // The less free space in the code cache we have - the bigger reverse_free_ratio() is. - // I.e., 'threshold' increases with lower available space in the code cache and a higher - // NmethodSweepActivity. If the current hotness counter - which decreases from its initial - // value until it is reset by stack walking - is smaller than the computed threshold, the - // corresponding nmethod is considered for removal. 
- if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > 10)) { - // A method is marked as not-entrant if the method is - // 1) 'old enough': nm->hotness_counter() < threshold - // 2) The method was in_use for a minimum amount of time: (time_since_reset > 10) - // The second condition is necessary if we are dealing with very small code cache - // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods. - // The second condition ensures that methods are not immediately made not-entrant - // after compilation. - nm->make_not_entrant(); - // Code cache state change is tracked in make_not_entrant() - if (PrintMethodFlushing && Verbose) { - tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f", - nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold); - } - } - } - } + possibly_flush(nm); // Clean-up all inline caches that point to zombie/non-reentrant methods MutexLocker cl(CompiledIC_lock); nm->cleanup_inline_caches(); @@ -608,6 +578,86 @@ return freed_memory; } + +void NMethodSweeper::possibly_flush(nmethod* nm) { + if (UseCodeCacheFlushing) { + if (!nm->is_locked_by_vm() && !nm->is_osr_method() && !nm->is_native_method()) { + bool make_not_entrant = false; + + // Do not make native methods and OSR-methods not-entrant + nm->dec_hotness_counter(); + // Get the initial value of the hotness counter. This value depends on the + // ReservedCodeCacheSize + int reset_val = hotness_counter_reset_val(); + int time_since_reset = reset_val - nm->hotness_counter(); + double threshold = -reset_val + (CodeCache::reverse_free_ratio() * NmethodSweepActivity); + // The less free space in the code cache we have - the bigger reverse_free_ratio() is. + // I.e., 'threshold' increases with lower available space in the code cache and a higher + // NmethodSweepActivity. 
If the current hotness counter - which decreases from its initial + // value until it is reset by stack walking - is smaller than the computed threshold, the + // corresponding nmethod is considered for removal. + if ((NmethodSweepActivity > 0) && (nm->hotness_counter() < threshold) && (time_since_reset > MinPassesBeforeFlush)) { + // A method is marked as not-entrant if the method is + // 1) 'old enough': nm->hotness_counter() < threshold + // 2) The method was in_use for a minimum amount of time: (time_since_reset > MinPassesBeforeFlush) + // The second condition is necessary if we are dealing with very small code cache + // sizes (e.g., <10m) and the code cache size is too small to hold all hot methods. + // The second condition ensures that methods are not immediately made not-entrant + // after compilation. + make_not_entrant = true; + } + + // The stack-scanning low-cost detection didn't see the method used (which can happen for + // flat profiles). Check the age counter for possible data. + if (UseCodeAging && make_not_entrant && (nm->is_compiled_by_c2() || nm->is_compiled_by_c1())) { + MethodCounters* mc = nm->method()->method_counters(); + if (mc != NULL) { + // Snapshot the value as it's changed concurrently + int age = mc->nmethod_age(); + if (MethodCounters::is_nmethod_hot(age)) { + // The method has gone through flushing, and it became relatively hot that it deopted + // before we could take a look at it. + if (time_since_reset > MinPassesBeforeFlush * 2) { + // It's been long enough, we still haven't seen it on stack. + // Try to flush it, but enable counters the next time. + mc->reset_nmethod_age(); + } else { + make_not_entrant = false; + } + } else if (MethodCounters::is_nmethod_warm(age)) { + // Method has counters enabled, and the method was used within + // previous 10 sweeps. Reset the counter. Stay in the existing + // compiled state. 
+ mc->reset_nmethod_age(); + // delay the next check + nm->set_hotness_counter(NMethodSweeper::hotness_counter_reset_val()); + make_not_entrant = false; + } else if (MethodCounters::is_nmethod_age_unset(age)) { + // No counters were used before. Set the counters to the detection + // limit value. If the method is going to be used again it will be compiled + // with counters that we're going to use for analysis the next time. + mc->reset_nmethod_age(); + } else { + // Method was totally idle for 10 sweeps + // The counter already has the initial value, flush it and maybe recompile + // later with counters + } + } + } + + if (make_not_entrant) { + nm->make_not_entrant(); + + // Code cache state change is tracked in make_not_entrant() + if (PrintMethodFlushing && Verbose) { + tty->print_cr("### Nmethod %d/" PTR_FORMAT "made not-entrant: hotness counter %d/%d threshold %f", + nm->compile_id(), nm, nm->hotness_counter(), reset_val, threshold); + } + } + } + } +} + // Print out some state information about the current sweep and the // state of the code cache if it's requested. void NMethodSweeper::log_sweep(const char* msg, const char* format, ...)
{ --- old/src/share/vm/runtime/sweeper.hpp 2014-05-06 15:13:05.000000000 -0700 +++ new/src/share/vm/runtime/sweeper.hpp 2014-05-06 15:13:05.000000000 -0700 @@ -111,6 +111,7 @@ static int hotness_counter_reset_val(); static void report_state_change(nmethod* nm); static void possibly_enable_sweeper(); + static void possibly_flush(nmethod* nm); static void print(); // Printing/debugging }; --- old/src/share/vm/runtime/vmStructs.cpp 2014-05-06 15:13:05.000000000 -0700 +++ new/src/share/vm/runtime/vmStructs.cpp 2014-05-06 15:13:05.000000000 -0700 @@ -365,6 +365,7 @@ nonstatic_field(DataLayout, _header._struct._flags, u1) \ nonstatic_field(DataLayout, _header._struct._bci, u2) \ nonstatic_field(DataLayout, _cells[0], intptr_t) \ + nonstatic_field(MethodCounters, _nmethod_age, int) \ nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \ nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \ nonstatic_field(MethodCounters, _number_of_breakpoints, u2) \