// NOTE(review): extraction residue — this chunk is the LEFT half of a
// side-by-side diff of HotSpot C1's c1_LIRGenerator.cpp, collapsed onto
// single physical lines.  The embedded decimal numbers (2765, 2766, ...) are
// the original file's line numbers, not code.  All code tokens below are left
// byte-identical; only these standalone comment lines were added.
//
// The line below holds: the tail of LIRGenerator::do_Base — pick the lock
// object (a new T_OBJECT register loaded with the holder's java_mirror in the
// branch shown, otherwise the incoming receiver local), emit the
// monitor-enter slow path for synchronized methods, bump the invocation
// counter for non-accessor methods, and end the block with an unconditional
// jump to the default successor — followed by the signature and leading
// comment of LIRGenerator::do_OsrEntry.
2765 obj = new_register(T_OBJECT); 2766 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj); 2767 } else { 2768 Local* receiver = x->state()->local_at(0)->as_Local(); 2769 assert(receiver != NULL, "must already exist"); 2770 obj = receiver->operand(); 2771 } 2772 assert(obj->is_valid(), "must be valid"); 2773 2774 if (method()->is_synchronized() && GenerateSynchronizationCode) { 2775 LIR_Opr lock = new_register(T_INT); 2776 __ load_stack_address_monitor(0, lock); 2777 2778 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException)); 2779 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info); 2780 2781 // receiver is guaranteed non-NULL so don't need CodeEmitInfo 2782 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); 2783 } 2784 } 2785 2786 // increment invocation counters if needed 2787 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. 2788 profile_parameters(x); 2789 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, false); 2790 increment_invocation_counter(info); 2791 } 2792 2793 // all blocks with a successor must end with an unconditional jump 2794 // to the successor even if they are consecutive 2795 __ jump(x->default_sux()); 2796 } 2797 2798 2799 void LIRGenerator::do_OsrEntry(OsrEntry* x) { 2800 // construct our frame and model the production of incoming pointer 2801 // to the OSR buffer. 
// The line below holds: the body of do_OsrEntry (move the OSR buffer pointer
// into the instruction's result operand); the head of invoke_load_arguments,
// cut off mid-assert by the extraction (the chunk then jumps from original
// line 2809 to 3311); a fragment that notifies the runtime of inlinee
// invocation-counter overflow via increment_event_counter_impl; and most of
// increment_event_counter, which selects the tier-dependent notification
// frequency log (Tier2* for limited profile, Tier3* for full profile,
// distinguishing backedge vs. invocation counters).
2802 __ osr_entry(LIR_Assembler::osrBufferPointer()); 2803 LIR_Opr result = rlock_result(x); 2804 __ move(LIR_Assembler::osrBufferPointer(), result); 2805 } 2806 2807 2808 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) { 2809 assert(args->length() == arg_list->length(), 3311 CodeEmitInfo* info = state_for(x, x->state(), true); 3312 // Notify the runtime very infrequently only to take care of counter overflows 3313 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true); 3314 } 3315 } 3316 3317 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) { 3318 int freq_log; 3319 int level = compilation()->env()->comp_level(); 3320 if (level == CompLevel_limited_profile) { 3321 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog); 3322 } else if (level == CompLevel_full_profile) { 3323 freq_log = (backedge ? Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog); 3324 } else { 3325 ShouldNotReachHere(); 3326 } 3327 // Increment the appropriate invocation/backedge counter and notify the runtime. 
// NOTE(review): extraction residue continues (embedded numbers are original
// file line numbers).  The line below holds: the end of
// increment_event_counter — it delegates to increment_event_counter_impl
// with frequency mask (1 << freq_log) - 1 and notify == true — then the head
// of increment_event_counter_impl: the power-of-two-minus-one frequency
// assert, the comp-level sanity assert, and the limited-profile branch that
// materializes the MethodCounters* (bailing out of the compile if counter
// allocation failed) and selects the backedge vs. invocation counter offset.
// After the '|' column separator comes the RIGHT (newer) diff version's copy
// of the do_Base tail, textually identical to the left version up to
// "2785 if", where a new code-aging conditional begins (continued on the
// next physical line).  Code tokens are unchanged; only comments were added.
3328 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true); 3329 } 3330 3331 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, 3332 ciMethod *method, int frequency, 3333 int bci, bool backedge, bool notify) { 3334 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0"); 3335 int level = _compilation->env()->comp_level(); 3336 assert(level > CompLevel_simple, "Shouldn't be here"); 3337 3338 int offset = -1; 3339 LIR_Opr counter_holder; 3340 if (level == CompLevel_limited_profile) { 3341 MethodCounters* counters_adr = method->ensure_method_counters(); 3342 if (counters_adr == NULL) { 3343 bailout("method counters allocation failed"); 3344 return; 3345 } 3346 counter_holder = new_pointer_register(); 3347 __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder); 3348 offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() : 3349 MethodCounters::invocation_counter_offset()); 3350 } else if (level == CompLevel_full_profile) { | 2765 obj = new_register(T_OBJECT); 2766 __ oop2reg(method()->holder()->java_mirror()->constant_encoding(), obj); 2767 } else { 2768 Local* receiver = x->state()->local_at(0)->as_Local(); 2769 assert(receiver != NULL, "must already exist"); 2770 obj = receiver->operand(); 2771 } 2772 assert(obj->is_valid(), "must be valid"); 2773 2774 if (method()->is_synchronized() && GenerateSynchronizationCode) { 2775 LIR_Opr lock = new_register(T_INT); 2776 __ load_stack_address_monitor(0, lock); 2777 2778 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, SynchronizationEntryBCI), NULL, x->check_flag(Instruction::DeoptimizeOnException)); 2779 CodeStub* slow_path = new MonitorEnterStub(obj, lock, info); 2780 2781 // receiver is guaranteed non-NULL so don't need CodeEmitInfo 2782 __ lock_object(syncTempOpr(), obj, lock, new_register(T_OBJECT), slow_path, NULL); 2783 } 2784 } 2785 if 
// NOTE(review): extraction residue — RIGHT (newer) half of the side-by-side
// diff; embedded decimal numbers are original file line numbers.  Code tokens
// below are byte-identical; only these standalone comment lines were added.
//
// The line below continues the newer do_Base tail: the diff's addition is a
// code-aging block — when compilation()->age_code() is set, emit
// decrement_code_age(info) before the invocation-counter bump.  Note the
// newer version also passes the literal 0 (instead of the left side's
// SynchronizationEntryBCI symbol) as the bci argument of ValueStack copy()
// for these CodeEmitInfos.  The rest repeats the left side: jump to default
// successor, do_OsrEntry, the cut-off invoke_load_arguments head, the inlinee
// overflow-notification fragment, and the start of increment_event_counter.
(compilation()->age_code()) { 2786 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false); 2787 decrement_code_age(info); 2788 } 2789 // increment invocation counters if needed 2790 if (!method()->is_accessor()) { // Accessors do not have MDOs, so no counting. 2791 profile_parameters(x); 2792 CodeEmitInfo* info = new CodeEmitInfo(scope()->start()->state()->copy(ValueStack::StateBefore, 0), NULL, false); 2793 increment_invocation_counter(info); 2794 } 2795 2796 // all blocks with a successor must end with an unconditional jump 2797 // to the successor even if they are consecutive 2798 __ jump(x->default_sux()); 2799 } 2800 2801 2802 void LIRGenerator::do_OsrEntry(OsrEntry* x) { 2803 // construct our frame and model the production of incoming pointer 2804 // to the OSR buffer. 2805 __ osr_entry(LIR_Assembler::osrBufferPointer()); 2806 LIR_Opr result = rlock_result(x); 2807 __ move(LIR_Assembler::osrBufferPointer(), result); 2808 } 2809 2810 2811 void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR_OprList* arg_list) { 2812 assert(args->length() == arg_list->length(), 3314 CodeEmitInfo* info = state_for(x, x->state(), true); 3315 // Notify the runtime very infrequently only to take care of counter overflows 3316 increment_event_counter_impl(info, x->inlinee(), (1 << Tier23InlineeNotifyFreqLog) - 1, InvocationEntryBci, false, true); 3317 } 3318 } 3319 3320 void LIRGenerator::increment_event_counter(CodeEmitInfo* info, int bci, bool backedge) { 3321 int freq_log; 3322 int level = compilation()->env()->comp_level(); 3323 if (level == CompLevel_limited_profile) { 3324 freq_log = (backedge ? Tier2BackedgeNotifyFreqLog : Tier2InvokeNotifyFreqLog); 3325 } else if (level == CompLevel_full_profile) { 3326 freq_log = (backedge ? 
// The line below holds: the rest of increment_event_counter, then the diff's
// newly added LIRGenerator::decrement_code_age — if the method's
// MethodCounters could be allocated, load the nmethod_age field, subtract 1,
// store it back, and branch to a DeoptimizeStub
// (Reason_age / Action_make_not_entrant) when the decremented value is <= 0;
// if ensure_method_counters() returns NULL the aging check is silently
// skipped (no bailout here, unlike increment_event_counter_impl) — followed
// by the head of increment_event_counter_impl (same code as the left side,
// renumbered by the insertion above).
Tier3BackedgeNotifyFreqLog : Tier3InvokeNotifyFreqLog); 3327 } else { 3328 ShouldNotReachHere(); 3329 } 3330 // Increment the appropriate invocation/backedge counter and notify the runtime. 3331 increment_event_counter_impl(info, info->scope()->method(), (1 << freq_log) - 1, bci, backedge, true); 3332 } 3333 3334 void LIRGenerator::decrement_code_age(CodeEmitInfo* info) { 3335 ciMethod* method = info->scope()->method(); 3336 MethodCounters* mc_adr = method->ensure_method_counters(); 3337 if (mc_adr != NULL) { 3338 LIR_Opr mc = new_pointer_register(); 3339 __ move(LIR_OprFact::intptrConst(mc_adr), mc); 3340 int offset = in_bytes(MethodCounters::nmethod_age_offset()); 3341 LIR_Address* counter = new LIR_Address(mc, offset, T_INT); 3342 LIR_Opr result = new_register(T_INT); 3343 __ load(counter, result); 3344 __ sub(result, LIR_OprFact::intConst(1), result); 3345 __ store(result, counter); 3346 // DeoptimizeStub will reexecute from the current state in code info. 3347 CodeStub* deopt = new DeoptimizeStub(info, Deoptimization::Reason_age, Deoptimization::Action_make_not_entrant); 3348 __ cmp(lir_cond_lessEqual, result, LIR_OprFact::intConst(0)); 3349 __ branch(lir_cond_lessEqual, T_INT, deopt); 3350 } 3351 } 3352 3353 3354 void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info, 3355 ciMethod *method, int frequency, 3356 int bci, bool backedge, bool notify) { 3357 assert(frequency == 0 || is_power_of_2(frequency + 1), "Frequency must be x^2 - 1 or 0"); 3358 int level = _compilation->env()->comp_level(); 3359 assert(level > CompLevel_simple, "Shouldn't be here"); 3360 3361 int offset = -1; 3362 LIR_Opr counter_holder; 3363 if (level == CompLevel_limited_profile) { 3364 MethodCounters* counters_adr = method->ensure_method_counters(); 3365 if (counters_adr == NULL) { 3366 bailout("method counters allocation failed"); 3367 return; 3368 } 3369 counter_holder = new_pointer_register(); 3370 __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder); 3371 
// The line below holds the backedge-vs-invocation counter-offset selection of
// increment_event_counter_impl, cut off by the extraction at the
// full-profile branch; the trailing '|' closes the side-by-side diff row.
offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() : 3372 MethodCounters::invocation_counter_offset()); 3373 } else if (level == CompLevel_full_profile) { |