src/cpu/ppc/vm/macroAssembler_ppc.cpp

rev 13015 : [mq]: 8180612.patch


2481 
2482 // Perform abort ratio calculation, set no_rtm bit if high ratio.
2483 // input:  rtm_counters_Reg (RTMLockingCounters* address) - KILLED
2484 void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
2485                                                  RTMLockingCounters* rtm_counters,
2486                                                  Metadata* method_data) {
2487   Label L_done, L_check_always_rtm1, L_check_always_rtm2;
2488 
2489   if (RTMLockingCalculationDelay > 0) {
2490     // Delay calculation.
2491     ld(rtm_counters_Reg, (RegisterOrConstant)(intptr_t)RTMLockingCounters::rtm_calculation_flag_addr());
2492     cmpdi(CCR0, rtm_counters_Reg, 0);
2493     beq(CCR0, L_done);
2494     load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
2495   }
2496   // Abort ratio calculation only if abort_count > RTMAbortThreshold.
2497   //   Aborted transactions = abort_count * 100
2498   //   All transactions = total_count *  RTMTotalCountIncrRate
2499   //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
2500   ld(R0, RTMLockingCounters::abort_count_offset(), rtm_counters_Reg);
2501   cmpdi(CCR0, R0, RTMAbortThreshold);
2502   blt(CCR0, L_check_always_rtm2);
2503   mulli(R0, R0, 100);
2504 
2505   const Register tmpReg = rtm_counters_Reg;
2506   ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
2507   mulli(tmpReg, tmpReg, RTMTotalCountIncrRate);
2508   mulli(tmpReg, tmpReg, RTMAbortRatio);
2509   cmpd(CCR0, R0, tmpReg);
2510   blt(CCR0, L_check_always_rtm1); // jump to reload
2511   if (method_data != NULL) {
2512     // Set rtm_state to "no rtm" in MDO.
2513     // Not using a metadata relocation. Method and Class Loader are kept alive anyway.
2514     // (See nmethod::metadata_do and CodeBuffer::finalize_oop_references.)
2515     load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg);
2516     atomic_ori_int(R0, tmpReg, NoRTM);
2517   }
2518   b(L_done);
2519 
2520   bind(L_check_always_rtm1);
2521   load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
2522   bind(L_check_always_rtm2);
2523   ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
2524   cmpdi(CCR0, tmpReg, RTMLockingThreshold / RTMTotalCountIncrRate);
2525   blt(CCR0, L_done);
2526   if (method_data != NULL) {
2527     // Set rtm_state to "always rtm" in MDO.
2528     // Not using a metadata relocation. See above.
2529     load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg);
2530     atomic_ori_int(R0, tmpReg, UseRTM);
2531   }
2532   bind(L_done);
2533 }
2534 
2535 // Update counters and perform abort ratio calculation.
2536 // input: abort_status_Reg
2537 void MacroAssembler::rtm_profiling(Register abort_status_Reg, Register temp_Reg,
2538                                    RTMLockingCounters* rtm_counters,
2539                                    Metadata* method_data,
2540                                    bool profile_rtm) {
2541 
2542   assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
2543   // Update rtm counters based on state at abort.
2544   // Reads abort_status_Reg, updates flags.


2603 void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
2604                                        Register obj, Register mark_word, Register tmp,
2605                                        Register retry_on_abort_count_Reg,
2606                                        RTMLockingCounters* stack_rtm_counters,
2607                                        Metadata* method_data, bool profile_rtm,
2608                                        Label& DONE_LABEL, Label& IsInflated) {
2609   assert(UseRTMForStackLocks, "why call this otherwise?");
2610   assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
2611   Label L_rtm_retry, L_decrement_retry, L_on_abort;
2612 
2613   if (RTMRetryCount > 0) {
2614     load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
2615     bind(L_rtm_retry);
2616   }
2617   andi_(R0, mark_word, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
2618   bne(CCR0, IsInflated);
2619 
2620   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
2621     Label L_noincrement;
2622     if (RTMTotalCountIncrRate > 1) {
2623       branch_on_random_using_tb(tmp, (int)RTMTotalCountIncrRate, L_noincrement);
2624     }
2625     assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
2626     load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0);
2627     //atomic_inc_ptr(tmp, /*temp, will be reloaded*/mark_word); We don't increment atomically
2628     ldx(mark_word, tmp);
2629     addi(mark_word, mark_word, 1);
2630     stdx(mark_word, tmp);
2631     bind(L_noincrement);
2632   }
2633   tbegin_();
2634   beq(CCR0, L_on_abort);
2635   ld(mark_word, oopDesc::mark_offset_in_bytes(), obj);         // Reload in transaction, conflicts need to be tracked.
2636   andi(R0, mark_word, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
2637   cmpwi(flag, R0, markOopDesc::unlocked_value);                // bits = 001 unlocked
2638   beq(flag, DONE_LABEL);                                       // all done if unlocked
2639 
2640   if (UseRTMXendForLockBusy) {
2641     tend_();
2642     b(L_decrement_retry);
2643   } else {


2670                                           Label& DONE_LABEL) {
2671   assert(UseRTMLocking, "why call this otherwise?");
2672   Label L_rtm_retry, L_decrement_retry, L_on_abort;
2673   // Clean monitor_value bit to get valid pointer.
2674   int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
2675 
2676   // Store non-null, using boxReg instead of (intptr_t)markOopDesc::unused_mark().
2677   std(boxReg, BasicLock::displaced_header_offset_in_bytes(), boxReg);
2678   const Register tmpReg = boxReg;
2679   const Register owner_addr_Reg = mark_word;
2680   addi(owner_addr_Reg, mark_word, owner_offset);
2681 
2682   if (RTMRetryCount > 0) {
2683     load_const_optimized(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy.
2684     load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort.
2685     bind(L_rtm_retry);
2686   }
2687   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
2688     Label L_noincrement;
2689     if (RTMTotalCountIncrRate > 1) {
2690       branch_on_random_using_tb(R0, (int)RTMTotalCountIncrRate, L_noincrement);
2691     }
2692     assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
2693     load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg);
2694     //atomic_inc_ptr(R0, tmpReg); We don't increment atomically
2695     ldx(tmpReg, R0);
2696     addi(tmpReg, tmpReg, 1);
2697     stdx(tmpReg, R0);
2698     bind(L_noincrement);
2699   }
2700   tbegin_();
2701   beq(CCR0, L_on_abort);
2702   // We don't reload mark word. Will only be reset at safepoint.
2703   ld(R0, 0, owner_addr_Reg); // Load in transaction, conflicts need to be tracked.
2704   cmpdi(flag, R0, 0);
2705   beq(flag, DONE_LABEL);
2706 
2707   if (UseRTMXendForLockBusy) {
2708     tend_();
2709     b(L_decrement_retry);
2710   } else {




2481 
2482 // Perform abort ratio calculation, set no_rtm bit if high ratio.
2483 // input:  rtm_counters_Reg (RTMLockingCounters* address) - KILLED
2484 void MacroAssembler::rtm_abort_ratio_calculation(Register rtm_counters_Reg,
2485                                                  RTMLockingCounters* rtm_counters,
2486                                                  Metadata* method_data) {
2487   Label L_done, L_check_always_rtm1, L_check_always_rtm2;
2488 
2489   if (RTMLockingCalculationDelay > 0) {
2490     // Delay calculation.
2491     ld(rtm_counters_Reg, (RegisterOrConstant)(intptr_t)RTMLockingCounters::rtm_calculation_flag_addr());
2492     cmpdi(CCR0, rtm_counters_Reg, 0);
2493     beq(CCR0, L_done);
2494     load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
2495   }
2496   // Abort ratio calculation only if abort_count > RTMAbortThreshold.
2497   //   Aborted transactions = abort_count * 100
2498   //   All transactions = total_count *  RTMTotalCountIncrRate
2499   //   Set no_rtm bit if (Aborted transactions >= All transactions * RTMAbortRatio)
2500   ld(R0, RTMLockingCounters::abort_count_offset(), rtm_counters_Reg);
2501   if (is_simm(RTMAbortThreshold, 16)) {   // cmpdi can handle 16-bit immediates only.
2502     cmpdi(CCR0, R0, RTMAbortThreshold);
2503     blt(CCR0, L_check_always_rtm2);  // reload of rtm_counters_Reg not necessary
2504   } else {
2505     load_const_optimized(rtm_counters_Reg, RTMAbortThreshold);
2506     cmpd(CCR0, R0, rtm_counters_Reg);
2507     blt(CCR0, L_check_always_rtm1);  // reload of rtm_counters_Reg required
2508   }
2509   mulli(R0, R0, 100);
2510 
2511   const Register tmpReg = rtm_counters_Reg;
2512   ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
2513   mulli(tmpReg, tmpReg, RTMTotalCountIncrRate); // allowable range: int16
2514   mulli(tmpReg, tmpReg, RTMAbortRatio);         // allowable range: int16
2515   cmpd(CCR0, R0, tmpReg);
2516   blt(CCR0, L_check_always_rtm1); // jump to reload
2517   if (method_data != NULL) {
2518     // Set rtm_state to "no rtm" in MDO.
2519     // Not using a metadata relocation. Method and Class Loader are kept alive anyway.
2520     // (See nmethod::metadata_do and CodeBuffer::finalize_oop_references.)
2521     load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg);
2522     atomic_ori_int(R0, tmpReg, NoRTM);
2523   }
2524   b(L_done);
2525 
2526   bind(L_check_always_rtm1);
2527   load_const_optimized(rtm_counters_Reg, (address)rtm_counters, R0); // reload
2528   bind(L_check_always_rtm2);
2529   ld(tmpReg, RTMLockingCounters::total_count_offset(), rtm_counters_Reg);
2530   int64_t thresholdValue = RTMLockingThreshold / RTMTotalCountIncrRate;
2531   if (is_simm(thresholdValue, 16)) {   // cmpdi can handle 16-bit immediates only.
2532     cmpdi(CCR0, tmpReg, thresholdValue);
2533   } else {
2534     load_const_optimized(R0, thresholdValue);
2535     cmpd(CCR0, tmpReg, R0);
2536   }
2537   blt(CCR0, L_done);
2538   if (method_data != NULL) {
2539     // Set rtm_state to "always rtm" in MDO.
2540     // Not using a metadata relocation. See above.
2541     load_const(R0, (address)method_data + MethodData::rtm_state_offset_in_bytes(), tmpReg);
2542     atomic_ori_int(R0, tmpReg, UseRTM);
2543   }
2544   bind(L_done);
2545 }
2546 
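The point of the new is_simm guards is that cmpdi (like mulli, see the "allowable range: int16" comments) encodes its immediate operand in a signed 16-bit field, so a flag value outside -32768..32767 must be materialized with load_const_optimized and compared register-to-register. Written as plain C++, the decision this function encodes is roughly the sketch below (flag values illustrative, helper name made up; NoRTM/UseRTM correspond to the state constants OR-ed into the MDO above):

#include <cstdint>

// Illustrative stand-ins for the -XX flags referenced above.
static const int64_t RTMAbortThreshold     = 1000;
static const int64_t RTMAbortRatio         = 50;     // percent
static const int64_t RTMTotalCountIncrRate = 64;     // total_count is sampled
static const int64_t RTMLockingThreshold   = 10000;

enum RTMStateBits { NoRTM = 0x1, UseRTM = 0x2 };     // values illustrative

// Hypothetical helper mirroring rtm_abort_ratio_calculation() above.
static void update_rtm_state(int64_t abort_count, int64_t total_count, int& rtm_state) {
  // Only consider disabling RTM once enough aborts have been observed.
  if (abort_count >= RTMAbortThreshold &&
      abort_count * 100 >= total_count * RTMTotalCountIncrRate * RTMAbortRatio) {
    rtm_state |= NoRTM;      // atomic_ori_int(R0, tmpReg, NoRTM)
    return;
  }
  // Low abort ratio over enough transactions: commit to always using RTM.
  if (total_count >= RTMLockingThreshold / RTMTotalCountIncrRate) {
    rtm_state |= UseRTM;     // atomic_ori_int(R0, tmpReg, UseRTM)
  }
}
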
2547 // Update counters and perform abort ratio calculation.
2548 // input: abort_status_Reg
2549 void MacroAssembler::rtm_profiling(Register abort_status_Reg, Register temp_Reg,
2550                                    RTMLockingCounters* rtm_counters,
2551                                    Metadata* method_data,
2552                                    bool profile_rtm) {
2553 
2554   assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
2555   // Update rtm counters based on state at abort.
2556   // Reads abort_status_Reg, updates flags.
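The body elided here tallies the abort causes recorded in abort_status_Reg into per-reason counters and then, when profile_rtm is set, runs the (possibly delayed) abort-ratio calculation above. Purely as an outline, with a hypothetical counter struct and an illustrative reason-bit count:

#include <cstdint>

struct RTMCounters {                 // stand-in for RTMLockingCounters
  int64_t total_count;
  int64_t abort_count;
  int64_t abortX_count[6];           // per-reason tallies; the count 6 is illustrative
};

// Hypothetical outline of rtm_profiling(): classify the abort, then let the
// ratio calculation decide whether to flip the rtm_state in the MDO.
static void rtm_profile(uint32_t abort_status, RTMCounters* c, bool profile_rtm) {
  c->abort_count++;
  for (int bit = 0; bit < 6; bit++)  // one counter per abort-reason bit
    if (abort_status & (1u << bit)) c->abortX_count[bit]++;
  if (profile_rtm) {
    // ... followed by the rtm_abort_ratio_calculation() logic shown above
  }
}
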


2615 void MacroAssembler::rtm_stack_locking(ConditionRegister flag,
2616                                        Register obj, Register mark_word, Register tmp,
2617                                        Register retry_on_abort_count_Reg,
2618                                        RTMLockingCounters* stack_rtm_counters,
2619                                        Metadata* method_data, bool profile_rtm,
2620                                        Label& DONE_LABEL, Label& IsInflated) {
2621   assert(UseRTMForStackLocks, "why call this otherwise?");
2622   assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
2623   Label L_rtm_retry, L_decrement_retry, L_on_abort;
2624 
2625   if (RTMRetryCount > 0) {
2626     load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
2627     bind(L_rtm_retry);
2628   }
2629   andi_(R0, mark_word, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
2630   bne(CCR0, IsInflated);
2631 
2632   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
2633     Label L_noincrement;
2634     if (RTMTotalCountIncrRate > 1) {
2635       branch_on_random_using_tb(tmp, RTMTotalCountIncrRate, L_noincrement);
2636     }
2637     assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
2638     load_const_optimized(tmp, (address)stack_rtm_counters->total_count_addr(), R0);
2639     //atomic_inc_ptr(tmp, /*temp, will be reloaded*/mark_word); We don't increment atomically
2640     ldx(mark_word, tmp);
2641     addi(mark_word, mark_word, 1);
2642     stdx(mark_word, tmp);
2643     bind(L_noincrement);
2644   }
2645   tbegin_();
2646   beq(CCR0, L_on_abort);
2647   ld(mark_word, oopDesc::mark_offset_in_bytes(), obj);         // Reload in transaction, conflicts need to be tracked.
2648   andi(R0, mark_word, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
2649   cmpwi(flag, R0, markOopDesc::unlocked_value);                // bits = 001 unlocked
2650   beq(flag, DONE_LABEL);                                       // all done if unlocked
2651 
2652   if (UseRTMXendForLockBusy) {
2653     tend_();
2654     b(L_decrement_retry);
2655   } else {
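
Note how the statistics increment in both locking paths is sampled and deliberately non-atomic: branch_on_random_using_tb derives its pseudo-randomness from the fast-moving time-base register (hence the name), skipping all but roughly one in RTMTotalCountIncrRate executions, and a racy ldx/addi/stdx is acceptable because a lost update only costs a tick of a statistic. A minimal sketch of the same idea, with the time-base read replaced by a cheap clock:

#include <chrono>
#include <cstdint>

// Cheap, fast-changing value standing in for the PPC time-base register read
// that branch_on_random_using_tb uses as a pseudo-random source.
static uint64_t cheap_random() {
  return (uint64_t)std::chrono::steady_clock::now().time_since_epoch().count();
}

// Sketch of the sampled, non-atomic counter bump guarded by
// PrintPreciseRTMLockingStatistics/profile_rtm above.
static void sampled_increment(uint64_t* total_count, long incr_rate) {
  if (incr_rate > 1 && cheap_random() % (uint64_t)incr_rate != 0)
    return;                          // branch_on_random_using_tb -> L_noincrement
  *total_count += 1;                 // ldx / addi / stdx, lost updates tolerated
}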


2682                                           Label& DONE_LABEL) {
2683   assert(UseRTMLocking, "why call this otherwise?");
2684   Label L_rtm_retry, L_decrement_retry, L_on_abort;
2685   // Clean monitor_value bit to get valid pointer.
2686   int owner_offset = ObjectMonitor::owner_offset_in_bytes() - markOopDesc::monitor_value;
2687 
2688   // Store non-null, using boxReg instead of (intptr_t)markOopDesc::unused_mark().
2689   std(boxReg, BasicLock::displaced_header_offset_in_bytes(), boxReg);
2690   const Register tmpReg = boxReg;
2691   const Register owner_addr_Reg = mark_word;
2692   addi(owner_addr_Reg, mark_word, owner_offset);
2693 
2694   if (RTMRetryCount > 0) {
2695     load_const_optimized(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy.
2696     load_const_optimized(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort.
2697     bind(L_rtm_retry);
2698   }
2699   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
2700     Label L_noincrement;
2701     if (RTMTotalCountIncrRate > 1) {
2702       branch_on_random_using_tb(R0, RTMTotalCountIncrRate, L_noincrement);
2703     }
2704     assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
2705     load_const(R0, (address)rtm_counters->total_count_addr(), tmpReg);
2706     //atomic_inc_ptr(R0, tmpReg); We don't increment atomically
2707     ldx(tmpReg, R0);
2708     addi(tmpReg, tmpReg, 1);
2709     stdx(tmpReg, R0);
2710     bind(L_noincrement);
2711   }
2712   tbegin_();
2713   beq(CCR0, L_on_abort);
2714   // We don't reload mark word. Will only be reset at safepoint.
2715   ld(R0, 0, owner_addr_Reg); // Load in transaction, conflicts need to be tracked.
2716   cmpdi(flag, R0, 0);
2717   beq(flag, DONE_LABEL);
2718 
2719   if (UseRTMXendForLockBusy) {
2720     tend_();
2721     b(L_decrement_retry);
2722   } else {
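
One detail from the top of this hunk worth spelling out: for an inflated lock the mark word holds the ObjectMonitor pointer with the monitor_value tag (2) set in its low bits, and rather than masking the tag off first, the addi folds the tag subtraction into the owner field's displacement. In plain C++ (the owner offset here is hypothetical):

#include <cstdint>

static const intptr_t monitor_value         = 2;   // low-bit tag: "inflated"
static const intptr_t owner_offset_in_bytes = 8;   // hypothetical field offset

// An inflated mark word is (ObjectMonitor* + monitor_value). The tag
// subtraction and the field offset fold into a single displacement, which is
// exactly what owner_offset = owner_offset_in_bytes() - monitor_value does.
static void** owner_addr(intptr_t mark_word) {
  return reinterpret_cast<void**>(mark_word + owner_offset_in_bytes - monitor_value);
}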

