< prev index next >

src/hotspot/cpu/x86/macroAssembler_x86.hpp

Print this page
rev 56639 : loosen a couple more counter checks due to races observed in testing; simplify om_release() extraction of mid since list head or cur_mid_in_use is marked; simplify deflate_monitor_list() extraction of mid since there are no parallel deleters due to the safepoint; simplify deflate_monitor_list_using_JT() extraction of mid since list head or cur_mid_in_use is marked; prepend_block_to_lists() - simplify based on David H's comments; does not need load_acquire() or release_store() because of the cmpxchg(); prepend_to_common() - simplify to use mark_next_loop() for m and use mark_list_head() and release_store() for the non-empty list case; add more debugging for "Non-balanced monitor enter/exit" failure mode; fix race in inflate() in the "CASE: neutral" code path; install_displaced_markword_in_object() does not need to clear the header field since that is handled when the ObjectMonitor is moved from the global free list; LSuccess should clear boxReg to set ICC.ZF=1 to avoid depending on existing boxReg contents; update fast_unlock() to detect when object no longer refers to the same ObjectMonitor and take fast path exit instead; clarify fast_lock() code where we detect when object no longer refers to the same ObjectMonitor; add/update comments for movptr() calls where we move a literal into an Address; remove set_owner(); refactor setting of owner field into set_owner_from(2 versions), set_owner_from_BasicLock(), and try_set_owner_from(); the new functions include monitorinflation+owner logging; extract debug code from v2.06 and v2.07 and move to v2.07.debug; change 'jccb' -> 'jcc' and 'jmpb' -> 'jmp' as needed; checkpoint initial version of MacroAssembler::inc_om_ref_count(); update LP64 MacroAssembler::fast_lock() and fast_unlock() to use inc_om_ref_count(); fast_lock() return flag setting logic can use 'testptr(tmpReg, tmpReg)' instead of 'cmpptr(tmpReg, 0)' since that's more efficient; fast_unlock() LSuccess return flag setting logic can use 'testl(boxReg, 0)' instead of 'xorptr(boxReg, boxReg)' since that's more efficient; cleanup "fast-path" vs "fast path" and "slow-path" vs "slow path"; update MacroAssembler::rtm_inflated_locking() to use inc_om_ref_count(); update MacroAssembler::fast_lock() to preserve the flags before decrementing ref_count and restore the flags afterwards; this is cleaner than depending on the contents of rax/tmpReg; coleenp CR - refactor async monitor deflation work from ServiceThread::service_thread_entry() to ObjectSynchronizer::deflate_idle_monitors_using_JT(); rehn,eosterlund CR - add support for HandshakeAfterDeflateIdleMonitors for platforms that don't have ObjectMonitor ref_count support implemented in C2 fast_lock() and fast_unlock().


 664 
 665   // Biased locking support
 666   // lock_reg and obj_reg must be loaded up with the appropriate values.
 667   // swap_reg must be rax, and is killed.
 668   // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
 669   // be killed; if not supplied, push/pop will be used internally to
 670   // allocate a temporary (inefficient, avoid if possible).
 671   // Optional slow case is for implementations (interpreter and C1) which branch to
 672   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 673   // Returns offset of first potentially-faulting instruction for null
 674   // check info (currently consumed only by C1). If
 675   // swap_reg_contains_mark is true then returns -1 as it is assumed
 676   // the calling code has already passed any potential faults.
 677   int biased_locking_enter(Register lock_reg, Register obj_reg,
 678                            Register swap_reg, Register tmp_reg,
 679                            bool swap_reg_contains_mark,
 680                            Label& done, Label* slow_case = NULL,
 681                            BiasedLockingCounters* counters = NULL);
 682   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 683 #ifdef COMPILER2

 684   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
 685   // See full description in macroAssembler_x86.cpp.
 686   void fast_lock(Register obj, Register box, Register tmp,
 687                  Register scr, Register cx1, Register cx2,
 688                  BiasedLockingCounters* counters,
 689                  RTMLockingCounters* rtm_counters,
 690                  RTMLockingCounters* stack_rtm_counters,
 691                  Metadata* method_data,
 692                  bool use_rtm, bool profile_rtm);
 693   void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
 694 #if INCLUDE_RTM_OPT  // RTM = Intel Restricted Transactional Memory lock elision support
 695   void rtm_counters_update(Register abort_status, Register rtm_counters);
 696   void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
 697   void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
 698                                    RTMLockingCounters* rtm_counters,
 699                                    Metadata* method_data);
 700   void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
 701                      RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
 702   void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
 703   void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);




 664 
 665   // Biased locking support
 666   // lock_reg and obj_reg must be loaded up with the appropriate values.
 667   // swap_reg must be rax, and is killed.
 668   // tmp_reg is optional. If it is supplied (i.e., != noreg) it will
 669   // be killed; if not supplied, push/pop will be used internally to
 670   // allocate a temporary (inefficient, avoid if possible).
 671   // Optional slow case is for implementations (interpreter and C1) which branch to
 672   // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
 673   // Returns offset of first potentially-faulting instruction for null
 674   // check info (currently consumed only by C1). If
 675   // swap_reg_contains_mark is true then returns -1 as it is assumed
 676   // the calling code has already passed any potential faults.
 677   int biased_locking_enter(Register lock_reg, Register obj_reg,
 678                            Register swap_reg, Register tmp_reg,
 679                            bool swap_reg_contains_mark,
 680                            Label& done, Label* slow_case = NULL,
 681                            BiasedLockingCounters* counters = NULL);
 682   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
 683 #ifdef COMPILER2
 684   void inc_om_ref_count(Register obj_reg, Register om_reg, Register temp_reg, Label& done);  // Increments the ObjectMonitor ref_count; used by fast_lock()/fast_unlock() (see macroAssembler_x86.cpp). NOTE(review): exact register/flag effects should be confirmed against the .cpp definition.
 685   // Code used by cmpFastLock and cmpFastUnlock mach instructions in .ad file.
 686   // See full description in macroAssembler_x86.cpp.
 687   void fast_lock(Register obj, Register box, Register tmp,
 688                  Register scr, Register cx1, Register cx2,
 689                  BiasedLockingCounters* counters,
 690                  RTMLockingCounters* rtm_counters,
 691                  RTMLockingCounters* stack_rtm_counters,
 692                  Metadata* method_data,
 693                  bool use_rtm, bool profile_rtm);
 694   void fast_unlock(Register obj, Register box, Register tmp, bool use_rtm);
 695 #if INCLUDE_RTM_OPT  // RTM = Intel Restricted Transactional Memory lock elision support
 696   void rtm_counters_update(Register abort_status, Register rtm_counters);
 697   void branch_on_random_using_rdtsc(Register tmp, Register scr, int count, Label& brLabel);
 698   void rtm_abort_ratio_calculation(Register tmp, Register rtm_counters_reg,
 699                                    RTMLockingCounters* rtm_counters,
 700                                    Metadata* method_data);
 701   void rtm_profiling(Register abort_status_Reg, Register rtm_counters_Reg,
 702                      RTMLockingCounters* rtm_counters, Metadata* method_data, bool profile_rtm);
 703   void rtm_retry_lock_on_abort(Register retry_count, Register abort_status, Label& retryLabel);
 704   void rtm_retry_lock_on_busy(Register retry_count, Register box, Register tmp, Register scr, Label& retryLabel);


< prev index next >