src/hotspot/cpu/x86/macroAssembler_x86.cpp

1603     bind(L_decrement_retry);
1604     // Spin and retry if lock is busy.
1605     rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
1606   }
1607   else {
1608     bind(L_decrement_retry);
1609   }
1610 }
1611 
1612 #endif //  INCLUDE_RTM_OPT
1613 
1614 // Fast_Lock and Fast_Unlock used by C2
1615 
1616 // Because the transitions from emitted code to the runtime
1617 // monitorenter/exit helper stubs are so slow, it's critical that
1618 // we inline both the stack-locking fast path and the inflated fast path.
1619 //
1620 // See also: cmpFastLock and cmpFastUnlock.
1621 //
1622 // What follows is a specialized inline transliteration of the code
1623 // in slow_enter() and slow_exit().  If we're concerned about I$ bloat
1624 // another option would be to emit TrySlowEnter and TrySlowExit methods
1625 // at startup-time.  These methods would accept arguments as
1626 // (rax=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
1627 // indications in the icc.ZFlag.  Fast_Lock and Fast_Unlock would simply
1628 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
1629 // In practice, however, the # of lock sites is bounded and is usually small.
1630 // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
1631 // if the processor uses simple bimodal branch predictors keyed by EIP,
1632 // since the helper routines would be called from multiple synchronization
1633 // sites.
1634 //
1635 // An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
1636 // in Java - using j.u.c and Unsafe - and just bind the lock and unlock sites
1637 // to those specialized methods.  That'd give us a mostly platform-independent
1638 // implementation that the JITs could optimize and inline at their pleasure.
1639 // Done correctly, the only time we'd need to cross to native code would be
1640 // to park() or unpark() threads.  We'd also need a few more unsafe operators
1641 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
1642 // (b) provide explicit barriers or fence operations.
1643 //
1644 // TODO:




1603     bind(L_decrement_retry);
1604     // Spin and retry if lock is busy.
1605     rtm_retry_lock_on_busy(retry_on_busy_count_Reg, boxReg, tmpReg, scrReg, L_rtm_retry);
1606   }
1607   else {
1608     bind(L_decrement_retry);
1609   }
1610 }
1611 
1612 #endif //  INCLUDE_RTM_OPT
1613 
1614 // Fast_Lock and Fast_Unlock used by C2
1615 
1616 // Because the transitions from emitted code to the runtime
1617 // monitorenter/exit helper stubs are so slow, it's critical that
1618 // we inline both the stack-locking fast path and the inflated fast path.
1619 //
1620 // See also: cmpFastLock and cmpFastUnlock.
1621 //
1622 // What follows is a specialized inline transliteration of the code
1623 // in enter() and exit(). If we're concerned about I$ bloat another
1624 // option would be to emit TrySlowEnter and TrySlowExit methods
1625 // at startup-time.  These methods would accept arguments as
1626 // (rax=Obj, rbx=Self, rcx=box, rdx=Scratch) and return success-failure
1627 // indications in the icc.ZFlag.  Fast_Lock and Fast_Unlock would simply
1628 // marshal the arguments and emit calls to TrySlowEnter and TrySlowExit.
1629 // In practice, however, the # of lock sites is bounded and is usually small.
1630 // Besides the call overhead, TrySlowEnter and TrySlowExit might suffer
1631 // if the processor uses simple bimodal branch predictors keyed by EIP,
1632 // since the helper routines would be called from multiple synchronization
1633 // sites.
1634 //
1635 // An even better approach would be to write "MonitorEnter()" and "MonitorExit()"
1636 // in Java - using j.u.c and Unsafe - and just bind the lock and unlock sites
1637 // to those specialized methods.  That'd give us a mostly platform-independent
1638 // implementation that the JITs could optimize and inline at their pleasure.
1639 // Done correctly, the only time we'd need to cross to native code would be
1640 // to park() or unpark() threads.  We'd also need a few more unsafe operators
1641 // to (a) prevent compiler-JIT reordering of non-volatile accesses, and
1642 // (b) provide explicit barriers or fence operations.
1643 //
1644 // TODO:
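The comment above floats the idea of writing MonitorEnter()/MonitorExit() in Java on top of j.u.c and Unsafe, crossing to native only to park() or unpark() a thread. As a rough, hypothetical sketch of that shape (this is not HotSpot code; the class and field names below are illustrative only), a CAS-based fast path with LockSupport covering the blocking slow path could look roughly like this:

import java.lang.invoke.VarHandle;
import java.util.concurrent.ConcurrentLinkedQueue;
import java.util.concurrent.atomic.AtomicReference;
import java.util.concurrent.locks.LockSupport;

// Illustrative Java-level monitor in the spirit of the comment above.
// The uncontended path is a single CAS; the only native crossings are
// LockSupport.park()/unpark().
final class JavaMonitor {
  private final AtomicReference<Thread> owner = new AtomicReference<>();
  private final ConcurrentLinkedQueue<Thread> waiters = new ConcurrentLinkedQueue<>();

  void enter() {
    Thread self = Thread.currentThread();
    if (owner.compareAndSet(null, self)) {
      return;                         // uncontended fast path: one CAS, no native call
    }
    waiters.add(self);
    while (!owner.compareAndSet(null, self)) {
      LockSupport.park(this);         // contended path: block in native via park()
    }
    waiters.remove(self);
  }

  void exit() {
    VarHandle.releaseFence();         // explicit fence, per point (b) above; redundant with
    owner.set(null);                  // the volatile write here, shown only for illustration
    Thread successor = waiters.peek();
    if (successor != null) {
      LockSupport.unpark(successor);  // hand the wakeup to a parked waiter
    }
  }
}

Barging is possible in this sketch (a newly arriving thread can win the CAS ahead of a parked waiter), and a real implementation would still need the extra Unsafe operators the comment mentions to control compiler reordering of non-volatile accesses; the code only shows where park(), unpark(), and an explicit fence would sit.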

