
src/cpu/x86/vm/macroAssembler_x86.cpp

rev 13015 : [mq]: 8180612.patch


1475                                        Metadata* method_data, bool profile_rtm,
1476                                        Label& DONE_LABEL, Label& IsInflated) {
1477   assert(UseRTMForStackLocks, "why call this otherwise?");
1478   assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1479   assert(tmpReg == rax, "");
1480   assert(scrReg == rdx, "");
1481   Label L_rtm_retry, L_decrement_retry, L_on_abort;
1482 
1483   if (RTMRetryCount > 0) {
1484     movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1485     bind(L_rtm_retry);
1486   }
1487   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
1488   testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
1489   jcc(Assembler::notZero, IsInflated);
1490 
1491   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1492     Label L_noincrement;
1493     if (RTMTotalCountIncrRate > 1) {
1494       // tmpReg, scrReg and flags are killed
1495       branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1496     }
1497     assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
1498     atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
1499     bind(L_noincrement);
1500   }
1501   xbegin(L_on_abort);
1502   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));       // fetch markword
1503   andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
1504   cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
1505   jcc(Assembler::equal, DONE_LABEL);        // all done if unlocked
1506 
1507   Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1508   if (UseRTMXendForLockBusy) {
1509     xend();
1510     movptr(abort_status_Reg, 0x2);   // Set the abort status to 2 (so we can retry)
1511     jmp(L_decrement_retry);
1512   }
1513   else {
1514     xabort(0);
1515   }


1536                                           Label& DONE_LABEL) {
1537   assert(UseRTMLocking, "why call this otherwise?");
1538   assert(tmpReg == rax, "");
1539   assert(scrReg == rdx, "");
1540   Label L_rtm_retry, L_decrement_retry, L_on_abort;
1541   int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1542 
1543   // Without cast to int32_t a movptr will destroy r10 which is typically obj
1544   movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1545   movptr(boxReg, tmpReg); // Save ObjectMonitor address
1546 
1547   if (RTMRetryCount > 0) {
1548     movl(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy
1549     movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1550     bind(L_rtm_retry);
1551   }
1552   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1553     Label L_noincrement;
1554     if (RTMTotalCountIncrRate > 1) {
1555       // tmpReg, scrReg and flags are killed
1556       branch_on_random_using_rdtsc(tmpReg, scrReg, (int)RTMTotalCountIncrRate, L_noincrement);
1557     }
1558     assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1559     atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
1560     bind(L_noincrement);
1561   }
1562   xbegin(L_on_abort);
1563   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
1564   movptr(tmpReg, Address(tmpReg, owner_offset));
1565   testptr(tmpReg, tmpReg);
1566   jcc(Assembler::zero, DONE_LABEL);
1567   if (UseRTMXendForLockBusy) {
1568     xend();
1569     jmp(L_decrement_retry);
1570   }
1571   else {
1572     xabort(0);
1573   }
1574   bind(L_on_abort);
1575   Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1576   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
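Both fragments above follow the same RTM fast-path shape: start a transaction with xbegin, confirm the object still looks unlocked (mark-word bits 001 in the stack-locked case, a NULL ObjectMonitor owner in the inflated case), and fall through into the critical section with the transaction left open; if the lock is busy, either xend and retry or xabort(0) into the slow path. Below is a minimal user-level C++ sketch of that lock-elision pattern using the GCC/Clang RTM intrinsics rather than HotSpot's assembler; the lock word, retry bound and spin fallback are illustrative assumptions, and the sketch commits at the end of the critical section instead of at a separate unlock.

#include <immintrin.h>   // _xbegin/_xend/_xabort/_XBEGIN_STARTED; compile with -mrtm
#include <atomic>

static std::atomic<int> lock_word{0};   // hypothetical lock word: 0 = free, 1 = held

template <typename Fn>
void elided_lock(Fn critical_section) {
  for (int retries = 0; retries < 5; ++retries) {            // cf. RTMRetryCount
    unsigned status = _xbegin();                             // cf. xbegin(L_on_abort)
    if (status == _XBEGIN_STARTED) {
      if (lock_word.load(std::memory_order_relaxed) == 0) {  // cf. the mark-word / owner test
        critical_section();   // runs transactionally; the lock word is in our read set,
        _xend();              // so a competing real acquisition aborts the transaction
        return;
      }
      _xabort(0);                                            // lock busy, cf. xabort(0) above
    }
    // status now carries the abort reason, cf. abort_status_Reg (RAX) above
  }
  while (lock_word.exchange(1) != 0) { }                     // give up: take the lock for real
  critical_section();
  lock_word.store(0);
}

In the HotSpot code above, choosing xend over xabort when the lock is busy (UseRTMXendForLockBusy) commits the still-empty transaction and then hand-crafts an abort status of 2 so the retry path runs anyway, presumably to avoid the cost of a real abort.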




1475                                        Metadata* method_data, bool profile_rtm,
1476                                        Label& DONE_LABEL, Label& IsInflated) {
1477   assert(UseRTMForStackLocks, "why call this otherwise?");
1478   assert(!UseBiasedLocking, "Biased locking is not supported with RTM locking");
1479   assert(tmpReg == rax, "");
1480   assert(scrReg == rdx, "");
1481   Label L_rtm_retry, L_decrement_retry, L_on_abort;
1482 
1483   if (RTMRetryCount > 0) {
1484     movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1485     bind(L_rtm_retry);
1486   }
1487   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
1488   testptr(tmpReg, markOopDesc::monitor_value);  // inflated vs stack-locked|neutral|biased
1489   jcc(Assembler::notZero, IsInflated);
1490 
1491   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1492     Label L_noincrement;
1493     if (RTMTotalCountIncrRate > 1) {
1494       // tmpReg, scrReg and flags are killed
1495       branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
1496     }
1497     assert(stack_rtm_counters != NULL, "should not be NULL when profiling RTM");
1498     atomic_incptr(ExternalAddress((address)stack_rtm_counters->total_count_addr()), scrReg);
1499     bind(L_noincrement);
1500   }
1501   xbegin(L_on_abort);
1502   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));       // fetch markword
1503   andptr(tmpReg, markOopDesc::biased_lock_mask_in_place); // look at 3 lock bits
1504   cmpptr(tmpReg, markOopDesc::unlocked_value);            // bits = 001 unlocked
1505   jcc(Assembler::equal, DONE_LABEL);        // all done if unlocked
1506 
1507   Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1508   if (UseRTMXendForLockBusy) {
1509     xend();
1510     movptr(abort_status_Reg, 0x2);   // Set the abort status to 2 (so we can retry)
1511     jmp(L_decrement_retry);
1512   }
1513   else {
1514     xabort(0);
1515   }


1536                                           Label& DONE_LABEL) {
1537   assert(UseRTMLocking, "why call this otherwise?");
1538   assert(tmpReg == rax, "");
1539   assert(scrReg == rdx, "");
1540   Label L_rtm_retry, L_decrement_retry, L_on_abort;
1541   int owner_offset = OM_OFFSET_NO_MONITOR_VALUE_TAG(owner);
1542 
1543   // Without cast to int32_t a movptr will destroy r10 which is typically obj
1544   movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark()));
1545   movptr(boxReg, tmpReg); // Save ObjectMonitor address
1546 
1547   if (RTMRetryCount > 0) {
1548     movl(retry_on_busy_count_Reg, RTMRetryCount);  // Retry on lock busy
1549     movl(retry_on_abort_count_Reg, RTMRetryCount); // Retry on abort
1550     bind(L_rtm_retry);
1551   }
1552   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
1553     Label L_noincrement;
1554     if (RTMTotalCountIncrRate > 1) {
1555       // tmpReg, scrReg and flags are killed
1556       branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement);
1557     }
1558     assert(rtm_counters != NULL, "should not be NULL when profiling RTM");
1559     atomic_incptr(ExternalAddress((address)rtm_counters->total_count_addr()), scrReg);
1560     bind(L_noincrement);
1561   }
1562   xbegin(L_on_abort);
1563   movptr(tmpReg, Address(objReg, oopDesc::mark_offset_in_bytes()));
1564   movptr(tmpReg, Address(tmpReg, owner_offset));
1565   testptr(tmpReg, tmpReg);
1566   jcc(Assembler::zero, DONE_LABEL);
1567   if (UseRTMXendForLockBusy) {
1568     xend();
1569     jmp(L_decrement_retry);
1570   }
1571   else {
1572     xabort(0);
1573   }
1574   bind(L_on_abort);
1575   Register abort_status_Reg = tmpReg; // status of abort is stored in RAX
1576   if (PrintPreciseRTMLockingStatistics || profile_rtm) {
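The two versions on this page differ only in the call branch_on_random_using_rdtsc(tmpReg, scrReg, RTMTotalCountIncrRate, L_noincrement) at lines 1495 and 1556, where the (int) cast on RTMTotalCountIncrRate is present in one version and absent in the other. That call is used to sample the counter update: the shared total_count cell is bumped on only about one in RTMTotalCountIncrRate executions, with the time-stamp counter as a cheap pseudo-random source, so the statistics stay meaningful without making every lock attempt pay for a contended atomic add. A rough C++ sketch of that sampling idea follows, assuming a power-of-two rate; the names are illustrative, not HotSpot's.

#include <x86intrin.h>   // __rdtsc(); GCC/Clang, x86 only
#include <atomic>
#include <cstdint>

static std::atomic<uint64_t> total_count{0};   // stands in for rtm_counters->total_count_addr()
static const uint64_t kIncrRate = 64;          // stands in for RTMTotalCountIncrRate (power of two here)

// Bump the shared counter on roughly one in kIncrRate calls, keyed off the
// low bits of the time-stamp counter, so profiling stays cheap under contention.
inline void sampled_total_count_incr() {
  if (kIncrRate > 1 && (__rdtsc() & (kIncrRate - 1)) != 0) {
    return;                                               // cf. the branch to L_noincrement
  }
  total_count.fetch_add(1, std::memory_order_relaxed);    // cf. atomic_incptr(...total_count_addr()...)
}

With a power-of-two rate the sample test in this sketch is a single mask-and-branch, so the common skipped case costs only a few instructions on top of rdtsc.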

