src/hotspot/share/runtime/synchronizer.cpp

rev 51786 : imported patch syncknobs-06-Knob_Verbose
rev 51787 : imported patch syncknobs-07-Knob_VerifyInUse


1082   const int MAXPRIVATE = 1024;
1083   for (;;) {
1084     ObjectMonitor * m;
1085 
1086     // 1: try to allocate from the thread's local omFreeList.
1087     // Threads will attempt to allocate first from their local list, then
1088     // from the global list, and only after those attempts fail will the thread
1089     // attempt to instantiate new monitors.   Thread-local free lists take
1090     // heat off the gListLock and improve allocation latency, as well as reducing
1091     // coherency traffic on the shared global list.
1092     m = Self->omFreeList;
1093     if (m != NULL) {
1094       Self->omFreeList = m->FreeNext;
1095       Self->omFreeCount--;
1096       // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
1097       guarantee(m->object() == NULL, "invariant");
1098       if (MonitorInUseLists) {
1099         m->FreeNext = Self->omInUseList;
1100         Self->omInUseList = m;
1101         Self->omInUseCount++;
1102         if (ObjectMonitor::Knob_VerifyInUse) {
1103           verifyInUse(Self);
1104         }
1105       } else {
1106         m->FreeNext = NULL;
1107       }
1108       return m;
1109     }
1110 
1111     // 2: try to allocate from the global gFreeList
1112     // CONSIDER: use muxTry() instead of muxAcquire().
1113     // If the muxTry() fails then drop immediately into case 3.
1114     // If we're using thread-local free lists then try
1115     // to reprovision the caller's free list.
1116     if (gFreeList != NULL) {
1117       // Reprovision the thread's omFreeList.
1118       // Use bulk transfers to reduce the allocation rate and heat
1119       // on various locks.
1120       Thread::muxAcquire(&gListLock, "omAlloc");
1121       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1122         gMonitorFreeCount--;
1123         ObjectMonitor * take = gFreeList;
1124         gFreeList = take->FreeNext;
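
The excerpt above describes the allocation policy: pop from the thread's omFreeList first, fall back to a bulk refill from the locked global gFreeList, and only then instantiate new monitors. The following standalone sketch (not HotSpot code) illustrates that two-tier pattern; names such as Monitor, ThreadCache, g_free_list and g_list_lock are illustrative stand-ins, and std::mutex stands in for gListLock/muxAcquire.

#include <mutex>

struct Monitor {
  Monitor* free_next = nullptr;     // intrusive link, in the role of ObjectMonitor::FreeNext
};

static Monitor*   g_free_list = nullptr;   // global free list (plays the role of gFreeList)
static std::mutex g_list_lock;             // stand-in for gListLock

struct ThreadCache {
  Monitor* free_list  = nullptr;    // per-thread free list (omFreeList)
  int      free_count = 0;
  int      provision  = 32;         // bulk-transfer size (omFreeProvision)
};

Monitor* allocate(ThreadCache* self) {
  for (;;) {
    // 1: per-thread list -- no locking, no coherency traffic on the shared list.
    if (Monitor* m = self->free_list) {
      self->free_list = m->free_next;
      self->free_count--;
      m->free_next = nullptr;
      return m;
    }
    // 2: refill the per-thread list in bulk from the lock-protected global list.
    {
      std::lock_guard<std::mutex> guard(g_list_lock);
      for (int i = self->provision; --i >= 0 && g_free_list != nullptr;) {
        Monitor* take = g_free_list;
        g_free_list   = take->free_next;
        take->free_next = self->free_list;
        self->free_list = take;
        self->free_count++;
      }
    }
    if (self->free_list != nullptr) continue;   // retry step 1
    // 3: both lists are empty -- instantiate a fresh monitor (block allocation elided).
    return new Monitor();
  }
}

The bulk transfer amortizes the cost of taking the global lock over many subsequent lock-free per-thread allocations, which is the "heat off the gListLock" effect the comment refers to.
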


1222 // scavenger -- deflate_idle_monitors -- from reclaiming them.
1223 
1224 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1225                                    bool fromPerThreadAlloc) {
1226   guarantee(m->object() == NULL, "invariant");
1227   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
1228   // Remove from omInUseList
1229   if (MonitorInUseLists && fromPerThreadAlloc) {
1230     ObjectMonitor* cur_mid_in_use = NULL;
1231     bool extracted = false;
1232     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
1233       if (m == mid) {
1234         // extract from per-thread in-use list
1235         if (mid == Self->omInUseList) {
1236           Self->omInUseList = mid->FreeNext;
1237         } else if (cur_mid_in_use != NULL) {
1238           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1239         }
1240         extracted = true;
1241         Self->omInUseCount--;
1242         if (ObjectMonitor::Knob_VerifyInUse) {
1243           verifyInUse(Self);
1244         }
1245         break;
1246       }
1247     }
1248     assert(extracted, "Should have extracted from in-use list");
1249   }
1250 
1251   // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
1252   m->FreeNext = Self->omFreeList;
1253   Self->omFreeList = m;
1254   Self->omFreeCount++;
1255 }
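
omRelease above unlinks the monitor from the per-thread in-use list by walking it with a trailing pointer, then pushes it onto the per-thread free list, reusing the single FreeNext link for both lists. A minimal standalone sketch of that list manipulation follows (not the HotSpot sources); Node, Lists and release are illustrative names only.

#include <cassert>
#include <cstddef>

struct Node {
  Node* next = nullptr;             // shared link field, like ObjectMonitor::FreeNext
};

struct Lists {
  Node* in_use_head  = nullptr;
  Node* free_head    = nullptr;
  int   in_use_count = 0;
  int   free_count   = 0;
};

void release(Lists* self, Node* m) {
  // Unlink m from the in-use list.
  bool extracted = false;
  Node* prev = nullptr;
  for (Node* cur = self->in_use_head; cur != nullptr; prev = cur, cur = cur->next) {
    if (cur == m) {
      if (cur == self->in_use_head) {
        self->in_use_head = cur->next;   // removing the head node
      } else {
        prev->next = cur->next;          // removing an interior node
      }
      extracted = true;
      self->in_use_count--;
      break;
    }
  }
  assert(extracted && "node must be on the in-use list");
  // The link field is shared between the two lists, so it is simply
  // overwritten when the node is pushed onto the free list.
  m->next = self->free_head;
  self->free_head = m;
  self->free_count++;
}
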
1256 
1257 // Return the monitors of a moribund thread's local free list to
1258 // the global free list.  Typically a thread calls omFlush() when
1259 // it's dying.  We could also consider having the VM thread steal
1260 // monitors from threads that have not run java code over a few
1261 // consecutive STW safepoints.  Relatedly, we might decay
1262 // omFreeProvision at STW safepoints.
1263 //
1264 // Also return the monitors of a moribund thread's omInUseList to


1760   // TODO: Add objectMonitor leak detection.
1761   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1762   GVars.stwRandom = os::random();
1763   GVars.stwCycle++;
1764 }
1765 
1766 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
1767   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1768   if (!MonitorInUseLists) return;
1769 
1770   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
1771   ObjectMonitor * freeTailp = NULL;
1772 
1773   int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);
1774 
1775   Thread::muxAcquire(&gListLock, "scavenge - return");
1776 
1777   // Adjust counters
1778   counters->nInCirculation += thread->omInUseCount;
1779   thread->omInUseCount -= deflated_count;
1780   if (ObjectMonitor::Knob_VerifyInUse) {
1781     verifyInUse(thread);
1782   }
1783   counters->nScavenged += deflated_count;
1784   counters->nInuse += thread->omInUseCount;
1785 
1786   // Move the scavenged monitors back to the global free list.
1787   if (freeHeadp != NULL) {
1788     guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
1789     assert(freeTailp->FreeNext == NULL, "invariant");
1790 
1791     // constant-time list splice - prepend scavenged segment to gFreeList
1792     freeTailp->FreeNext = gFreeList;
1793     gFreeList = freeHeadp;
1794   }
1795   Thread::muxRelease(&gListLock);
1796 }
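
The deflation path above collects scavenged monitors on a local list with separate head and tail pointers so the transfer back to gFreeList is a constant-time splice under gListLock. A small standalone sketch of that splice (not HotSpot code; Node and splice_to_global are illustrative names) is:

#include <cassert>
#include <cstddef>

struct Node {
  Node* next = nullptr;
};

static Node* g_free_list = nullptr;   // plays the role of gFreeList

// Prepend the segment [head .. tail] to the global free list in O(1).
// The caller is assumed to hold whatever lock protects g_free_list
// (gListLock in the code above).
void splice_to_global(Node* head, Node* tail) {
  if (head == nullptr) return;        // nothing was scavenged
  assert(tail != nullptr && tail->next == nullptr);
  tail->next  = g_free_list;
  g_free_list = head;
}

Because only the tail's link and the global head are written, the cost of returning the scavenged segment does not depend on how many monitors were deflated.
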
1797 
1798 // Monitor cleanup on JavaThread::exit
1799 
1800 // Iterate through monitor cache and attempt to release thread's monitors
1801 // Gives up on a particular monitor if an exception occurs, but continues
1802 // the overall iteration, swallowing the exception.




1082   const int MAXPRIVATE = 1024;
1083   for (;;) {
1084     ObjectMonitor * m;
1085 
1086     // 1: try to allocate from the thread's local omFreeList.
1087     // Threads will attempt to allocate first from their local list, then
1088     // from the global list, and only after those attempts fail will the thread
1089     // attempt to instantiate new monitors.   Thread-local free lists take
1090     // heat off the gListLock and improve allocation latency, as well as reducing
1091     // coherency traffic on the shared global list.
1092     m = Self->omFreeList;
1093     if (m != NULL) {
1094       Self->omFreeList = m->FreeNext;
1095       Self->omFreeCount--;
1096       // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
1097       guarantee(m->object() == NULL, "invariant");
1098       if (MonitorInUseLists) {
1099         m->FreeNext = Self->omInUseList;
1100         Self->omInUseList = m;
1101         Self->omInUseCount++;



1102       } else {
1103         m->FreeNext = NULL;
1104       }
1105       return m;
1106     }
1107 
1108     // 2: try to allocate from the global gFreeList
1109     // CONSIDER: use muxTry() instead of muxAcquire().
1110     // If the muxTry() fails then drop immediately into case 3.
1111     // If we're using thread-local free lists then try
1112     // to reprovision the caller's free list.
1113     if (gFreeList != NULL) {
1114       // Reprovision the thread's omFreeList.
1115       // Use bulk transfers to reduce the allocation rate and heat
1116       // on various locks.
1117       Thread::muxAcquire(&gListLock, "omAlloc");
1118       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1119         gMonitorFreeCount--;
1120         ObjectMonitor * take = gFreeList;
1121         gFreeList = take->FreeNext;


1219 // scavenger -- deflate_idle_monitors -- from reclaiming them.
1220 
1221 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1222                                    bool fromPerThreadAlloc) {
1223   guarantee(m->object() == NULL, "invariant");
1224   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
1225   // Remove from omInUseList
1226   if (MonitorInUseLists && fromPerThreadAlloc) {
1227     ObjectMonitor* cur_mid_in_use = NULL;
1228     bool extracted = false;
1229     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
1230       if (m == mid) {
1231         // extract from per-thread in-use list
1232         if (mid == Self->omInUseList) {
1233           Self->omInUseList = mid->FreeNext;
1234         } else if (cur_mid_in_use != NULL) {
1235           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1236         }
1237         extracted = true;
1238         Self->omInUseCount--;



1239         break;
1240       }
1241     }
1242     assert(extracted, "Should have extracted from in-use list");
1243   }
1244 
1245   // FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
1246   m->FreeNext = Self->omFreeList;
1247   Self->omFreeList = m;
1248   Self->omFreeCount++;
1249 }
1250 
1251 // Return the monitors of a moribund thread's local free list to
1252 // the global free list.  Typically a thread calls omFlush() when
1253 // it's dying.  We could also consider having the VM thread steal
1254 // monitors from threads that have not run java code over a few
1255 // consecutive STW safepoints.  Relatedly, we might decay
1256 // omFreeProvision at STW safepoints.
1257 //
1258 // Also return the monitors of a moribund thread's omInUseList to


1754   // TODO: Add objectMonitor leak detection.
1755   // Audit/inventory the objectMonitors -- make sure they're all accounted for.
1756   GVars.stwRandom = os::random();
1757   GVars.stwCycle++;
1758 }
1759 
1760 void ObjectSynchronizer::deflate_thread_local_monitors(Thread* thread, DeflateMonitorCounters* counters) {
1761   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1762   if (!MonitorInUseLists) return;
1763 
1764   ObjectMonitor * freeHeadp = NULL;  // Local SLL of scavenged monitors
1765   ObjectMonitor * freeTailp = NULL;
1766 
1767   int deflated_count = deflate_monitor_list(thread->omInUseList_addr(), &freeHeadp, &freeTailp);
1768 
1769   Thread::muxAcquire(&gListLock, "scavenge - return");
1770 
1771   // Adjust counters
1772   counters->nInCirculation += thread->omInUseCount;
1773   thread->omInUseCount -= deflated_count;



1774   counters->nScavenged += deflated_count;
1775   counters->nInuse += thread->omInUseCount;
1776 
1777   // Move the scavenged monitors back to the global free list.
1778   if (freeHeadp != NULL) {
1779     guarantee(freeTailp != NULL && deflated_count > 0, "invariant");
1780     assert(freeTailp->FreeNext == NULL, "invariant");
1781 
1782     // constant-time list splice - prepend scavenged segment to gFreeList
1783     freeTailp->FreeNext = gFreeList;
1784     gFreeList = freeHeadp;
1785   }
1786   Thread::muxRelease(&gListLock);
1787 }
1788 
1789 // Monitor cleanup on JavaThread::exit
1790 
1791 // Iterate through monitor cache and attempt to release thread's monitors
1792 // Gives up on a particular monitor if an exception occurs, but continues
1793 // the overall iteration, swallowing the exception.

