src/hotspot/share/runtime/synchronizer.cpp

  94     }                                                                      \
  95   }
  96 
  97 #else //  ndef DTRACE_ENABLED
  98 
  99 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
 100 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
 101 
 102 #endif // ndef DTRACE_ENABLED
 103 
 104 // This exists only as a workaround of dtrace bug 6254741
 105 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
 106   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 107   return 0;
 108 }
 109 
 110 #define NINFLATIONLOCKS 256
 111 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
 112 
 113 // global list of blocks of monitors
 114 // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
 115 // want to expose the PaddedEnd template more than necessary.
 116 ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
 117 // global monitor free list
 118 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
 119 // global monitor in-use list, for moribund threads,
 120 // monitors they inflated need to be scanned for deflation
 121 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
 122 // count of entries in gOmInUseList
 123 int ObjectSynchronizer::gOmInUseCount = 0;
 124 
 125 static volatile intptr_t gListLock = 0;      // protects global monitor lists
 126 static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
 127 static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation
 128 
 129 static void post_monitor_inflate_event(EventJavaMonitorInflate&,
 130                                        const oop,
 131                                        const ObjectSynchronizer::InflateCause);
 132 
 133 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 134 
 135 
 136 // =====================> Quick functions


 224     // Case: TLE inimical operations such as nested/recursive synchronization
 225 
 226     if (owner == Self) {
 227       m->_recursions++;
 228       return true;
 229     }
 230 
 231     // This Java Monitor is inflated so obj's header will never be
 232     // displaced to this thread's BasicLock. Make the displaced header
 233     // non-NULL so this BasicLock is not seen as recursive nor as
 234     // being locked. We do this unconditionally so that this thread's
 235     // BasicLock cannot be mis-interpreted by any stack walkers. For
 236     // performance reasons, stack walkers generally first check for
 237     // Biased Locking in the object's header, the second check is for
 238     // stack-locking in the object's header, the third check is for
 239     // recursive stack-locking in the displaced header in the BasicLock,
 240     // and last are the inflated Java Monitor (ObjectMonitor) checks.
 241     lock->set_displaced_header(markOopDesc::unused_mark());
 242 
 243     if (owner == NULL &&
 244         Atomic::cmpxchg_ptr(Self, &(m->_owner), NULL) == NULL) {
 245       assert(m->_recursions == 0, "invariant");
 246       assert(m->_owner == Self, "invariant");
 247       return true;
 248     }
 249   }
 250 
 251   // Note that we could inflate in quick_enter.
 252   // This is likely a useful optimization
 253   // Critically, in quick_enter() we must not:
 254   // -- perform bias revocation, or
 255   // -- block indefinitely, or
 256   // -- reach a safepoint
 257 
 258   return false;        // revert to slow-path
 259 }
 260 
 261 // -----------------------------------------------------------------------------
 262 //  Fast Monitor Enter/Exit
 263 // This is the fast monitor enter. The interpreter and compiler use
 264 // some assembly copies of this code. Make sure to update that code


 785     //   The displaced header is strictly immutable.
 786     // It can NOT be changed in ANY case. So we have
 787     // to inflate the header into a heavyweight monitor
 788     // even if the current thread owns the lock. The reason
 789     // is that the BasicLock (stack slot) will be asynchronously
 790     // read by other threads during the inflate() call.
 791     // Any change to the stack may not propagate to other
 792     // threads correctly.
 793   }
 794 
 795   // Inflate the monitor to set hash code
 796   monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
 797   // Load displaced header and check it has hash code
 798   mark = monitor->header();
 799   assert(mark->is_neutral(), "invariant");
 800   hash = mark->hash();
 801   if (hash == 0) {
 802     hash = get_next_hash(Self, obj);
 803     temp = mark->copy_set_hash(hash); // merge hash code into header
 804     assert(temp->is_neutral(), "invariant");
 805     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
 806     if (test != mark) {
 807       // The only update to the header in the monitor (outside GC)
 808       // is to install the hash code. If someone adds a new use of
 809       // the displaced header, please update this code.
 810       hash = test->hash();
 811       assert(test->is_neutral(), "invariant");
 812       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
 813     }
 814   }
 815   // We finally get the hash
 816   return hash;
 817 }
 818 
 819 // Deprecated -- use FastHashCode() instead.
 820 
 821 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
 822   return FastHashCode(Thread::current(), obj());
 823 }
 824 
 825 


 922     assert(monitor != NULL, "monitor should be non-null");
 923     owner = (address) monitor->owner();
 924   }
 925 
 926   if (owner != NULL) {
 927     // owning_thread_from_monitor_owner() may also return NULL here
 928     return Threads::owning_thread_from_monitor_owner(owner, doLock);
 929   }
 930 
 931   // Unlocked case, header in place
 932   // Cannot have assertion since this object may have been
 933   // locked by another thread when reaching here.
 934   // assert(mark->is_neutral(), "sanity check");
 935 
 936   return NULL;
 937 }
 938 
 939 // Visitors ...
 940 
 941 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
 942   PaddedEnd<ObjectMonitor> * block =
 943     (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
 944   while (block != NULL) {
 945     assert(block->object() == CHAINMARKER, "must be a block header");
 946     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
 947       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
 948       oop object = (oop)mid->object();
 949       if (object != NULL) {
 950         closure->do_monitor(mid);
 951       }
 952     }
 953     block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
 954   }
 955 }
 956 
 957 // Get the next block in the block list.
 958 static inline ObjectMonitor* next(ObjectMonitor* block) {
 959   assert(block->object() == CHAINMARKER, "must be a block header");
 960   block = block->FreeNext;
 961   assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
 962   return block;
 963 }


 974 bool ObjectSynchronizer::is_cleanup_needed() {
 975   if (MonitorUsedDeflationThreshold > 0) {
 976     return monitors_used_above_threshold();
 977   }
 978   return false;
 979 }
 980 
 981 void ObjectSynchronizer::oops_do(OopClosure* f) {
 982   if (MonitorInUseLists) {
 983     // When using thread local monitor lists, we only scan the
 984     // global used list here (for moribund threads), and
 985     // the thread-local monitors in Thread::oops_do().
 986     global_used_oops_do(f);
 987   } else {
 988     global_oops_do(f);
 989   }
 990 }
 991 
 992 void ObjectSynchronizer::global_oops_do(OopClosure* f) {
 993   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 994   PaddedEnd<ObjectMonitor> * block =
 995     (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
 996   for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
 997     assert(block->object() == CHAINMARKER, "must be a block header");
 998     for (int i = 1; i < _BLOCKSIZE; i++) {
 999       ObjectMonitor* mid = (ObjectMonitor *)&block[i];
1000       if (mid->object() != NULL) {
1001         f->do_oop((oop*)mid->object_addr());
1002       }
1003     }
1004   }
1005 }
1006 
1007 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1008   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1009   list_oops_do(gOmInUseList, f);
1010 }
1011 
1012 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1013   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1014   list_oops_do(thread->omInUseList, f);
1015 }


1215 
1216     // Element [0] is reserved for global list linkage
1217     temp[0].set_object(CHAINMARKER);
1218 
1219     // Consider carving out this thread's current request from the
1220     // block in hand.  This avoids some lock traffic and redundant
1221     // list activity.
1222 
1223     // Acquire the gListLock to manipulate gBlockList and gFreeList.
1224     // An Oyama-Taura-Yonezawa scheme might be more efficient.
1225     Thread::muxAcquire(&gListLock, "omAlloc [2]");
1226     gMonitorPopulation += _BLOCKSIZE-1;
1227     gMonitorFreeCount += _BLOCKSIZE-1;
1228 
1229     // Add the new block to the list of extant blocks (gBlockList).
1230     // The very first objectMonitor in a block is reserved and dedicated.
1231     // It serves as blocklist "next" linkage.
1232     temp[0].FreeNext = gBlockList;
1233     // There are lock-free uses of gBlockList so make sure that
1234     // the previous stores happen before we update gBlockList.
1235     OrderAccess::release_store_ptr(&gBlockList, temp);
1236 
1237     // Add the new string of objectMonitors to the global free list
1238     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
1239     gFreeList = temp + 1;
1240     Thread::muxRelease(&gListLock);
1241     TEVENT(Allocate block of monitors);
1242   }
1243 }
1244 
1245 // Place "m" on the caller's private per-thread omFreeList.
1246 // In practice there's no need to clamp or limit the number of
1247 // monitors on a thread's omFreeList as the only time we'll call
1248 // omRelease is to return a monitor to the free list after a CAS
1249 // attempt failed.  This doesn't allow unbounded #s of monitors to
1250 // accumulate on a thread's free list.
1251 //
1252 // Key constraint: all ObjectMonitors on a thread's free list and the global
1253 // free list must have their object field set to null. This prevents the
1254 // scavenger -- deflate_idle_monitors -- from reclaiming them.
1255 


1717   TEVENT(deflate_idle_monitors);
1718   // Prevent omFlush from changing mids in Thread dtor's during deflation
1719   // And in case the vm thread is acquiring a lock during a safepoint
1720   // See e.g. 6320749
1721   Thread::muxAcquire(&gListLock, "scavenge - return");
1722 
1723   if (MonitorInUseLists) {
1724     // Note: the thread-local monitors lists get deflated in
1725     // a separate pass. See deflate_thread_local_monitors().
1726 
1727     // For moribund threads, scan gOmInUseList
1728     if (gOmInUseList) {
1729       counters->nInCirculation += gOmInUseCount;
1730       int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
1731       gOmInUseCount -= deflated_count;
1732       counters->nScavenged += deflated_count;
1733       counters->nInuse += gOmInUseCount;
1734     }
1735 
1736   } else {
1737     PaddedEnd<ObjectMonitor> * block =
1738       (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
1739     for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1740       // Iterate over all extant monitors - Scavenge all idle monitors.
1741       assert(block->object() == CHAINMARKER, "must be a block header");
1742       counters->nInCirculation += _BLOCKSIZE;
1743       for (int i = 1; i < _BLOCKSIZE; i++) {
1744         ObjectMonitor* mid = (ObjectMonitor*)&block[i];
1745         oop obj = (oop)mid->object();
1746 
1747         if (obj == NULL) {
1748           // The monitor is not associated with an object.
1749           // The monitor should be on either a thread-specific private
1750           // free list or the global free list.
1751           // obj == NULL IMPLIES mid->is_busy() == 0
1752           guarantee(!mid->is_busy(), "invariant");
1753           continue;
1754         }
1755         deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);
1756 
1757         if (deflated) {
1758           mid->FreeNext = NULL;


1952                     "line which permits false sharing.");
1953       (*warning_cnt_ptr)++;
1954     }
1955 
1956     if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
1957       tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
1958                     "to the struct end than a cache line which permits false "
1959                     "sharing.");
1960       (*warning_cnt_ptr)++;
1961     }
1962   }
1963 }
1964 
1965 #ifndef PRODUCT
1966 
1967 // Check if monitor belongs to the monitor cache
1968 // The list is grow-only so it's *relatively* safe to traverse
1969 // the list of extant blocks without taking a lock.
1970 
1971 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1972   PaddedEnd<ObjectMonitor> * block =
1973     (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
1974   while (block != NULL) {
1975     assert(block->object() == CHAINMARKER, "must be a block header");
1976     if (monitor > (ObjectMonitor *)&block[0] &&
1977         monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
1978       address mon = (address)monitor;
1979       address blk = (address)block;
1980       size_t diff = mon - blk;
1981       assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
1982       return 1;
1983     }
1984     block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
1985   }
1986   return 0;
1987 }
1988 
1989 #endif


  94     }                                                                      \
  95   }
  96 
  97 #else //  ndef DTRACE_ENABLED
  98 
  99 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
 100 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
 101 
 102 #endif // ndef DTRACE_ENABLED
 103 
 104 // This exists only as a workaround of dtrace bug 6254741
 105 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
 106   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 107   return 0;
 108 }
 109 
 110 #define NINFLATIONLOCKS 256
 111 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
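
gInflationLocks is a striped set of simple locks: rather than funneling every inflation-related stall through one global lock, a thread hashes the object's address down to one of NINFLATIONLOCKS slots. The hashing and muxAcquire call sites are outside this excerpt, so the following standalone sketch (using std::atomic spinlocks instead of Thread::muxAcquire, and an assumed hash) only illustrates the striping idea:

#include <atomic>
#include <cstdint>

static const int kNumStripes = 256;                 // mirrors NINFLATIONLOCKS
static std::atomic<intptr_t> gStripes[kNumStripes]; // 0 = unlocked, 1 = locked

// Hash an object address down to one stripe; the shift drops low bits that
// are identical for all objects because of alignment.
static int stripe_for(const void* obj) {
  return (int)((reinterpret_cast<uintptr_t>(obj) >> 5) & (kNumStripes - 1));
}

static void stripe_lock(int ix) {
  intptr_t expected = 0;
  while (!gStripes[ix].compare_exchange_weak(expected, 1,
                                             std::memory_order_acquire)) {
    expected = 0;  // CAS failure overwrote 'expected'; reset and retry
  }
}

static void stripe_unlock(int ix) {
  gStripes[ix].store(0, std::memory_order_release);
}
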
 112 
 113 // global list of blocks of monitors
 114 PaddedEnd<ObjectMonitor> * volatile ObjectSynchronizer::gBlockList = NULL;


 115 // global monitor free list
 116 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
 117 // global monitor in-use list, for moribund threads,
 118 // monitors they inflated need to be scanned for deflation
 119 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
 120 // count of entries in gOmInUseList
 121 int ObjectSynchronizer::gOmInUseCount = 0;
 122 
 123 static volatile intptr_t gListLock = 0;      // protects global monitor lists
 124 static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
 125 static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation
 126 
 127 static void post_monitor_inflate_event(EventJavaMonitorInflate&,
 128                                        const oop,
 129                                        const ObjectSynchronizer::InflateCause);
 130 
 131 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 132 
 133 
 134 // =====================> Quick functions


 222     // Case: TLE inimical operations such as nested/recursive synchronization
 223 
 224     if (owner == Self) {
 225       m->_recursions++;
 226       return true;
 227     }
 228 
 229     // This Java Monitor is inflated so obj's header will never be
 230     // displaced to this thread's BasicLock. Make the displaced header
 231     // non-NULL so this BasicLock is not seen as recursive nor as
 232     // being locked. We do this unconditionally so that this thread's
 233     // BasicLock cannot be mis-interpreted by any stack walkers. For
 234     // performance reasons, stack walkers generally first check for
 235     // Biased Locking in the object's header, the second check is for
 236     // stack-locking in the object's header, the third check is for
 237     // recursive stack-locking in the displaced header in the BasicLock,
 238     // and last are the inflated Java Monitor (ObjectMonitor) checks.
 239     lock->set_displaced_header(markOopDesc::unused_mark());
 240 
 241     if (owner == NULL &&
 242         Atomic::cmpxchg((void*)Self, &(m->_owner), (void*)NULL) == NULL) {
 243       assert(m->_recursions == 0, "invariant");
 244       assert(m->_owner == Self, "invariant");
 245       return true;
 246     }
 247   }
 248 
 249   // Note that we could inflate in quick_enter.
 250   // This is likely a useful optimization
 251   // Critically, in quick_enter() we must not:
 252   // -- perform bias revocation, or
 253   // -- block indefinitely, or
 254   // -- reach a safepoint
 255 
 256   return false;        // revert to slow-path
 257 }
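
The fast path above reduces to two cases: the caller already owns the inflated monitor and just bumps _recursions, or the monitor is unowned and ownership is claimed with a single compare-and-swap on the owner field. A standalone sketch of that decision, using std::atomic in place of HotSpot's Atomic::cmpxchg and a simplified Monitor type standing in for ObjectMonitor:

#include <atomic>

struct Thread;   // opaque stand-in for the VM's thread type

struct Monitor {
  std::atomic<Thread*> owner{nullptr};
  int recursions = 0;
};

// Returns true if the calling thread now owns the monitor.
bool try_quick_enter(Monitor* m, Thread* self) {
  Thread* owner = m->owner.load(std::memory_order_relaxed);
  if (owner == self) {        // recursive enter: we are already the owner
    m->recursions++;
    return true;
  }
  if (owner == nullptr) {     // unowned: try to claim it with one CAS
    Thread* expected = nullptr;
    return m->owner.compare_exchange_strong(expected, self,
                                            std::memory_order_acquire);
  }
  return false;               // owned by someone else: take the slow path
}
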
 258 
 259 // -----------------------------------------------------------------------------
 260 //  Fast Monitor Enter/Exit
 261 // This is the fast monitor enter. The interpreter and compiler use
 262 // some assembly copies of this code. Make sure to update that code


 783     //   The displaced header is strictly immutable.
 784     // It can NOT be changed in ANY case. So we have
 785     // to inflate the header into a heavyweight monitor
 786     // even if the current thread owns the lock. The reason
 787     // is that the BasicLock (stack slot) will be asynchronously
 788     // read by other threads during the inflate() call.
 789     // Any change to the stack may not propagate to other
 790     // threads correctly.
 791   }
 792 
 793   // Inflate the monitor to set hash code
 794   monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
 795   // Load displaced header and check it has hash code
 796   mark = monitor->header();
 797   assert(mark->is_neutral(), "invariant");
 798   hash = mark->hash();
 799   if (hash == 0) {
 800     hash = get_next_hash(Self, obj);
 801     temp = mark->copy_set_hash(hash); // merge hash code into header
 802     assert(temp->is_neutral(), "invariant");
 803     test = Atomic::cmpxchg(temp, monitor->header_addr(), mark);
 804     if (test != mark) {
 805       // The only update to the header in the monitor (outside GC)
 806       // is to install the hash code. If someone adds a new use of
 807       // the displaced header, please update this code.
 808       hash = test->hash();
 809       assert(test->is_neutral(), "invariant");
 810       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
 811     }
 812   }
 813   // We finally get the hash
 814   return hash;
 815 }
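
Installing the identity hash is a publish-once race: each thread merges its candidate hash into a copy of the header and tries to CAS it into the monitor, and a thread that loses the race simply adopts the hash the winner installed. The same idiom in standalone form, with a plain integer slot standing in for the displaced mark word:

#include <atomic>
#include <cstdint>

// A single shared slot; 0 means "no hash installed yet", as in FastHashCode.
static std::atomic<uint32_t> installed_hash{0};

// 'candidate' is assumed to be nonzero.
uint32_t get_or_install_hash(uint32_t candidate) {
  uint32_t current = installed_hash.load(std::memory_order_acquire);
  if (current != 0) {
    return current;                      // someone already installed a hash
  }
  if (installed_hash.compare_exchange_strong(current, candidate,
                                             std::memory_order_acq_rel)) {
    return candidate;                    // we won the race to install it
  }
  return current;                        // we lost; adopt the winner's hash
}
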
 816 
 817 // Deprecated -- use FastHashCode() instead.
 818 
 819 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
 820   return FastHashCode(Thread::current(), obj());
 821 }
 822 
 823 


 920     assert(monitor != NULL, "monitor should be non-null");
 921     owner = (address) monitor->owner();
 922   }
 923 
 924   if (owner != NULL) {
 925     // owning_thread_from_monitor_owner() may also return NULL here
 926     return Threads::owning_thread_from_monitor_owner(owner, doLock);
 927   }
 928 
 929   // Unlocked case, header in place
 930   // Cannot have assertion since this object may have been
 931   // locked by another thread when reaching here.
 932   // assert(mark->is_neutral(), "sanity check");
 933 
 934   return NULL;
 935 }
 936 
 937 // Visitors ...
 938 
 939 void ObjectSynchronizer::monitors_iterate(MonitorClosure* closure) {
 940   PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);

 941   while (block != NULL) {
 942     assert(block->object() == CHAINMARKER, "must be a block header");
 943     for (int i = _BLOCKSIZE - 1; i > 0; i--) {
 944       ObjectMonitor* mid = (ObjectMonitor *)(block + i);
 945       oop object = (oop)mid->object();
 946       if (object != NULL) {
 947         closure->do_monitor(mid);
 948       }
 949     }
 950     block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
 951   }
 952 }
 953 
 954 // Get the next block in the block list.
 955 static inline ObjectMonitor* next(ObjectMonitor* block) {
 956   assert(block->object() == CHAINMARKER, "must be a block header");
 957   block = block->FreeNext;
 958   assert(block == NULL || block->object() == CHAINMARKER, "must be a block header");
 959   return block;
 960 }
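
Monitors live in fixed-size blocks chained through element [0]'s FreeNext field, which is why both monitors_iterate and next() treat index 0 as the block header and only visit indices 1 and up. A standalone sketch of the layout and a traversal in the same shape (Monitor and kBlockSize are stand-ins for ObjectMonitor and _BLOCKSIZE):

#include <cstddef>

struct Monitor {
  Monitor* free_next = nullptr;  // element [0] reuses this as the block link
  void*    object    = nullptr;  // non-null only while the monitor is in use
};

static const int kBlockSize = 128;       // stand-in for _BLOCKSIZE

// Count in-use monitors across a chain of blocks. Element [0] of each block
// is the header/link slot and never holds a user monitor.
size_t count_in_use(Monitor* block_head) {
  size_t n = 0;
  for (Monitor* block = block_head; block != nullptr; block = block->free_next) {
    for (int i = 1; i < kBlockSize; i++) {
      if (block[i].object != nullptr) {
        n++;
      }
    }
  }
  return n;
}
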


 971 bool ObjectSynchronizer::is_cleanup_needed() {
 972   if (MonitorUsedDeflationThreshold > 0) {
 973     return monitors_used_above_threshold();
 974   }
 975   return false;
 976 }
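
monitors_used_above_threshold is outside this excerpt; the intent is that a cleanup pass is requested once the in-use share of the extant monitor population (gMonitorPopulation minus gMonitorFreeCount) exceeds MonitorUsedDeflationThreshold percent. A hedged standalone sketch of that style of check, with stand-in globals; the real helper's accounting may differ in detail:

// Stand-ins for gMonitorPopulation, gMonitorFreeCount and the VM flag.
static int monitor_population       = 0;   // extant monitors (in circulation)
static int monitor_free_count       = 0;   // monitors on the global free list
static int used_deflation_threshold = 90;  // percent, like the VM flag

static bool used_above_threshold() {
  if (monitor_population == 0) {
    return false;                           // nothing allocated yet
  }
  int used = monitor_population - monitor_free_count;
  int percent_used = used * 100 / monitor_population;
  return percent_used > used_deflation_threshold;
}
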
 977 
 978 void ObjectSynchronizer::oops_do(OopClosure* f) {
 979   if (MonitorInUseLists) {
 980     // When using thread local monitor lists, we only scan the
 981     // global used list here (for moribund threads), and
 982     // the thread-local monitors in Thread::oops_do().
 983     global_used_oops_do(f);
 984   } else {
 985     global_oops_do(f);
 986   }
 987 }
 988 
 989 void ObjectSynchronizer::global_oops_do(OopClosure* f) {
 990   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 991   PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);

 992   for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
 993     assert(block->object() == CHAINMARKER, "must be a block header");
 994     for (int i = 1; i < _BLOCKSIZE; i++) {
 995       ObjectMonitor* mid = (ObjectMonitor *)&block[i];
 996       if (mid->object() != NULL) {
 997         f->do_oop((oop*)mid->object_addr());
 998       }
 999     }
1000   }
1001 }
1002 
1003 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1004   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1005   list_oops_do(gOmInUseList, f);
1006 }
1007 
1008 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1009   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1010   list_oops_do(thread->omInUseList, f);
1011 }


1211 
1212     // Element [0] is reserved for global list linkage
1213     temp[0].set_object(CHAINMARKER);
1214 
1215     // Consider carving out this thread's current request from the
1216     // block in hand.  This avoids some lock traffic and redundant
1217     // list activity.
1218 
1219     // Acquire the gListLock to manipulate gBlockList and gFreeList.
1220     // An Oyama-Taura-Yonezawa scheme might be more efficient.
1221     Thread::muxAcquire(&gListLock, "omAlloc [2]");
1222     gMonitorPopulation += _BLOCKSIZE-1;
1223     gMonitorFreeCount += _BLOCKSIZE-1;
1224 
1225     // Add the new block to the list of extant blocks (gBlockList).
1226     // The very first objectMonitor in a block is reserved and dedicated.
1227     // It serves as blocklist "next" linkage.
1228     temp[0].FreeNext = gBlockList;
1229     // There are lock-free uses of gBlockList so make sure that
1230     // the previous stores happen before we update gBlockList.
1231     OrderAccess::release_store(&gBlockList, temp);
1232 
1233     // Add the new string of objectMonitors to the global free list
1234     temp[_BLOCKSIZE - 1].FreeNext = gFreeList;
1235     gFreeList = temp + 1;
1236     Thread::muxRelease(&gListLock);
1237     TEVENT(Allocate block of monitors);
1238   }
1239 }
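
The comment before the release_store spells out the classic release/acquire publication pattern: fully initialize the new block, then publish its address so that lock-free readers who load_acquire gBlockList are guaranteed to observe the initialized contents. A standalone illustration with std::atomic rather than HotSpot's OrderAccess; it assumes a single publisher at a time, which the real code gets from holding gListLock:

#include <atomic>

struct Block {
  int    payload[4];             // fields filled in before publication
  Block* next = nullptr;
};

static std::atomic<Block*> block_list_head{nullptr};

// Writer side: initialize the block completely, then release-store the head.
void publish_block(Block* b) {
  b->next = block_list_head.load(std::memory_order_relaxed);
  block_list_head.store(b, std::memory_order_release);
}

// Reader side: the acquire load pairs with the release store, so any block
// reached through the returned pointer is seen fully initialized.
Block* snapshot_head() {
  return block_list_head.load(std::memory_order_acquire);
}
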
1240 
1241 // Place "m" on the caller's private per-thread omFreeList.
1242 // In practice there's no need to clamp or limit the number of
1243 // monitors on a thread's omFreeList as the only time we'll call
1244 // omRelease is to return a monitor to the free list after a CAS
1245 // attempt failed.  This doesn't allow unbounded #s of monitors to
1246 // accumulate on a thread's free list.
1247 //
1248 // Key constraint: all ObjectMonitors on a thread's free list and the global
1249 // free list must have their object field set to null. This prevents the
1250 // scavenger -- deflate_idle_monitors -- from reclaiming them.
1251 


1713   TEVENT(deflate_idle_monitors);
1714   // Prevent omFlush from changing mids in Thread dtor's during deflation
1715   // And in case the vm thread is acquiring a lock during a safepoint
1716   // See e.g. 6320749
1717   Thread::muxAcquire(&gListLock, "scavenge - return");
1718 
1719   if (MonitorInUseLists) {
1720     // Note: the thread-local monitors lists get deflated in
1721     // a separate pass. See deflate_thread_local_monitors().
1722 
1723     // For moribund threads, scan gOmInUseList
1724     if (gOmInUseList) {
1725       counters->nInCirculation += gOmInUseCount;
1726       int deflated_count = deflate_monitor_list((ObjectMonitor **)&gOmInUseList, &freeHeadp, &freeTailp);
1727       gOmInUseCount -= deflated_count;
1728       counters->nScavenged += deflated_count;
1729       counters->nInuse += gOmInUseCount;
1730     }
1731 
1732   } else {
1733     PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);

1734     for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1735       // Iterate over all extant monitors - Scavenge all idle monitors.
1736       assert(block->object() == CHAINMARKER, "must be a block header");
1737       counters->nInCirculation += _BLOCKSIZE;
1738       for (int i = 1; i < _BLOCKSIZE; i++) {
1739         ObjectMonitor* mid = (ObjectMonitor*)&block[i];
1740         oop obj = (oop)mid->object();
1741 
1742         if (obj == NULL) {
1743           // The monitor is not associated with an object.
1744           // The monitor should be on either a thread-specific private
1745           // free list or the global free list.
1746           // obj == NULL IMPLIES mid->is_busy() == 0
1747           guarantee(!mid->is_busy(), "invariant");
1748           continue;
1749         }
1750         deflated = deflate_monitor(mid, obj, &freeHeadp, &freeTailp);
1751 
1752         if (deflated) {
1753           mid->FreeNext = NULL;
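
deflate_monitor_list is not shown in this hunk; conceptually it walks a FreeNext-linked in-use list, unlinks every monitor that deflate_monitor manages to deflate, and strings those monitors onto the local freeHeadp/freeTailp list for a later bulk return to the global free list. A simplified standalone sketch of that unlink-and-collect pass, with a stand-in node type and an 'idle' flag in place of the real deflation test:

struct Node {
  Node* next = nullptr;
  bool  idle = false;            // stands in for "deflate_monitor succeeded"
};

// Walk *list_p, unlink idle nodes, append them to the free list, and return
// how many were removed so the caller can adjust its counters.
int deflate_list(Node** list_p, Node** free_head, Node** free_tail) {
  int deflated = 0;
  Node** cur_p = list_p;
  while (*cur_p != nullptr) {
    Node* cur = *cur_p;
    if (cur->idle) {
      *cur_p = cur->next;                 // unlink from the in-use list
      cur->next = nullptr;
      if (*free_head == nullptr) {
        *free_head = cur;                 // first deflated node
      } else {
        (*free_tail)->next = cur;         // append behind the current tail
      }
      *free_tail = cur;
      deflated++;
    } else {
      cur_p = &cur->next;                 // keep this node and advance
    }
  }
  return deflated;
}
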


1947                     "line which permits false sharing.");
1948       (*warning_cnt_ptr)++;
1949     }
1950 
1951     if ((sizeof(SharedGlobals) - offset_hcSequence) < cache_line_size) {
1952       tty->print_cr("WARNING: the SharedGlobals.hcSequence field is closer "
1953                     "to the struct end than a cache line which permits false "
1954                     "sharing.");
1955       (*warning_cnt_ptr)++;
1956     }
1957   }
1958 }
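
Both warnings fire when the hot SharedGlobals.hcSequence field ends up within one cache line of neighboring data, which would allow false sharing. The usual remedy is explicit padding on each side of the field; a minimal standalone sketch, assuming a 64-byte cache line (the VM computes the real line size at runtime):

#include <cstddef>

static const size_t kAssumedCacheLine = 64;   // assumption; the VM probes it

struct PaddedCounter {
  char pad_before[kAssumedCacheLine];   // keeps earlier globals off this line
  volatile int hot_sequence;            // the frequently updated field
  char pad_after[kAssumedCacheLine];    // keeps later globals off this line
};

With a full line of padding on each side, updates to hot_sequence cannot invalidate cache lines that hold unrelated data.
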
1959 
1960 #ifndef PRODUCT
1961 
1962 // Check if monitor belongs to the monitor cache
1963 // The list is grow-only so it's *relatively* safe to traverse
1964 // the list of extant blocks without taking a lock.
1965 
1966 int ObjectSynchronizer::verify_objmon_isinpool(ObjectMonitor *monitor) {
1967   PaddedEnd<ObjectMonitor> * block = OrderAccess::load_acquire(&gBlockList);

1968   while (block != NULL) {
1969     assert(block->object() == CHAINMARKER, "must be a block header");
1970     if (monitor > (ObjectMonitor *)&block[0] &&
1971         monitor < (ObjectMonitor *)&block[_BLOCKSIZE]) {
1972       address mon = (address)monitor;
1973       address blk = (address)block;
1974       size_t diff = mon - blk;
1975       assert((diff % sizeof(PaddedEnd<ObjectMonitor>)) == 0, "must be aligned");
1976       return 1;
1977     }
1978     block = (PaddedEnd<ObjectMonitor> *)block->FreeNext;
1979   }
1980   return 0;
1981 }
1982 
1983 #endif