
src/share/vm/runtime/synchronizer.cpp

rev 13054 : imported patch monitor_deflate_conc


  98 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
  99 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
 100 
 101 #endif // ndef DTRACE_ENABLED
 102 
 103 // This exists only as a workaround of dtrace bug 6254741
 104 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
 105   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 106   return 0;
 107 }
 108 
 109 #define NINFLATIONLOCKS 256
 110 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
 111 
 112 // global list of blocks of monitors
 113 // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
 114 // want to expose the PaddedEnd template more than necessary.
 115 ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
 116 // global monitor free list
 117 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;


 118 // global monitor in-use list, for moribund threads,
 119 // monitors they inflated need to be scanned for deflation
 120 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
 121 // count of entries in gOmInUseList
 122 int ObjectSynchronizer::gOmInUseCount = 0;
 123 


 124 static volatile intptr_t gListLock = 0;      // protects global monitor lists
 125 static volatile int gMonitorFreeCount  = 0;  // # on gFreeList

 126 static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation
 127 
 128 static void post_monitor_inflate_event(EventJavaMonitorInflate&,
 129                                        const oop,
 130                                        const ObjectSynchronizer::InflateCause);
 131 
 132 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 133 
 134 
 135 // =====================> Quick functions
 136 
 137 // The quick_* forms are special fast-path variants used to improve
 138 // performance.  In the simplest case, a "quick_*" implementation could
 139 // simply return false, in which case the caller will perform the necessary
 140 // state transitions and call the slow-path form.
 141 // The fast-path is designed to handle frequently arising cases in an efficient
 142 // manner and is just a degenerate "optimistic" variant of the slow-path.
 143 // returns true  -- to indicate the call was satisfied.
 144 // returns false -- to indicate the call needs the services of the slow-path.
 145 // A no-loitering ordinance is in effect for code in the quick_* family
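For illustration, a minimal sketch of the quick_* contract described above, assuming a hypothetical CAS-based lock word; lock_word, quick_try_acquire and slow_acquire are invented names, not HotSpot code:

    #include <atomic>

    static std::atomic<int> lock_word(0);

    // Returns true if the call was satisfied on the fast path,
    // false if the caller must perform the state transitions itself
    // and call the slow-path form.
    static bool quick_try_acquire() {
      int expected = 0;
      // A single CAS attempt; no loops, no blocking ("no loitering").
      return lock_word.compare_exchange_strong(expected, 1);
    }

    static void slow_acquire() {
      // The full slow path (parking, queueing, ...) would live here.
      while (!quick_try_acquire()) { /* spin only for this sketch */ }
    }

    void acquire() {
      if (quick_try_acquire()) return;   // optimistic, degenerate fast path
      slow_acquire();
    }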


 323     // swing the displaced header from the BasicLock back to the mark.
 324     assert(dhw->is_neutral(), "invariant");
 325     if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
 326       TEVENT(fast_exit: release stack-lock);
 327       return;
 328     }
 329   }
 330 
 331   // We have to take the slow-path of possible inflation and then exit.
 332   ObjectSynchronizer::inflate(THREAD,
 333                               object,
 334                               inflate_cause_vm_internal)->exit(true, THREAD);
 335 }
 336 
 337 // -----------------------------------------------------------------------------
 338 // Interpreter/Compiler Slow Case
 339 // This routine handles the interpreter/compiler slow case.
 340 // We don't need to use the fast path here, because it must have
 341 // already failed in the interpreter/compiler code.
 342 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {

 343   markOop mark = obj->mark();
 344   assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 345 
 346   if (mark->is_neutral()) {
 347     // Anticipate successful CAS -- the ST of the displaced mark must
 348     // be visible <= the ST performed by the CAS.
 349     lock->set_displaced_header(mark);
 350     if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
 351       TEVENT(slow_enter: release stacklock);
 352       return;
 353     }
 354     // Fall through to inflate() ...
 355   } else if (mark->has_locker() &&
 356              THREAD->is_lock_owned((address)mark->locker())) {
 357     assert(lock != mark->locker(), "must not re-lock the same lock");
 358     assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 359     lock->set_displaced_header(NULL);
 360     return;
 361   }
 362 
 363   // The object header will never be displaced to this lock,
 364   // so it does not matter what the value is, except that it
 365   // must be non-zero to avoid looking like a re-entrant lock,
 366   // and must not look locked either.
 367   lock->set_displaced_header(markOopDesc::unused_mark());
 368   ObjectSynchronizer::inflate(THREAD,
 369                               obj(),
 370                               inflate_cause_monitor_enter)->enter(THREAD);
 371 }
 372 
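The stack-locking step above follows the displaced-header idiom: store the current (neutral) mark into the on-stack BasicLock, then CAS a pointer to that BasicLock into the object's mark word. A minimal sketch with std::atomic standing in for the mark word; all types and constants here are illustrative, not HotSpot's:

    #include <atomic>
    #include <cstdint>

    struct FakeObject    { std::atomic<uintptr_t> mark; };
    struct FakeBasicLock { uintptr_t displaced; };   // lives in a stack frame

    const uintptr_t kNeutral = 0x1;                  // stand-in "unlocked" mark

    // Returns true if the stack lock was installed; false means the caller
    // must inflate and take the slow path.
    bool stack_lock(FakeObject* obj, FakeBasicLock* lock) {
      uintptr_t mark = obj->mark.load(std::memory_order_relaxed);
      if (mark != kNeutral) return false;            // not neutral: slow path
      // The store of the displaced mark must be visible no later than the
      // CAS that publishes the pointer to the BasicLock.
      lock->displaced = mark;
      return obj->mark.compare_exchange_strong(
          mark, reinterpret_cast<uintptr_t>(lock), std::memory_order_acq_rel);
    }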
 373 // This routine handles the interpreter/compiler slow case.
 374 // We don't need to use the fast path here, because it must have
 375 // already failed in the interpreter/compiler code. Simply using the
 376 // heavyweight monitor should be ok, unless someone finds otherwise.
 377 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
 378   fast_exit(object, lock, THREAD);
 379 }
 380 
 381 // -----------------------------------------------------------------------------
 382 // Class Loader  support to workaround deadlocks on the class loader lock objects
 383 // Also used by GC
 384 // complete_exit()/reenter() are used to wait on a nested lock
 385 // i.e. to give up an outer lock completely and then re-enter
 386 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 387 //  1) complete_exit lock1 - saving recursion count
 388 //  2) wait on lock2
 389 //  3) when notified on lock2, unlock lock2
 390 //  4) reenter lock1 with original recursion count
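A minimal sketch of the complete_exit()/reenter() pattern in steps 1-4, assuming a simple recursion-counting wrapper around std::recursive_mutex; RecursiveLock and its methods are illustrative only, not HotSpot's API:

    #include <mutex>

    class RecursiveLock {
      std::recursive_mutex _m;
      int _recursions = 0;
     public:
      void enter() { _m.lock(); _recursions++; }
      void exit()  { _recursions--; _m.unlock(); }
      // Step 1: give up the lock completely, returning the saved count.
      int complete_exit() {
        int saved = _recursions;
        for (int i = 0; i < saved; i++) exit();
        return saved;
      }
      // Step 4: re-enter with the original recursion count.
      void reenter(int saved) {
        for (int i = 0; i < saved; i++) enter();
      }
    };

    // Usage mirroring the steps above:
    //   int n = lock1.complete_exit();                 // 1
    //   lock2.enter(); /* wait ... */ lock2.exit();    // 2, 3
    //   lock1.reenter(n);                              // 4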


 395   if (UseBiasedLocking) {
 396     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 397     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 398   }
 399 
 400   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 401                                                        obj(),
 402                                                        inflate_cause_vm_internal);
 403 
 404   return monitor->complete_exit(THREAD);
 405 }
 406 
 407 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 408 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
 409   TEVENT(reenter);
 410   if (UseBiasedLocking) {
 411     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 412     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 413   }
 414 
 415   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,


 416                                                        obj(),
 417                                                        inflate_cause_vm_internal);
 418 
 419   monitor->reenter(recursion, THREAD);
 420 }
 421 // -----------------------------------------------------------------------------
 422 // JNI locks on java objects
 423 // NOTE: must use heavy weight monitor to handle jni monitor enter
 424 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 425   // the current locking is from JNI instead of Java code
 426   TEVENT(jni_enter);
 427   if (UseBiasedLocking) {
 428     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 429     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 430   }
 431   THREAD->set_current_pending_monitor_is_from_java(false);
 432   ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD);
 433   THREAD->set_current_pending_monitor_is_from_java(true);
 434 }
 435 
 436 // NOTE: must use heavy weight monitor to handle jni monitor exit
 437 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 438   TEVENT(jni_exit);
 439   if (UseBiasedLocking) {
 440     Handle h_obj(THREAD, obj);
 441     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
 442     obj = h_obj();
 443   }
 444   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 445 
 446   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 447                                                        obj,
 448                                                        inflate_cause_jni_exit);
 449   // If this thread has locked the object, exit the monitor.  Note:  can't use
 450   // monitor->check(CHECK); must exit even if an exception is pending.
 451   if (monitor->check(THREAD)) {
 452     monitor->exit(true, THREAD);


 695     // likely make this the default in future releases.
 696     unsigned t = Self->_hashStateX;
 697     t ^= (t << 11);
 698     Self->_hashStateX = Self->_hashStateY;
 699     Self->_hashStateY = Self->_hashStateZ;
 700     Self->_hashStateZ = Self->_hashStateW;
 701     unsigned v = Self->_hashStateW;
 702     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 703     Self->_hashStateW = v;
 704     value = v;
 705   }
 706 
 707   value &= markOopDesc::hash_mask;
 708   if (value == 0) value = 0xBAD;
 709   assert(value != markOopDesc::no_hash, "invariant");
 710   TEVENT(hashCode: GENERATE);
 711   return value;
 712 }
 713 
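The generator in get_next_hash() above is a Marsaglia xor-shift scheme over the four per-thread state words _hashStateX.._hashStateW. A standalone version for illustration; the struct and seed values here are arbitrary stand-ins for the per-thread state:

    #include <cstdint>

    struct HashState {
      // Arbitrary non-zero seeds for the sketch; the real state is per-thread.
      uint32_t x = 123456789u, y = 362436069u, z = 521288629u, w = 88675123u;
    };

    static uint32_t next_hash(HashState* s) {
      uint32_t t = s->x;
      t ^= (t << 11);
      s->x = s->y; s->y = s->z; s->z = s->w;
      uint32_t v = s->w;
      v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
      s->w = v;
      return v;        // the caller masks with hash_mask and maps 0 to 0xBAD
    }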
 714 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {

 715   if (UseBiasedLocking) {
 716     // NOTE: many places throughout the JVM do not expect a safepoint
 717     // to be taken here, in particular most operations on perm gen
 718     // objects. However, we only ever bias Java instances and all of
 719     // the call sites of identity_hash that might revoke biases have
 720     // been checked to make sure they can handle a safepoint. The
 721     // added check of the bias pattern is to avoid useless calls to
 722     // thread-local storage.
 723     if (obj->mark()->has_bias_pattern()) {
 724       // Handle for oop obj in case of STW safepoint
 725       Handle hobj(Self, obj);
 726       // Relaxing assertion for bug 6320749.
 727       assert(Universe::verify_in_progress() ||
 728              !SafepointSynchronize::is_at_safepoint(),
 729              "biases should not be seen by VM thread here");
 730       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
 731       obj = hobj();
 732       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 733     }
 734   }


 751   assert(!mark->has_bias_pattern(), "invariant");
 752 
 753   if (mark->is_neutral()) {
 754     hash = mark->hash();              // this is a normal header
 755     if (hash) {                       // if it has hash, just return it
 756       return hash;
 757     }
 758     hash = get_next_hash(Self, obj);  // allocate a new hash code
 759     temp = mark->copy_set_hash(hash); // merge the hash code into header
 760     // use (machine word version) atomic operation to install the hash
 761     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
 762     if (test == mark) {
 763       return hash;
 764     }
 765     // If the atomic operation failed, we must inflate the header
 766     // into a heavyweight monitor. We could add more code here
 767     // for the fast path, but it is not worth the complexity.
 768   } else if (mark->has_monitor()) {
 769     monitor = mark->monitor();
 770     temp = monitor->header();
 771     assert(temp->is_neutral(), "invariant");
 772     hash = temp->hash();
 773     if (hash) {
 774       return hash;
 775     }
 776     // Skip to the following code to reduce code size
 777   } else if (Self->is_lock_owned((address)mark->locker())) {
 778     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
 779     assert(temp->is_neutral(), "invariant");
 780     hash = temp->hash();              // by current thread, check if the displaced
 781     if (hash) {                       // header contains hash code
 782       return hash;
 783     }
 784     // WARNING:
 785     //   The displaced header is strictly immutable.
 786     // It can NOT be changed in ANY case. So we have
 787     // to inflate the header into a heavyweight monitor
 788     // even if the current thread owns the lock. The reason
 789     // is that the BasicLock (stack slot) will be asynchronously
 790     // read by other threads during the inflate() function.
 791     // Any change to the stack may not propagate to other
 792     // threads correctly.
 793   }
 794 
 795   // Inflate the monitor to set hash code
 796   monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
 797   // Load displaced header and check it has hash code
 798   mark = monitor->header();
 799   assert(mark->is_neutral(), "invariant");
 800   hash = mark->hash();
 801   if (hash == 0) {
 802     hash = get_next_hash(Self, obj);
 803     temp = mark->copy_set_hash(hash); // merge hash code into header
 804     assert(temp->is_neutral(), "invariant");
 805     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
 806     if (test != mark) {
 807       // The only update to the header in the monitor (outside GC)
 808       // is to install the hash code. If someone adds a new usage of
 809       // the displaced header, please update this code.
 810       hash = test->hash();
 811       assert(test->is_neutral(), "invariant");
 812       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
 813     }
 814   }
 815   // We finally get the hash
 816   return hash;
 817 }
 818 
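The neutral-header branch of FastHashCode() installs a freshly generated hash into the header with a single CAS, and falls back to whatever value won the race if its CAS fails. A minimal sketch of that idiom with a bare std::atomic<uint64_t> header; the bit layout and helper names are invented for the illustration:

    #include <atomic>
    #include <cstdint>

    static const uint64_t kHashShift = 8;
    static const uint64_t kHashMask  = 0x7FFFFFFull;

    static uint64_t get_hash(uint64_t header) { return (header >> kHashShift) & kHashMask; }
    static uint64_t merge_hash(uint64_t header, uint64_t h) {
      return (header & ~(kHashMask << kHashShift)) | (h << kHashShift);
    }

    uint64_t fast_hash(std::atomic<uint64_t>* header, uint64_t fresh_hash) {
      uint64_t mark = header->load();
      uint64_t h = get_hash(mark);
      if (h != 0) return h;                       // already has a hash
      uint64_t temp = merge_hash(mark, fresh_hash);
      if (header->compare_exchange_strong(mark, temp)) {
        return fresh_hash;                        // we installed it
      }
      // Lost the race; 'mark' now holds the current header.  The sketch
      // ignores the case where the object got locked instead of hashed.
      return get_hash(mark);
    }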
 819 // Deprecated -- use FastHashCode() instead.
 820 
 821 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
 822   return FastHashCode(Thread::current(), obj());
 823 }
 824 
 825 
 826 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
 827                                                    Handle h_obj) {
 828   if (UseBiasedLocking) {
 829     BiasedLocking::revoke_and_rebias(h_obj, false, thread);


 965 
 966 void ObjectSynchronizer::oops_do(OopClosure* f) {
 967   if (MonitorInUseLists) {
 968     // When using thread local monitor lists, we only scan the
 969     // global used list here (for moribund threads), and
 970     // the thread-local monitors in Thread::oops_do().
 971     global_used_oops_do(f);
 972   } else {
 973     global_oops_do(f);
 974   }
 975 }
 976 
 977 void ObjectSynchronizer::global_oops_do(OopClosure* f) {
 978   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 979   PaddedEnd<ObjectMonitor> * block =
 980     (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
 981   for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
 982     assert(block->object() == CHAINMARKER, "must be a block header");
 983     for (int i = 1; i < _BLOCKSIZE; i++) {
 984       ObjectMonitor* mid = (ObjectMonitor *)&block[i];
 985       if (mid->object() != NULL) {

 986         f->do_oop((oop*)mid->object_addr());
 987       }
 988     }
 989   }
 990 }
 991 
 992 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
 993   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 994   list_oops_do(gOmInUseList, f);
 995 }
 996 
 997 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
 998   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
 999   list_oops_do(thread->omInUseList, f);
1000 }
1001 
1002 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1003   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1004   ObjectMonitor* mid;
1005   for (mid = list; mid != NULL; mid = mid->FreeNext) {


1061     }
1062     // Induce a 'null' safepoint to scavenge monitors.
1063     // The VM_Operation instance must be heap allocated as the op will be enqueued and posted
1064     // to the VMThread and has a lifespan longer than that of this activation record.
1065     // The VMThread will delete the op when completed.
1066     VMThread::execute(new VM_ScavengeMonitors());
1067 
1068     if (ObjectMonitor::Knob_Verbose) {
1069       tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
1070                     Whence, ForceMonitorScavenge) ;
1071       tty->flush();
1072     }
1073   }
1074 }
1075 
1076 void ObjectSynchronizer::verifyInUse(Thread *Self) {
1077   ObjectMonitor* mid;
1078   int in_use_tally = 0;
1079   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
1080     in_use_tally++;

1081   }
1082   assert(in_use_tally == Self->omInUseCount, "in-use count off");
1083 
1084   int free_tally = 0;
1085   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
1086     free_tally++;

1087   }
1088   assert(free_tally == Self->omFreeCount, "free count off");
1089 }
1090 
1091 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
1092   // A large MAXPRIVATE value reduces both list lock contention
1093   // and list coherency traffic, but also tends to increase the
1094   // number of objectMonitors in circulation as well as the STW
1095   // scavenge costs.  As usual, we lean toward time in space-time
1096   // tradeoffs.
1097   const int MAXPRIVATE = 1024;
1098   for (;;) {
1099     ObjectMonitor * m;
1100 
1101     // 1: try to allocate from the thread's local omFreeList.
1102     // Threads will attempt to allocate first from their local list, then
1103     // from the global list, and only after those attempts fail will the thread
1104     // attempt to instantiate new monitors.   Thread-local free lists take
1105     // heat off the gListLock and improve allocation latency, as well as reducing
1106     // coherency traffic on the shared global list.
1107     m = Self->omFreeList;
1108     if (m != NULL) {
1109       Self->omFreeList = m->FreeNext;
1110       Self->omFreeCount--;
1111       // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
1112       guarantee(m->object() == NULL, "invariant");

1113       if (MonitorInUseLists) {
1114         m->FreeNext = Self->omInUseList;
1115         Self->omInUseList = m;
1116         Self->omInUseCount++;
1117         if (ObjectMonitor::Knob_VerifyInUse) {
1118           verifyInUse(Self);
1119         }
1120       } else {
1121         m->FreeNext = NULL;
1122       }

1123       return m;
1124     }
1125 
1126     // 2: try to allocate from the global gFreeList
1127     // CONSIDER: use muxTry() instead of muxAcquire().
1128     // If the muxTry() fails then drop immediately into case 3.
1129     // If we're using thread-local free lists then try
1130     // to reprovision the caller's free list.
1131     if (gFreeList != NULL) {
1132       // Reprovision the thread's omFreeList.
1133       // Use bulk transfers to reduce the allocation rate and heat
1134       // on various locks.
1135       Thread::muxAcquire(&gListLock, "omAlloc");
1136       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1137         gMonitorFreeCount--;
1138         ObjectMonitor * take = gFreeList;
1139         gFreeList = take->FreeNext;
1140         guarantee(take->object() == NULL, "invariant");


1141         guarantee(!take->is_busy(), "invariant");
1142         take->Recycle();

1143         omRelease(Self, take, false);
1144       }
1145       Thread::muxRelease(&gListLock);
1146       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1147       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1148       TEVENT(omFirst - reprovision);
1149 
1150       const int mx = MonitorBound;
1151       if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
1152         // We can't safely induce a STW safepoint from omAlloc() as our thread
1153         // state may not be appropriate for such activities and callers may hold
1154         // naked oops, so instead we defer the action.
1155         InduceScavenge(Self, "omAlloc");
1156       }
1157       continue;
1158     }
1159 
1160     // 3: allocate a block of new ObjectMonitors
1161     // Both the local and global free lists are empty -- resort to malloc().
1162     // In the current implementation objectMonitors are TSM - immortal.


1177 
1178     // NOTE: (almost) no way to recover if allocation failed.
1179     // We might be able to induce a STW safepoint and scavenge enough
1180     // objectMonitors to permit progress.
1181     if (temp == NULL) {
1182       vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
1183                             "Allocate ObjectMonitors");
1184     }
1185     (void)memset((void *) temp, 0, neededsize);
1186 
1187     // Format the block.
1188     // Initialize the linked list; each monitor points to its next,
1189     // forming the singly linked free list. The very first monitor
1190     // will point to the next block, which forms the block list.
1191     // The trick of using the 1st element in the block as gBlockList
1192     // linkage should be reconsidered.  A better implementation would
1193     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1194 
1195     for (int i = 1; i < _BLOCKSIZE; i++) {
1196       temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];

1197     }
1198 
1199     // terminate the last monitor as the end of the list
1200     temp[_BLOCKSIZE - 1].FreeNext = NULL;
1201 
1202     // Element [0] is reserved for global list linkage
1203     temp[0].set_object(CHAINMARKER);
1204 
1205     // Consider carving out this thread's current request from the
1206     // block in hand.  This avoids some lock traffic and redundant
1207     // list activity.
1208 
1209     // Acquire the gListLock to manipulate gBlockList and gFreeList.
1210     // An Oyama-Taura-Yonezawa scheme might be more efficient.
1211     Thread::muxAcquire(&gListLock, "omAlloc [2]");
1212     gMonitorPopulation += _BLOCKSIZE-1;
1213     gMonitorFreeCount += _BLOCKSIZE-1;
1214 
1215     // Add the new block to the list of extant blocks (gBlockList).
1216     // The very first objectMonitor in a block is reserved and dedicated.


1226     Thread::muxRelease(&gListLock);
1227     TEVENT(Allocate block of monitors);
1228   }
1229 }
1230 
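Step 3 of omAlloc() formats a freshly allocated block so that element [0] carries the block-list linkage while elements [1.._BLOCKSIZE-1] are chained onto the free list. A minimal single-threaded sketch (the real code takes gListLock around the splice); all types and constants are invented:

    struct Monitor { Monitor* free_next = nullptr; Monitor* block_next = nullptr; };

    static const int kBlockSize = 128;          // stand-in for _BLOCKSIZE
    static Monitor* g_block_list = nullptr;     // blocks, linked via element [0]
    static Monitor* g_free_list  = nullptr;     // free monitors

    void allocate_block() {
      Monitor* block = new Monitor[kBlockSize];
      // Chain elements [1 .. kBlockSize-2] to their successors.
      for (int i = 1; i < kBlockSize - 1; i++) {
        block[i].free_next = &block[i + 1];
      }
      // Element [0] is reserved: link the new block onto the block list ...
      block[0].block_next = g_block_list;
      g_block_list = block;
      // ... and splice elements [1 .. kBlockSize-1] onto the free list.
      block[kBlockSize - 1].free_next = g_free_list;
      g_free_list = &block[1];
    }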
1231 // Place "m" on the caller's private per-thread omFreeList.
1232 // In practice there's no need to clamp or limit the number of
1233 // monitors on a thread's omFreeList as the only time we'll call
1234 // omRelease is to return a monitor to the free list after a CAS
1235 // attempt failed.  This doesn't allow unbounded #s of monitors to
1236 // accumulate on a thread's free list.
1237 //
1238 // Key constraint: all ObjectMonitors on a thread's free list and the global
1239 // free list must have their object field set to null. This prevents the
1240 // scavenger -- deflate_idle_monitors -- from reclaiming them.
1241 
1242 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1243                                    bool fromPerThreadAlloc) {
1244   guarantee(m->object() == NULL, "invariant");
1245   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");

1246   // Remove from omInUseList
1247   if (MonitorInUseLists && fromPerThreadAlloc) {
1248     ObjectMonitor* cur_mid_in_use = NULL;
1249     bool extracted = false;
1250     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
1251       if (m == mid) {
1252         // extract from per-thread in-use list
1253         if (mid == Self->omInUseList) {
1254           Self->omInUseList = mid->FreeNext;
1255         } else if (cur_mid_in_use != NULL) {
1256           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1257         }
1258         extracted = true;
1259         Self->omInUseCount--;
1260         if (ObjectMonitor::Knob_VerifyInUse) {
1261           verifyInUse(Self);
1262         }
1263         break;
1264       }
1265     }


1340     gMonitorFreeCount += tally;
1341     assert(Self->omFreeCount == tally, "free-count off");
1342     Self->omFreeCount = 0;
1343   }
1344 
1345   if (inUseTail != NULL) {
1346     inUseTail->FreeNext = gOmInUseList;
1347     gOmInUseList = inUseList;
1348     gOmInUseCount += inUseTally;
1349   }
1350 
1351   Thread::muxRelease(&gListLock);
1352   TEVENT(omFlush);
1353 }
1354 
1355 // Fast path code shared by multiple functions
1356 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1357   markOop mark = obj->mark();
1358   if (mark->has_monitor()) {
1359     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1360     assert(mark->monitor()->header()->is_neutral(), "monitor must record a good object header");


1361     return mark->monitor();
1362   }

1363   return ObjectSynchronizer::inflate(Thread::current(),
1364                                      obj,
1365                                      inflate_cause_vm_internal);
1366 }
1367 
1368 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1369                                                      oop object,
1370                                                      const InflateCause cause) {
1371 
1372   // Inflate mutates the heap ...
1373   // Relaxing assertion for bug 6320749.
1374   assert(Universe::verify_in_progress() ||
1375          !SafepointSynchronize::is_at_safepoint(), "invariant");
1376 
1377   EventJavaMonitorInflate event;
1378 
1379   for (;;) {
1380     const markOop mark = object->mark();
1381     assert(!mark->has_bias_pattern(), "invariant");
1382 
1383     // The mark can be in one of the following states:
1384     // *  Inflated     - just return
1385     // *  Stack-locked - coerce it to inflated
1386     // *  INFLATING    - busy wait for conversion to complete
1387     // *  Neutral      - aggressively inflate the object.
1388     // *  BIASED       - Illegal.  We should never see this
1389 
1390     // CASE: inflated
1391     if (mark->has_monitor()) {
1392       ObjectMonitor * inf = mark->monitor();
1393       assert(inf->header()->is_neutral(), "invariant");




1394       assert(inf->object() == object, "invariant");
1395       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1396       event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
1397       return inf;
1398     }
1399 
1400     // CASE: inflation in progress - inflating over a stack-lock.
1401     // Some other thread is converting from stack-locked to inflated.
1402     // Only that thread can complete inflation -- other threads must wait.
1403     // The INFLATING value is transient.
1404     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1405     // We could always eliminate polling by parking the thread on some auxiliary list.
1406     if (mark == markOopDesc::INFLATING()) {
1407       TEVENT(Inflate: spin while INFLATING);
1408       ReadStableMark(object);
1409       continue;
1410     }
1411 
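The INFLATING case above boils down to polling the mark word until the transient value disappears. A minimal sketch of that busy-wait, with a stand-in INFLATING value and an invented backoff policy (the real ReadStableMark() also uses spin limits and the gInflationLocks):

    #include <atomic>
    #include <cstdint>
    #include <thread>

    static const uintptr_t kInflating = 0;   // stand-in for the transient value

    uintptr_t read_stable_mark(const std::atomic<uintptr_t>* mark_word) {
      for (int spins = 0; ; spins++) {
        uintptr_t m = mark_word->load(std::memory_order_acquire);
        if (m != kInflating) return m;       // stable value observed
        if (spins < 100) continue;           // the INFLATING window is short
        std::this_thread::yield();           // back off after spinning a while
      }
    }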
1412     // CASE: stack-locked
1413     // Could be stack-locked either by this thread or by some other thread.


1580 // only scans the per-thread in-use lists. omAlloc() puts all
1581 // assigned monitors on the per-thread list. deflate_idle_monitors()
1582 // returns the non-busy monitors to the global free list.
1583 // When a thread dies, omFlush() adds the list of active monitors for
1584 // that thread to a global gOmInUseList acquiring the
1585 // global list lock. deflate_idle_monitors() acquires the global
1586 // list lock to scan for non-busy monitors to the global free list.
1587 // An alternative could have used a single global in-use list. The
1588 // downside would have been the additional cost of acquiring the global list lock
1589 // for every omAlloc().
1590 //
1591 // Perversely, the heap size -- and thus the STW safepoint rate --
1592 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
1593 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1594 // This is an unfortunate aspect of this design.
1595 
1596 enum ManifestConstants {
1597   ClearResponsibleAtSTW = 0
1598 };
 1599 
1600 // Deflate a single monitor if not in-use
1601 // Return true if deflated, false if in-use
1602 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1603                                          ObjectMonitor** freeHeadp,
1604                                          ObjectMonitor** freeTailp) {
1605   bool deflated;
1606   // Normal case ... The monitor is associated with obj.
1607   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1608   guarantee(mid == obj->mark()->monitor(), "invariant");
1609   guarantee(mid->header()->is_neutral(), "invariant");
1610 
1611   if (mid->is_busy()) {
1612     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1613     deflated = false;
1614   } else {
1615     // Deflate the monitor if it is no longer being used
1616     // It's idle - scavenge and return to the global free list
1617     // plain old deflation ...
1618     TEVENT(deflate_idle_monitors - scavenge1);
1619     if (log_is_enabled(Debug, monitorinflation)) {
1620       if (obj->is_instance()) {
1621         ResourceMark rm;
1622         log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
1623                                     "mark " INTPTR_FORMAT " , type %s",
1624                                     p2i(obj), p2i(obj->mark()),
1625                                     obj->klass()->external_name());
1626       }
1627     }
1628 
1629     // Restore the header back to obj
1630     obj->release_set_mark(mid->header());
1631     mid->clear();
1632 
1633     assert(mid->object() == NULL, "invariant");

1634 
1635     // Move the object to the working free list defined by freeHeadp, freeTailp
1636     if (*freeHeadp == NULL) *freeHeadp = mid;
1637     if (*freeTailp != NULL) {
1638       ObjectMonitor * prevtail = *freeTailp;
1639       assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
1640       prevtail->FreeNext = mid;
1641     }
1642     *freeTailp = mid;
1643     deflated = true;
1644   }
1645   return deflated;
1646 }
1647 
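A minimal sketch of the deflation step above: if the monitor is idle, the saved header is restored into the object with a release store and the monitor is appended to the working free list threaded through freeHeadp/freeTailp. All types here are illustrative, not HotSpot's:

    #include <atomic>
    #include <cstdint>

    struct FakeObj     { std::atomic<uintptr_t> mark; };
    struct FakeMonitor {
      uintptr_t    saved_header = 0;
      FakeObj*     object       = nullptr;
      bool         busy         = false;
      FakeMonitor* free_next    = nullptr;
    };

    bool deflate_one(FakeMonitor* mid, FakeObj* obj,
                     FakeMonitor** freeHeadp, FakeMonitor** freeTailp) {
      if (mid->busy) return false;                  // in use: leave it alone
      // Release store so no thread sees the object still bound to a monitor
      // that is about to be recycled.
      obj->mark.store(mid->saved_header, std::memory_order_release);
      mid->object = nullptr;                        // clear the back-pointer
      // Append to the working free list.
      if (*freeHeadp == nullptr) *freeHeadp = mid;
      if (*freeTailp != nullptr) (*freeTailp)->free_next = mid;
      mid->free_next = nullptr;
      *freeTailp = mid;
      return true;
    }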
1648 // Walk a given monitor list, and deflate idle monitors
1649 // The given list could be a per-thread list or a global list
1650 // Caller acquires gListLock
1651 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
1652                                              ObjectMonitor** freeHeadp,
1653                                              ObjectMonitor** freeTailp) {




  98 #define DTRACE_MONITOR_WAIT_PROBE(obj, thread, millis, mon)    {;}
  99 #define DTRACE_MONITOR_PROBE(probe, obj, thread, mon)          {;}
 100 
 101 #endif // ndef DTRACE_ENABLED
 102 
 103 // This exists only as a workaround of dtrace bug 6254741
 104 int dtrace_waited_probe(ObjectMonitor* monitor, Handle obj, Thread* thr) {
 105   DTRACE_MONITOR_PROBE(waited, monitor, obj(), thr);
 106   return 0;
 107 }
 108 
 109 #define NINFLATIONLOCKS 256
 110 static volatile intptr_t gInflationLocks[NINFLATIONLOCKS];
 111 
 112 // global list of blocks of monitors
 113 // gBlockList is really PaddedEnd<ObjectMonitor> *, but we don't
 114 // want to expose the PaddedEnd template more than necessary.
 115 ObjectMonitor * volatile ObjectSynchronizer::gBlockList = NULL;
 116 // global monitor free list
 117 ObjectMonitor * volatile ObjectSynchronizer::gFreeList  = NULL;
 118 ObjectMonitor * ObjectSynchronizer::gFreeListNextSafepoint  = NULL;
 119 ObjectMonitor * ObjectSynchronizer::gFreeListNextSafepointTail  = NULL;
 120 // global monitor in-use list, for moribund threads,
 121 // monitors they inflated need to be scanned for deflation
 122 ObjectMonitor * volatile ObjectSynchronizer::gOmInUseList  = NULL;
 123 // count of entries in gOmInUseList
 124 int ObjectSynchronizer::gOmInUseCount = 0;
 125 
 126 bool ObjectSynchronizer::_should_deflate_idle_monitors_conc = false;
 127 
 128 static volatile intptr_t gListLock = 0;      // protects global monitor lists
 129 static volatile int gMonitorFreeCount  = 0;  // # on gFreeList
 130 static int gMonitorFreeCountNextSafepoint = 0;
 131 static volatile int gMonitorPopulation = 0;  // # Extant -- in circulation
 132 
 133 static void post_monitor_inflate_event(EventJavaMonitorInflate&,
 134                                        const oop,
 135                                        const ObjectSynchronizer::InflateCause);
 136 
 137 #define CHAINMARKER (cast_to_oop<intptr_t>(-1))
 138 
 139 
 140 // =====================> Quick functions
 141 
 142 // The quick_* forms are special fast-path variants used to improve
 143 // performance.  In the simplest case, a "quick_*" implementation could
 144 // simply return false, in which case the caller will perform the necessary
 145 // state transitions and call the slow-path form.
 146 // The fast-path is designed to handle frequently arising cases in an efficient
 147 // manner and is just a degenerate "optimistic" variant of the slow-path.
 148 // returns true  -- to indicate the call was satisfied.
 149 // returns false -- to indicate the call needs the services of the slow-path.
 150 // A no-loitering ordinance is in effect for code in the quick_* family


 328     // swing the displaced header from the BasicLock back to the mark.
 329     assert(dhw->is_neutral(), "invariant");
 330     if ((markOop) Atomic::cmpxchg_ptr(dhw, object->mark_addr(), mark) == mark) {
 331       TEVENT(fast_exit: release stack-lock);
 332       return;
 333     }
 334   }
 335 
 336   // We have to take the slow-path of possible inflation and then exit.
 337   ObjectSynchronizer::inflate(THREAD,
 338                               object,
 339                               inflate_cause_vm_internal)->exit(true, THREAD);
 340 }
 341 
 342 // -----------------------------------------------------------------------------
 343 // Interpreter/Compiler Slow Case
 344 // This routine handles the interpreter/compiler slow case.
 345 // We don't need to use the fast path here, because it must have
 346 // already failed in the interpreter/compiler code.
 347 void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
 348   do {
 349     markOop mark = obj->mark();
 350     assert(!mark->has_bias_pattern(), "should not see bias pattern here");
 351 
 352     if (mark->is_neutral()) {
 353       // Anticipate successful CAS -- the ST of the displaced mark must
 354       // be visible <= the ST performed by the CAS.
 355       lock->set_displaced_header(mark);
 356       if (mark == (markOop) Atomic::cmpxchg_ptr(lock, obj()->mark_addr(), mark)) {
 357         TEVENT(slow_enter: release stacklock);
 358         return;
 359       }
 360       // Fall through to inflate() ...
 361     } else if (mark->has_locker() &&
 362                THREAD->is_lock_owned((address)mark->locker())) {
 363       assert(lock != mark->locker(), "must not re-lock the same lock");
 364       assert(lock != (BasicLock*)obj->mark(), "don't relock with same BasicLock");
 365       lock->set_displaced_header(NULL);
 366       return;
 367     }
 368 
 369     // The object header will never be displaced to this lock,
 370     // so it does not matter what the value is, except that it
 371     // must be non-zero to avoid looking like a re-entrant lock,
 372     // and must not look locked either.
 373     lock->set_displaced_header(markOopDesc::unused_mark());
 374   } while (!ObjectSynchronizer::inflate(THREAD,
 375                                 obj(),
 376                                 inflate_cause_monitor_enter)->enter(THREAD));
 377 }
 378 
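With concurrent deflation, enter() can now fail because the monitor handed back by inflate() was deflated in the meantime, which is why slow_enter() above wraps the call in a do/while. A minimal sketch of that retry shape; FakeMonitor and inflate_for() are invented stand-ins, not the patch's API:

    #include <atomic>

    struct FakeMonitor {
      std::atomic<bool> deflated{false};
      // Returns false if the monitor was deflated out from under the caller.
      bool enter() { return !deflated.load(std::memory_order_acquire); }
    };

    static FakeMonitor g_monitor;
    // Stand-in for inflate(): a real implementation would (re)inflate obj's monitor.
    static FakeMonitor* inflate_for(void* /*obj*/) { return &g_monitor; }

    void monitor_enter(void* obj) {
      // Re-inflate and retry until enter() succeeds on a monitor that was not
      // concurrently deflated -- the same shape as the do/while above.
      while (!inflate_for(obj)->enter()) {
        // raced with deflation; loop and pick up a fresh monitor
      }
    }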
 379 // This routine handles the interpreter/compiler slow case.
 380 // We don't need to use the fast path here, because it must have
 381 // already failed in the interpreter/compiler code. Simply using the
 382 // heavyweight monitor should be ok, unless someone finds otherwise.
 383 void ObjectSynchronizer::slow_exit(oop object, BasicLock* lock, TRAPS) {
 384   fast_exit(object, lock, THREAD);
 385 }
 386 
 387 // -----------------------------------------------------------------------------
 388 // Class Loader  support to workaround deadlocks on the class loader lock objects
 389 // Also used by GC
 390 // complete_exit()/reenter() are used to wait on a nested lock
 391 // i.e. to give up an outer lock completely and then re-enter
 392 // Used when holding nested locks - lock acquisition order: lock1 then lock2
 393 //  1) complete_exit lock1 - saving recursion count
 394 //  2) wait on lock2
 395 //  3) when notified on lock2, unlock lock2
 396 //  4) reenter lock1 with original recursion count


 401   if (UseBiasedLocking) {
 402     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 403     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 404   }
 405 
 406   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 407                                                        obj(),
 408                                                        inflate_cause_vm_internal);
 409 
 410   return monitor->complete_exit(THREAD);
 411 }
 412 
 413 // NOTE: must use heavy weight monitor to handle complete_exit/reenter()
 414 void ObjectSynchronizer::reenter(Handle obj, intptr_t recursion, TRAPS) {
 415   TEVENT(reenter);
 416   if (UseBiasedLocking) {
 417     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 418     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 419   }
 420 
 421   ObjectMonitor* monitor;
 422   do {
 423     monitor = ObjectSynchronizer::inflate(THREAD,
 424                                           obj(),
 425                                           inflate_cause_vm_internal);
 426   } while (!monitor->reenter(recursion, THREAD));

 427 }
 428 // -----------------------------------------------------------------------------
 429 // JNI locks on java objects
 430 // NOTE: must use heavy weight monitor to handle jni monitor enter
 431 void ObjectSynchronizer::jni_enter(Handle obj, TRAPS) {
 432   // the current locking is from JNI instead of Java code
 433   TEVENT(jni_enter);
 434   if (UseBiasedLocking) {
 435     BiasedLocking::revoke_and_rebias(obj, false, THREAD);
 436     assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 437   }
 438   THREAD->set_current_pending_monitor_is_from_java(false);
 439   while (!ObjectSynchronizer::inflate(THREAD, obj(), inflate_cause_jni_enter)->enter(THREAD)); // retry if the monitor was deflated concurrently
 440   THREAD->set_current_pending_monitor_is_from_java(true);
 441 }
 442 
 443 // NOTE: must use heavy weight monitor to handle jni monitor exit
 444 void ObjectSynchronizer::jni_exit(oop obj, Thread* THREAD) {
 445   TEVENT(jni_exit);
 446   if (UseBiasedLocking) {
 447     Handle h_obj(THREAD, obj);
 448     BiasedLocking::revoke_and_rebias(h_obj, false, THREAD);
 449     obj = h_obj();
 450   }
 451   assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 452 
 453   ObjectMonitor* monitor = ObjectSynchronizer::inflate(THREAD,
 454                                                        obj,
 455                                                        inflate_cause_jni_exit);
 456   // If this thread has locked the object, exit the monitor.  Note:  can't use
 457   // monitor->check(CHECK); must exit even if an exception is pending.
 458   if (monitor->check(THREAD)) {
 459     monitor->exit(true, THREAD);


 702     // likely make this the default in future releases.
 703     unsigned t = Self->_hashStateX;
 704     t ^= (t << 11);
 705     Self->_hashStateX = Self->_hashStateY;
 706     Self->_hashStateY = Self->_hashStateZ;
 707     Self->_hashStateZ = Self->_hashStateW;
 708     unsigned v = Self->_hashStateW;
 709     v = (v ^ (v >> 19)) ^ (t ^ (t >> 8));
 710     Self->_hashStateW = v;
 711     value = v;
 712   }
 713 
 714   value &= markOopDesc::hash_mask;
 715   if (value == 0) value = 0xBAD;
 716   assert(value != markOopDesc::no_hash, "invariant");
 717   TEVENT(hashCode: GENERATE);
 718   return value;
 719 }
 720 
 721 intptr_t ObjectSynchronizer::FastHashCode(Thread * Self, oop obj) {
 722  Retry:
 723   if (UseBiasedLocking) {
 724     // NOTE: many places throughout the JVM do not expect a safepoint
 725     // to be taken here, in particular most operations on perm gen
 726     // objects. However, we only ever bias Java instances and all of
 727     // the call sites of identity_hash that might revoke biases have
 728     // been checked to make sure they can handle a safepoint. The
 729     // added check of the bias pattern is to avoid useless calls to
 730     // thread-local storage.
 731     if (obj->mark()->has_bias_pattern()) {
 732       // Handle for oop obj in case of STW safepoint
 733       Handle hobj(Self, obj);
 734       // Relaxing assertion for bug 6320749.
 735       assert(Universe::verify_in_progress() ||
 736              !SafepointSynchronize::is_at_safepoint(),
 737              "biases should not be seen by VM thread here");
 738       BiasedLocking::revoke_and_rebias(hobj, false, JavaThread::current());
 739       obj = hobj();
 740       assert(!obj->mark()->has_bias_pattern(), "biases should be revoked by now");
 741     }
 742   }


 759   assert(!mark->has_bias_pattern(), "invariant");
 760 
 761   if (mark->is_neutral()) {
 762     hash = mark->hash();              // this is a normal header
 763     if (hash) {                       // if it has hash, just return it
 764       return hash;
 765     }
 766     hash = get_next_hash(Self, obj);  // allocate a new hash code
 767     temp = mark->copy_set_hash(hash); // merge the hash code into header
 768     // use (machine word version) atomic operation to install the hash
 769     test = (markOop) Atomic::cmpxchg_ptr(temp, obj->mark_addr(), mark);
 770     if (test == mark) {
 771       return hash;
 772     }
 773     // If the atomic operation failed, we must inflate the header
 774     // into a heavyweight monitor. We could add more code here
 775     // for the fast path, but it is not worth the complexity.
 776   } else if (mark->has_monitor()) {
 777     monitor = mark->monitor();
 778     temp = monitor->header();
 779     assert(temp->is_neutral() || (temp->hash() == 0 && temp->is_marked()), "invariant");
 780     hash = temp->hash();
 781     if (hash) {
 782       return hash;
 783     }
 784     // Skip to the following code to reduce code size
 785   } else if (Self->is_lock_owned((address)mark->locker())) {
 786     temp = mark->displaced_mark_helper(); // this is a lightweight monitor owned
 787     assert(temp->is_neutral(), "invariant");
 788     hash = temp->hash();              // by current thread, check if the displaced
 789     if (hash) {                       // header contains hash code
 790       return hash;
 791     }
 792     // WARNING:
 793     //   The displaced header is strictly immutable.
 794     // It can NOT be changed in ANY case. So we have
 795     // to inflate the header into a heavyweight monitor
 796     // even if the current thread owns the lock. The reason
 797     // is that the BasicLock (stack slot) will be asynchronously
 798     // read by other threads during the inflate() function.
 799     // Any change to the stack may not propagate to other
 800     // threads correctly.
 801   }
 802 
 803   // Inflate the monitor to set hash code
 804   monitor = ObjectSynchronizer::inflate(Self, obj, inflate_cause_hash_code);
 805   // Load displaced header and check it has hash code
 806   mark = monitor->header();
 807   assert(mark->is_neutral() || (mark->hash() == 0 && mark->is_marked()), "invariant");
 808   hash = mark->hash();
 809   if (hash == 0) {
 810     hash = get_next_hash(Self, obj);
 811     temp = mark->set_unmarked()->copy_set_hash(hash); // merge hash code into header
 812     assert(temp->is_neutral(), "invariant");
 813     if (mark->is_marked()) {
 814       // Monitor is being deflated. Try installing mark word with hash code into obj.
 815       markOop monitor_mark = markOopDesc::encode(monitor);
 816       if (obj->cas_set_mark(temp, monitor_mark) == monitor_mark) {
 817         return hash;
 818       } else {
 819         // Somebody else installed a new mark word in obj. Start over. We are making progress,
 820         // as the new mark word is not a pointer to monitor.
 821         goto Retry;
 822       }
 823     }
 824     test = (markOop) Atomic::cmpxchg_ptr(temp, monitor, mark);
 825     if (test != mark) {
 826       // The only update to the header in the monitor (outside GC) is to
 827       // install the hash code or to mark the header to signal that the
 828       // monitor is being deflated. If someone adds a new usage of the
 829       // displaced header, please update this code.
 830       if (test->is_marked()) {
 831         // Monitor is being deflated. Make progress by starting over.
 832         assert(test->hash() == 0, "invariant");
 833         goto Retry;
 834       }
 835       hash = test->hash();
 836       assert(test->is_neutral(), "invariant");
 837       assert(hash != 0, "Trivial unexpected object/monitor header usage.");
 838     }
 839   }
 840   // We finally get the hash
 841   return hash;
 842 }
 843 
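The new FastHashCode() treats a marked displaced header as "deflation in progress": the hashing thread either installs the restored header (with the hash merged in) straight into the object, racing the deflater, or starts over. A minimal sketch of that decision; the bit layout and names are invented, and the sketch collapses all failure cases into a single "retry" result:

    #include <atomic>
    #include <cstdint>

    static const uintptr_t kMarkedBit = 0x2;          // stand-in "being deflated" flag
    static bool is_marked(uintptr_t h) { return (h & kMarkedBit) != 0; }

    struct FakeObj     { std::atomic<uintptr_t> mark; };
    struct FakeMonitor { std::atomic<uintptr_t> header; FakeObj* obj; };

    // 'dmw_with_hash' is the neutral displaced header with the new hash merged
    // in; 'monitor_mark' is the mark word that encodes a pointer to 'm'.
    // Returns true if the hash ended up installed; false means retry from the top.
    bool try_install_hash(FakeMonitor* m, uintptr_t dmw_with_hash,
                          uintptr_t monitor_mark) {
      uintptr_t dmw = m->header.load(std::memory_order_acquire);
      if (is_marked(dmw)) {
        // Deflation in progress: try to put the restored header + hash
        // directly into the object; losing the CAS means someone else
        // already changed the object's mark, so the caller retries.
        uintptr_t expected = monitor_mark;
        return m->obj->mark.compare_exchange_strong(expected, dmw_with_hash);
      }
      // Normal path: CAS the hash into the displaced header in the monitor.
      return m->header.compare_exchange_strong(dmw, dmw_with_hash);
    }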
 844 // Deprecated -- use FastHashCode() instead.
 845 
 846 intptr_t ObjectSynchronizer::identity_hash_value_for(Handle obj) {
 847   return FastHashCode(Thread::current(), obj());
 848 }
 849 
 850 
 851 bool ObjectSynchronizer::current_thread_holds_lock(JavaThread* thread,
 852                                                    Handle h_obj) {
 853   if (UseBiasedLocking) {
 854     BiasedLocking::revoke_and_rebias(h_obj, false, thread);


 990 
 991 void ObjectSynchronizer::oops_do(OopClosure* f) {
 992   if (MonitorInUseLists) {
 993     // When using thread local monitor lists, we only scan the
 994     // global used list here (for moribund threads), and
 995     // the thread-local monitors in Thread::oops_do().
 996     global_used_oops_do(f);
 997   } else {
 998     global_oops_do(f);
 999   }
1000 }
1001 
1002 void ObjectSynchronizer::global_oops_do(OopClosure* f) {
1003   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1004   PaddedEnd<ObjectMonitor> * block =
1005     (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
1006   for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1007     assert(block->object() == CHAINMARKER, "must be a block header");
1008     for (int i = 1; i < _BLOCKSIZE; i++) {
1009       ObjectMonitor* mid = (ObjectMonitor *)&block[i];
1010       if (mid->is_active()) {
1011         assert(mid->object() != NULL, "invariant");
1012         f->do_oop((oop*)mid->object_addr());
1013       }
1014     }
1015   }
1016 }
1017 
1018 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
1019   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1020   list_oops_do(gOmInUseList, f);
1021 }
1022 
1023 void ObjectSynchronizer::thread_local_used_oops_do(Thread* thread, OopClosure* f) {
1024   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1025   list_oops_do(thread->omInUseList, f);
1026 }
1027 
1028 void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
1029   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
1030   ObjectMonitor* mid;
1031   for (mid = list; mid != NULL; mid = mid->FreeNext) {


1087     }
1088     // Induce a 'null' safepoint to scavenge monitors.
1089     // The VM_Operation instance must be heap allocated as the op will be enqueued and posted
1090     // to the VMThread and has a lifespan longer than that of this activation record.
1091     // The VMThread will delete the op when completed.
1092     VMThread::execute(new VM_ScavengeMonitors());
1093 
1094     if (ObjectMonitor::Knob_Verbose) {
1095       tty->print_cr("INFO: Monitor scavenge - STW posted @%s (%d)",
1096                     Whence, ForceMonitorScavenge) ;
1097       tty->flush();
1098     }
1099   }
1100 }
1101 
1102 void ObjectSynchronizer::verifyInUse(Thread *Self) {
1103   ObjectMonitor* mid;
1104   int in_use_tally = 0;
1105   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
1106     in_use_tally++;
1107     guarantee(mid->is_active(), "invariant");
1108   }
1109   guarantee(in_use_tally == Self->omInUseCount, "in-use count off");
1110 
1111   int free_tally = 0;
1112   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
1113     free_tally++;
1114     guarantee(mid->is_free(), "invariant");
1115   }
1116   guarantee(free_tally == Self->omFreeCount, "free count off");
1117 }
1118 
1119 ObjectMonitor* ObjectSynchronizer::omAlloc(Thread * Self) {
1120   // A large MAXPRIVATE value reduces both list lock contention
1121   // and list coherency traffic, but also tends to increase the
1122   // number of objectMonitors in circulation as well as the STW
1123   // scavenge costs.  As usual, we lean toward time in space-time
1124   // tradeoffs.
1125   const int MAXPRIVATE = 1024;
1126   for (;;) {
1127     ObjectMonitor * m;
1128 
1129     // 1: try to allocate from the thread's local omFreeList.
1130     // Threads will attempt to allocate first from their local list, then
1131     // from the global list, and only after those attempts fail will the thread
1132     // attempt to instantiate new monitors.   Thread-local free lists take
1133     // heat off the gListLock and improve allocation latency, as well as reducing
1134     // coherency traffic on the shared global list.
1135     m = Self->omFreeList;
1136     if (m != NULL) {
1137       Self->omFreeList = m->FreeNext;
1138       Self->omFreeCount--;
1139       // CONSIDER: set m->FreeNext = BAD -- diagnostic hygiene
1140       guarantee(m->object() == NULL, "invariant");
1141       m->set_allocation_state(ObjectMonitor::New);
1142       if (MonitorInUseLists) {
1143         m->FreeNext = Self->omInUseList;
1144         Self->omInUseList = m;
1145         Self->omInUseCount++;
1146         if (ObjectMonitor::Knob_VerifyInUse) {
1147           verifyInUse(Self);
1148         }
1149       } else {
1150         m->FreeNext = NULL;
1151       }
1152       assert(!m->is_free(), "post-condition");
1153       return m;
1154     }
1155 
1156     // 2: try to allocate from the global gFreeList
1157     // CONSIDER: use muxTry() instead of muxAcquire().
1158     // If the muxTry() fails then drop immediately into case 3.
1159     // If we're using thread-local free lists then try
1160     // to reprovision the caller's free list.
1161     if (gFreeList != NULL) {
1162       // Reprovision the thread's omFreeList.
1163       // Use bulk transfers to reduce the allocation rate and heat
1164       // on various locks.
1165       Thread::muxAcquire(&gListLock, "omAlloc");
1166       for (int i = Self->omFreeProvision; --i >= 0 && gFreeList != NULL;) {
1167         gMonitorFreeCount--;
1168         ObjectMonitor * take = gFreeList;
1169         gFreeList = take->FreeNext;
1170         take->set_object(NULL);
1171         take->set_owner(NULL);
1172         take->_count = 0;
1173         guarantee(!take->is_busy(), "invariant");
1174         take->Recycle();
1175         assert(take->is_free(), "invariant");
1176         omRelease(Self, take, false);
1177       }
1178       Thread::muxRelease(&gListLock);
1179       Self->omFreeProvision += 1 + (Self->omFreeProvision/2);
1180       if (Self->omFreeProvision > MAXPRIVATE) Self->omFreeProvision = MAXPRIVATE;
1181       TEVENT(omFirst - reprovision);
1182 
1183       const int mx = MonitorBound;
1184       if (mx > 0 && (gMonitorPopulation-gMonitorFreeCount) > mx) {
1185         // We can't safely induce a STW safepoint from omAlloc() as our thread
1186         // state may not be appropriate for such activities and callers may hold
1187         // naked oops, so instead we defer the action.
1188         InduceScavenge(Self, "omAlloc");
1189       }
1190       continue;
1191     }
1192 
1193     // 3: allocate a block of new ObjectMonitors
1194     // Both the local and global free lists are empty -- resort to malloc().
1195     // In the current implementation objectMonitors are TSM - immortal.


1210 
1211     // NOTE: (almost) no way to recover if allocation failed.
1212     // We might be able to induce a STW safepoint and scavenge enough
1213     // objectMonitors to permit progress.
1214     if (temp == NULL) {
1215       vm_exit_out_of_memory(neededsize, OOM_MALLOC_ERROR,
1216                             "Allocate ObjectMonitors");
1217     }
1218     (void)memset((void *) temp, 0, neededsize);
1219 
1220     // Format the block.
1221     // Initialize the linked list; each monitor points to its next,
1222     // forming the singly linked free list. The very first monitor
1223     // will point to the next block, which forms the block list.
1224     // The trick of using the 1st element in the block as gBlockList
1225     // linkage should be reconsidered.  A better implementation would
1226     // look like: class Block { Block * next; int N; ObjectMonitor Body [N] ; }
1227 
1228     for (int i = 1; i < _BLOCKSIZE; i++) {
1229       temp[i].FreeNext = (ObjectMonitor *)&temp[i+1];
1230       assert(temp[i].is_free(), "invariant");
1231     }
1232 
1233     // terminate the last monitor as the end of the list
1234     temp[_BLOCKSIZE - 1].FreeNext = NULL;
1235 
1236     // Element [0] is reserved for global list linkage
1237     temp[0].set_object(CHAINMARKER);
1238 
1239     // Consider carving out this thread's current request from the
1240     // block in hand.  This avoids some lock traffic and redundant
1241     // list activity.
1242 
1243     // Acquire the gListLock to manipulate gBlockList and gFreeList.
1244     // An Oyama-Taura-Yonezawa scheme might be more efficient.
1245     Thread::muxAcquire(&gListLock, "omAlloc [2]");
1246     gMonitorPopulation += _BLOCKSIZE-1;
1247     gMonitorFreeCount += _BLOCKSIZE-1;
1248 
1249     // Add the new block to the list of extant blocks (gBlockList).
1250     // The very first objectMonitor in a block is reserved and dedicated.


1260     Thread::muxRelease(&gListLock);
1261     TEVENT(Allocate block of monitors);
1262   }
1263 }
1264 
1265 // Place "m" on the caller's private per-thread omFreeList.
1266 // In practice there's no need to clamp or limit the number of
1267 // monitors on a thread's omFreeList as the only time we'll call
1268 // omRelease is to return a monitor to the free list after a CAS
1269 // attempt failed.  This doesn't allow unbounded #s of monitors to
1270 // accumulate on a thread's free list.
1271 //
1272 // Key constraint: all ObjectMonitors on a thread's free list and the global
1273 // free list must have their object field set to null. This prevents the
1274 // scavenger -- deflate_idle_monitors -- from reclaiming them.
1275 
1276 void ObjectSynchronizer::omRelease(Thread * Self, ObjectMonitor * m,
1277                                    bool fromPerThreadAlloc) {
1278   guarantee(m->object() == NULL, "invariant");
1279   guarantee(((m->is_busy()|m->_recursions) == 0), "freeing in-use monitor");
1280   m->set_allocation_state(ObjectMonitor::Free);
1281   // Remove from omInUseList
1282   if (MonitorInUseLists && fromPerThreadAlloc) {
1283     ObjectMonitor* cur_mid_in_use = NULL;
1284     bool extracted = false;
1285     for (ObjectMonitor* mid = Self->omInUseList; mid != NULL; cur_mid_in_use = mid, mid = mid->FreeNext) {
1286       if (m == mid) {
1287         // extract from per-thread in-use list
1288         if (mid == Self->omInUseList) {
1289           Self->omInUseList = mid->FreeNext;
1290         } else if (cur_mid_in_use != NULL) {
1291           cur_mid_in_use->FreeNext = mid->FreeNext; // maintain the current thread in-use list
1292         }
1293         extracted = true;
1294         Self->omInUseCount--;
1295         if (ObjectMonitor::Knob_VerifyInUse) {
1296           verifyInUse(Self);
1297         }
1298         break;
1299       }
1300     }


1375     gMonitorFreeCount += tally;
1376     assert(Self->omFreeCount == tally, "free-count off");
1377     Self->omFreeCount = 0;
1378   }
1379 
1380   if (inUseTail != NULL) {
1381     inUseTail->FreeNext = gOmInUseList;
1382     gOmInUseList = inUseList;
1383     gOmInUseCount += inUseTally;
1384   }
1385 
1386   Thread::muxRelease(&gListLock);
1387   TEVENT(omFlush);
1388 }
1389 
1390 // Fast path code shared by multiple functions
1391 ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
1392   markOop mark = obj->mark();
1393   if (mark->has_monitor()) {
1394     assert(ObjectSynchronizer::verify_objmon_isinpool(mark->monitor()), "monitor is invalid");
1395     markOop dmw = mark->monitor()->header();
1396     assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "monitor must record a good object header");
1397     if (dmw->is_neutral()) {
1398       return mark->monitor();
1399     }
1400   }
1401   return ObjectSynchronizer::inflate(Thread::current(),
1402                                      obj,
1403                                      inflate_cause_vm_internal);
1404 }
1405 
1406 ObjectMonitor* ObjectSynchronizer::inflate(Thread * Self,
1407                                            oop object,
1408                                            const InflateCause cause) {
1409  Retry:
1410   // Inflate mutates the heap ...
1411   // Relaxing assertion for bug 6320749.
1412   assert(Universe::verify_in_progress() ||
1413          !SafepointSynchronize::is_at_safepoint(), "invariant");
1414 
1415   EventJavaMonitorInflate event;
1416 
1417   for (;;) {
1418     const markOop mark = object->mark();
1419     assert(!mark->has_bias_pattern(), "invariant");
1420 
1421     // The mark can be in one of the following states:
1422     // *  Inflated     - just return
1423     // *  Stack-locked - coerce it to inflated
1424     // *  INFLATING    - busy wait for conversion to complete
1425     // *  Neutral      - aggressively inflate the object.
1426     // *  BIASED       - Illegal.  We should never see this
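    // Illustrative mapping (a sketch, not introduced by this change) from the
    // states above to the markOop predicates the dispatch below relies on:
    //
    //   mark->has_monitor()              -> Inflated
    //   mark == markOopDesc::INFLATING() -> inflation in progress
    //   mark->has_locker()               -> Stack-locked
    //   mark->is_neutral()               -> Neutral
    //   mark->has_bias_pattern()         -> BIASED (asserted impossible above)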
1427 
1428     // CASE: inflated
1429     if (mark->has_monitor()) {
1430       ObjectMonitor * inf = mark->monitor();
1431       markOop dmw = inf->header();
1432       assert(dmw->is_neutral() || (dmw->hash() == 0 && dmw->is_marked()), "invariant");
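      // A marked displaced header presumably means the monitor is being
      // disabled by the concurrent deflater; retry until the object's mark
      // word has settled into one of the other states.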
1433       if (dmw->is_marked()) {
1434         goto Retry;
1435       }
1436       assert(inf->object() == object, "invariant");
1437       assert(ObjectSynchronizer::verify_objmon_isinpool(inf), "monitor is invalid");
1438       event.cancel(); // let's not post an inflation event, unless we did the deed ourselves
1439       return inf;
1440     }
1441 
1442     // CASE: inflation in progress - inflating over a stack-lock.
1443     // Some other thread is converting from stack-locked to inflated.
1444     // Only that thread can complete inflation -- other threads must wait.
1445     // The INFLATING value is transient.
1446     // Currently, we spin/yield/park and poll the markword, waiting for inflation to finish.
1447     // We could always eliminate polling by parking the thread on some auxiliary list.
1448     if (mark == markOopDesc::INFLATING()) {
1449       TEVENT(Inflate: spin while INFLATING);
1450       ReadStableMark(object);
1451       continue;
1452     }
1453 
1454     // CASE: stack-locked
1455     // Could be stack-locked either by this thread or by some other thread.


1622 // only scans the per-thread in-use lists. omAlloc() puts all
1623 // assigned monitors on the per-thread list. deflate_idle_monitors()
1624 // returns the non-busy monitors to the global free list.
1625 // When a thread dies, omFlush() adds the list of active monitors for
1626 // that thread to the global gOmInUseList, acquiring the
1627 // global list lock. deflate_idle_monitors() then acquires that same
1628 // lock to scan the list and move non-busy monitors to the global free list.
1629 // An alternative could have used a single global in-use list. The
1630 // downside would have been the additional cost of acquiring the global list lock
1631 // for every omAlloc().
1632 //
1633 // Perversely, the heap size -- and thus the STW safepoint rate --
1634 // typically drives the scavenge rate.  Large heaps can mean infrequent GC,
1635 // which in turn can mean large(r) numbers of objectmonitors in circulation.
1636 // This is an unfortunate aspect of this design.
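// A minimal sketch of the per-thread in-use list scan mentioned above,
// assuming the Threads::first()/next() iteration used elsewhere in the VM
// (the real walk lives in deflate_idle_monitors() and is not reproduced here):
//
//   for (JavaThread* jt = Threads::first(); jt != NULL; jt = jt->next()) {
//     int deflated = deflate_monitor_list(&jt->omInUseList, &freeHeadp, &freeTailp);
//     jt->omInUseCount -= deflated;
//   }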
1637 
1638 enum ManifestConstants {
1639   ClearResponsibleAtSTW = 0
1640 };
1641 
1642 void ObjectSynchronizer::do_safepoint_work() {
1643   if (MonitorInUseLists || !AsyncDeflateIdleMonitors) {
1644     ObjectSynchronizer::deflate_idle_monitors();
1645     return;
1646   }
1647   assert(AsyncDeflateIdleMonitors, "oops");
1648   if (gFreeListNextSafepoint != NULL) {
1649 #ifdef ASSERT
1650     for (ObjectMonitor* monitor = gFreeListNextSafepoint; monitor != NULL; monitor = monitor->FreeNext) {
1651       guarantee(monitor->owner() == NULL, "invariant");
1652       guarantee(monitor->waiters() == 0, "invariant");
1653       guarantee(monitor->recursions() == 0, "invariant");
1654       guarantee(monitor->object() != NULL, "invariant");
1655       guarantee(monitor->header() != 0, "invariant");
1656       guarantee(monitor->is_free(), "invariant");
1657     }
1658     guarantee(gFreeListNextSafepointTail != NULL, "invariant");
1659 #endif // def ASSERT
1660 
1661     Thread::muxAcquire(&gListLock, "do_safepoint_work");
1662     gFreeListNextSafepointTail->FreeNext = gFreeList;
1663     gFreeList = gFreeListNextSafepoint;
1664     gMonitorFreeCount += gMonitorFreeCountNextSafepoint;
1665     Thread::muxRelease(&gListLock);
1666 
1667     gFreeListNextSafepoint = NULL;
1668     gFreeListNextSafepointTail = NULL;
1669     gMonitorFreeCountNextSafepoint = 0;
1670   }
1671   set_should_deflate_idle_monitors_conc();
1672   MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
1673   Service_lock->notify_all();
1674 }
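// The Service_lock notification above is expected to be consumed by the
// service thread. A minimal sketch of that consumer loop, assuming a
// should_deflate_idle_monitors_conc() accessor for the flag set above (the
// actual loop lives in the service thread code, not in this file):
//
//   while (true) {
//     {
//       MutexLockerEx ml(Service_lock, Mutex::_no_safepoint_check_flag);
//       while (!ObjectSynchronizer::should_deflate_idle_monitors_conc()) {
//         Service_lock->wait(Mutex::_no_safepoint_check_flag);
//       }
//     }
//     ObjectSynchronizer::deflate_idle_monitors_conc();
//   }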
1675 
1676 void ObjectSynchronizer::append_to_freelist_for_after_safepoint(int nScavenged, ObjectMonitor* const head, ObjectMonitor* const tail) {
1677 #ifdef ASSERT
1678   int count = 0;
1679   for (ObjectMonitor* m = head; m != NULL; m = m->FreeNext) { count++; }
1680   guarantee(count == nScavenged, "invariant");
1681 #endif // def ASSERT
1682   if (head != NULL) {
1683     assert(tail->FreeNext == NULL, "invariant");
1684     tail->FreeNext = gFreeListNextSafepoint;
1685     gFreeListNextSafepoint = head;
1686   }
1687   if (gFreeListNextSafepointTail == NULL) {
1688     gFreeListNextSafepointTail = tail;
1689   }
1690   gMonitorFreeCountNextSafepoint += nScavenged;
1691   OM_PERFDATA_OP(Deflations, inc(nScavenged));
1692 }
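// Note on the two-stage hand-off above: concurrently scavenged monitors are
// parked on gFreeListNextSafepoint rather than pushed straight onto gFreeList,
// presumably so a deflated monitor cannot be recycled while some thread still
// holds a stale reference obtained from a previously read mark word; the next
// safepoint acts as the grace period, after which do_safepoint_work() splices
// the batch onto gFreeList.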
1693 
1694 void ObjectSynchronizer::deflate_idle_monitors_conc() {
1695   assert(Thread::current()->is_Java_thread(), "precondition");
1696   _should_deflate_idle_monitors_conc = false;
1697   if (MonitorInUseLists) {
1698     return; // Don't want to run over the thread list for now.
1699   }
1700 
1701   ObjectMonitor* freeHeadp = NULL;
1702   ObjectMonitor* freeTailp = NULL;
1703   int nScavenged = 0;
1704   int nInuse = 0;
1705   int nInCirculation = 0;
1706 
1707   PaddedEnd<ObjectMonitor> * block =
1708     (PaddedEnd<ObjectMonitor> *)OrderAccess::load_ptr_acquire(&gBlockList);
1709   for (; block != NULL; block = (PaddedEnd<ObjectMonitor> *)next(block)) {
1710     // Iterate over all extant monitors - Scavenge all idle monitors.
1711     assert(block->object() == CHAINMARKER, "must be a block header");
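    // If a safepoint is starting, publish what has been scavenged so far so
    // that do_safepoint_work() can splice it onto gFreeList, then let the
    // safepoint proceed: the ThreadBlockInVM scope marks this thread as
    // blocked, and leaving that scope waits out the safepoint before the
    // scan resumes.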
1712     if (SafepointSynchronize::is_synchronizing()) {
1713       append_to_freelist_for_after_safepoint(nScavenged, freeHeadp, freeTailp);
1714       nScavenged = 0;
1715       freeHeadp = NULL;
1716       freeTailp = NULL;
1717       JavaThread* const jt = (JavaThread*) Thread::current();
1718       ThreadBlockInVM blocker(jt);
1719     }
1720     nInCirculation += _BLOCKSIZE;
1721     for (int i = 1; i < _BLOCKSIZE; i++) {
1722       ObjectMonitor* mid = (ObjectMonitor*)&block[i];
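      // Allocation-state aging: a monitor observed as "new" in this pass is
      // only promoted to "old" and reconsidered on a later pass, presumably
      // so that a monitor just handed out by omAlloc() but not yet installed
      // in an object's mark word is not deflated before it is fully in use.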
1723       if (!mid->is_old()) {
1724         // Skip deflating newly allocated or free monitors.
1725         if (mid->is_new()) {
1726           // Mark mid as "old".
1727           mid->set_allocation_state(ObjectMonitor::Old);
1728         }
1729         continue;
1730       }
1731 
1732       oop obj = (oop)mid->object();
1733       assert(obj != NULL, "invariant");
1734 
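      // try_disable_monitor() (implementation not shown here) is expected to
      // atomically confirm the monitor is still idle, restore the displaced
      // header into the object, and mark the monitor Free; if the monitor is,
      // or becomes, busy the attempt fails and the monitor is left as-is.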
1735       if (mid->try_disable_monitor()) {
1736         mid->FreeNext = NULL;
1737         if (freeHeadp == NULL) { freeHeadp = mid; }
1738         if (freeTailp != NULL) { freeTailp->FreeNext = mid; }
1739         freeTailp = mid;
1740         nScavenged++;
1741       } else {
1742         nInuse++;
1743       }
1744     }
1745   }
1746   append_to_freelist_for_after_safepoint(nScavenged, freeHeadp, freeTailp);
1747   OM_PERFDATA_OP(MonExtant, set_value(nInCirculation));
1748 }
1749 
1750 // Deflate a single monitor if not in-use
1751 // Return true if deflated, false if in-use
1752 bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
1753                                          ObjectMonitor** freeHeadp,
1754                                          ObjectMonitor** freeTailp) {
1755   bool deflated;
1756   // Normal case ... The monitor is associated with obj.
1757   guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
1758   guarantee(mid == obj->mark()->monitor(), "invariant");
1759   guarantee(mid->header()->is_neutral(), "invariant");
1760 
1761   if (mid->is_busy()) {
1762     if (ClearResponsibleAtSTW) mid->_Responsible = NULL;
1763     deflated = false;
1764   } else {
1765     // Deflate the monitor if it is no longer being used
1766     // It's idle - scavenge and return to the global free list
1767     // plain old deflation ...
1768     TEVENT(deflate_idle_monitors - scavenge1);
1769     if (log_is_enabled(Debug, monitorinflation)) {
1770       if (obj->is_instance()) {
1771         ResourceMark rm;
1772         log_debug(monitorinflation)("Deflating object " INTPTR_FORMAT " , "
1773                                     "mark " INTPTR_FORMAT " , type %s",
1774                                     p2i(obj), p2i(obj->mark()),
1775                                     obj->klass()->external_name());
1776       }
1777     }
1778 
1779     // Restore the header back to obj
1780     obj->release_set_mark(mid->header());
1781     mid->clear();
1782 
1783     assert(mid->object() == NULL, "invariant");
1784     assert(mid->is_free(), "invariant");
1785 
1786     // Move the monitor to the working free list defined by freeHeadp, freeTailp
1787     if (*freeHeadp == NULL) *freeHeadp = mid;
1788     if (*freeTailp != NULL) {
1789       ObjectMonitor * prevtail = *freeTailp;
1790       assert(prevtail->FreeNext == NULL, "cleaned up deflated?");
1791       prevtail->FreeNext = mid;
1792     }
1793     *freeTailp = mid;
1794     deflated = true;
1795   }
1796   return deflated;
1797 }
1798 
1799 // Walk a given monitor list, and deflate idle monitors
1800 // The given list could be a per-thread list or a global list
1801 // Caller acquires gListLock
1802 int ObjectSynchronizer::deflate_monitor_list(ObjectMonitor** listHeadp,
1803                                              ObjectMonitor** freeHeadp,
1804                                              ObjectMonitor** freeTailp) {


< prev index next >