/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/task.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"

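// Quick orientation (summarized from the mark word documentation in
// markWord.hpp; see that file for the authoritative layout): a biased header
// carries the "biased lock" pattern in its low bits and looks roughly like
//
//   [JavaThread* | epoch | age | 1 | 01]   biased toward the given thread
//   [0           | epoch | age | 1 | 01]   anonymously biased
//
// Revocation rewrites such a header either to the unbiased prototype (with
// the age preserved) or to a stack-locked header pointing at a BasicLock on
// the owner's stack.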

static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*   _preserved_oop_stack  = NULL;
static GrowableArray<markWord>* _preserved_mark_stack = NULL;

static void enable_biased_locking(InstanceKlass* k) {
  k->set_prototype_header(markWord::biased_locking_prototype());
}

static void enable_biased_locking() {
  _biased_locking_enabled = true;
  log_info(biasedlocking)("Biased locking enabled");
}

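// One-shot VM operation: at a safepoint, flip the prototype header of every
// class currently registered in the class loader data dictionaries to the
// biased-locking prototype, then flag biased locking as enabled so classes
// loaded later start out biasable as well.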
class VM_EnableBiasedLocking: public VM_Operation {
 public:
  VM_EnableBiasedLocking() {}
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }

  void doit() {
    // Iterate the class loader data dictionaries enabling biased locking for all
    // currently loaded classes.
    ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    enable_biased_locking();
  }

  bool allow_nested_vm_operations() const        { return false; }
};


// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    VM_EnableBiasedLocking op;
    VMThread::execute(&op);

    // Reclaim our storage and disenroll ourself
    delete this;
  }
};


void BiasedLocking::init() {
  // If biased locking is enabled and BiasedLockingStartupDelay is set,
  // schedule a task to fire after the specified delay which turns on
  // biased locking for all currently loaded classes as well as future
  // ones. This can work around startup time regressions caused by the
  // large number of safepoints that would otherwise be taken during VM
  // startup for bias revocation.
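  // (For example, running with -XX:BiasedLockingStartupDelay=4000, an
  // illustrative value, would defer enabling biased locking by roughly
  // four seconds after VM start.)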
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      enable_biased_locking();
    }
  }
}


bool BiasedLocking::enabled() {
  assert(UseBiasedLocking, "precondition");
  // We check "BiasedLockingStartupDelay == 0" here to cover the
  // possibility of calls to BiasedLocking::enabled() before
  // BiasedLocking::init().
  return _biased_locking_enabled || BiasedLockingStartupDelay == 0;
}


// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*> *monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}


// After the call, *biased_locker will be set to obj->mark().biased_locker() if biased_locker != NULL
// AND that thread is still alive. Otherwise it is left untouched (i.e. the caller is responsible for initializing it).
void BiasedLocking::single_revoke_at_safepoint(oop obj, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  markWord mark = obj->mark();
  if (!mark.has_bias_pattern()) {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
                              ", mark " INTPTR_FORMAT ", type %s"
                              ", requesting thread " INTPTR_FORMAT
                              " because it's no longer biased)",
                              p2i((void *)obj), mark.value(),
                              obj->klass()->external_name(),
                              (intptr_t) requesting_thread);
    }
    return;
  }

  uint age = mark.age();
  markWord unbiased_prototype = markWord::prototype().set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                            ", requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            mark.value(),
                            obj->klass()->external_name(),
                            obj->klass()->prototype_header().value(),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                             INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                             ", requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             mark.value(),
                             obj->klass()->external_name(),
                             obj->klass()->prototype_header().value(),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark.biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    obj->set_mark(unbiased_prototype);

    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
    }
    return;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    ThreadsListHandle tlh;
    thread_is_alive = tlh.includes(biased_thread);
  }
  if (!thread_is_alive) {
    obj->set_mark(unbiased_prototype);
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                              PTR_FORMAT ")", p2i(biased_thread));
    } else {
      log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                               PTR_FORMAT ")", p2i(biased_thread));
    }
    return;
  }

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
                            PTR_FORMAT ")", p2i(biased_thread));
  } else {
    log_trace(biasedlocking)("  Revoked bias of object biased toward live thread ("
                             PTR_FORMAT ")", p2i(biased_thread));
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
      // Assume recursive case and fix up highest lock below
      markWord mark = markWord::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-locked object");
    }
  } else {
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
    }
    // Store the unlocked value into the object's header.
    obj->set_mark(unbiased_prototype);
  }

  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }
}


enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


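// Decide, for one object, whether to revoke just its own bias, bulk-rebias its
// whole type, or bulk-revoke (disable biasing for the type). The decision is
// driven by the per-Klass revocation count measured against
// BiasedLockingBulkRebiasThreshold and BiasedLockingBulkRevokeThreshold, with
// BiasedLockingDecayTime controlling how quickly an earlier burst of
// revocations is forgotten (historically these flags defaulted to 20, 40 and
// 25000 ms respectively; see globals.hpp for the authoritative values).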
static HeuristicsResult update_heuristics(oop o) {
  markWord mark = o->mark();
  if (!mark.has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = nanos_to_millis(os::javaTimeNanos());
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}


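// Bulk operation covering a whole type: either bump the epoch in the Klass
// prototype header so every existing bias of this type becomes stale and can
// be rebiased at the next lock acquisition (bulk rebias), or reset the
// prototype to the unbiased state so biasing is disabled for the type
// entirely (bulk revoke). Instances found locked on thread stacks are fixed
// up explicitly in both cases.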
void BiasedLocking::bulk_revoke_at_safepoint(oop o, bool bulk_rebias, JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT ", mark " INTPTR_FORMAT ", type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          o->mark().value(),
                          o->klass()->external_name());

  jlong cur_time = nanos_to_millis(os::javaTimeNanos());
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);

  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header().has_bias_pattern()) {
        int prev_epoch = klass->prototype_header().bias_epoch();
        klass->set_prototype_header(klass->prototype_header().incr_bias_epoch());
        int cur_epoch = klass->prototype_header().bias_epoch();

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markWord mark = owner->mark();
            if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark.set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      single_revoke_at_safepoint(o, true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markWord::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markWord mark = owner->mark();
          if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
            single_revoke_at_safepoint(owner, true, requesting_thread, NULL);
          }
        }
      }

      // The bias of the passed object must be forcibly revoked as well
      // to uphold the guarantees made to callers
      single_revoke_at_safepoint(o, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  assert(!o->mark().has_bias_pattern(), "bug in bulk bias revocation");
}


static void clean_up_cached_monitor_info(JavaThread* thread = NULL) {
  if (thread != NULL) {
    thread->set_cached_monitor_info(NULL);
  } else {
    // Walk the thread list clearing out the cached monitors
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
      thr->set_cached_monitor_info(NULL);
    }
  }
}


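// Safepoint operation wrapping bulk_revoke_at_safepoint(). It also records
// the id of the safepoint it ran in so the JFR class-revocation event can
// refer to it, and clears every thread's cached monitor info afterwards.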
class VM_BulkRevokeBias : public VM_Operation {
private:
  Handle* _obj;
  JavaThread* _requesting_thread;
  bool _bulk_rebias;
  uint64_t _safepoint_id;

public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias)
    : _obj(obj)
    , _requesting_thread(requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _safepoint_id(0) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }

  virtual void doit() {
    BiasedLocking::bulk_revoke_at_safepoint((*_obj)(), _bulk_rebias, _requesting_thread);
    _safepoint_id = SafepointSynchronize::safepoint_id();
    clean_up_cached_monitor_info();
  }

  bool is_bulk_rebias() const {
    return _bulk_rebias;
  }

  uint64_t safepoint_id() const {
    return _safepoint_id;
  }
};


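// Handshake closure executed in the context of the thread the object is
// biased toward. It revokes the bias of that single object without a global
// safepoint, walking the owner's stack only if the bias (and its epoch) is
// still valid.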
class RevokeOneBias : public HandshakeClosure {
protected:
  Handle _obj;
  JavaThread* _requesting_thread;
  JavaThread* _biased_locker;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;

public:
  RevokeOneBias(Handle obj, JavaThread* requesting_thread, JavaThread* biased_locker)
    : HandshakeClosure("RevokeOneBias")
    , _obj(obj)
    , _requesting_thread(requesting_thread)
    , _biased_locker(biased_locker)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  void do_thread(Thread* target) {
    assert(target == _biased_locker, "Wrong thread");

    oop o = _obj();
    markWord mark = o->mark();

    if (!mark.has_bias_pattern()) {
      return;
    }

    markWord prototype = o->klass()->prototype_header();
    if (!prototype.has_bias_pattern()) {
      // This object has a stale bias from before the handshake
      // was requested. If we fail this race, the object's bias
      // has been revoked by another thread so we simply return.
      markWord biased_value = mark;
      mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
      assert(!o->mark().has_bias_pattern(), "even if we raced, should still be revoked");
      if (biased_value == mark) {
        _status_code = BiasedLocking::BIAS_REVOKED;
      }
      return;
    }

    if (_biased_locker == mark.biased_locker()) {
      if (mark.bias_epoch() == prototype.bias_epoch()) {
        // Epoch is still valid. This means biaser could be currently
        // synchronized on this object. We must walk its stack looking
        // for monitor records associated with this object and change
        // them to be stack locks if any are found.
        ResourceMark rm;
        BiasedLocking::walk_stack_and_revoke(o, _biased_locker);
        _biased_locker->set_cached_monitor_info(NULL);
        assert(!o->mark().has_bias_pattern(), "invariant");
        _biased_locker_id = JFR_THREAD_ID(_biased_locker);
        _status_code = BiasedLocking::BIAS_REVOKED;
        return;
      } else {
        markWord biased_value = mark;
        mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
        if (mark == biased_value || !mark.has_bias_pattern()) {
          assert(!o->mark().has_bias_pattern(), "should be revoked");
          _status_code = (biased_value == mark) ? BiasedLocking::BIAS_REVOKED : BiasedLocking::NOT_BIASED;
          return;
        }
      }
    }

    _status_code = BiasedLocking::NOT_REVOKED;
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  traceid biased_locker() const {
    return _biased_locker_id;
  }
};


static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_lockClass(k);
  event->commit();
}

static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, RevokeOneBias* op) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(op != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_lockClass(k);
  event->set_safepointId(0);
  event->set_previousOwner(op->biased_locker());
  event->commit();
}

static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, VM_BulkRevokeBias* op) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(op != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_revokedClass(k);
  event->set_disableBiasing(!op->is_bulk_rebias());
  event->set_safepointId(op->safepoint_id());
  event->commit();
}


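// Revoke the bias of a single object by handshaking with the thread it is
// biased toward instead of stopping the world. Returns NOT_REVOKED when the
// attempt raced with a concurrent state change; the caller
// (BiasedLocking::revoke) then retries.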
BiasedLocking::Condition BiasedLocking::single_revoke_with_handshake(Handle obj, JavaThread *requester, JavaThread *biaser) {

  EventBiasedLockRevocation event;
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(handshakes_count_addr());
  }
  log_info(biasedlocking, handshake)("JavaThread " INTPTR_FORMAT " handshaking JavaThread "
                                     INTPTR_FORMAT " to revoke object " INTPTR_FORMAT, p2i(requester),
                                     p2i(biaser), p2i(obj()));

  RevokeOneBias revoke(obj, requester, biaser);
  bool executed = Handshake::execute(&revoke, biaser);
  if (revoke.status_code() == NOT_REVOKED) {
    return NOT_REVOKED;
  }
  if (executed) {
    log_info(biasedlocking, handshake)("Handshake revocation for object " INTPTR_FORMAT " succeeded. Bias was %srevoked",
                                       p2i(obj()), (revoke.status_code() == BIAS_REVOKED ? "" : "already "));
    if (event.should_commit() && revoke.status_code() == BIAS_REVOKED) {
      post_revocation_event(&event, obj->klass(), &revoke);
    }
    assert(!obj->mark().has_bias_pattern(), "invariant");
    return revoke.status_code();
  } else {
    // Thread was not alive.
    // Grab Threads_lock before manually trying to revoke bias. This avoids race with a newly
    // created JavaThread (that happens to get the same memory address as biaser) synchronizing
    // on this object.
    {
      MutexLocker ml(Threads_lock);
      markWord mark = obj->mark();
      // Check if somebody else was able to revoke it before biased thread exited.
      if (!mark.has_bias_pattern()) {
        return NOT_BIASED;
      }
      ThreadsListHandle tlh;
      markWord prototype = obj->klass()->prototype_header();
      if (!prototype.has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark.biased_locker() &&
                                            prototype.bias_epoch() == mark.bias_epoch())) {
        obj->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
        if (event.should_commit()) {
          post_revocation_event(&event, obj->klass(), &revoke);
        }
        assert(!obj->mark().has_bias_pattern(), "bias should be revoked by now");
        return BIAS_REVOKED;
      }
    }
  }

  return NOT_REVOKED;
}


// Caller should have instantiated a ResourceMark object before calling this method
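// The walk converts any lock records for obj on the biaser's stack into
// ordinary displaced-header stack locks and then installs either a
// stack-locked or an unbiased header in obj.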
void BiasedLocking::walk_stack_and_revoke(oop obj, JavaThread* biased_locker) {
  assert(!SafepointSynchronize::is_at_safepoint() || !SafepointMechanism::uses_thread_local_poll(),
         "if SafepointMechanism::uses_thread_local_poll() is enabled this should always be executed outside safepoints");
  assert(Thread::current() == biased_locker || Thread::current()->is_VM_thread(), "wrong thread");

  markWord mark = obj->mark();
  assert(mark.biased_locker() == biased_locker &&
         obj->klass()->prototype_header().bias_epoch() == mark.bias_epoch(), "invariant");

  log_trace(biasedlocking)("%s(" INTPTR_FORMAT ") revoking object " INTPTR_FORMAT ", mark "
                           INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                           ", biaser " INTPTR_FORMAT " %s",
                           Thread::current()->is_VM_thread() ? "VMThread" : "JavaThread",
                           p2i(Thread::current()),
                           p2i(obj),
                           mark.value(),
                           obj->klass()->external_name(),
                           obj->klass()->prototype_header().value(),
                           p2i(biased_locker),
                           Thread::current()->is_VM_thread() ? "" : "(walking own stack)");

  markWord unbiased_prototype = markWord::prototype().set_age(obj->mark().age());

  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_locker);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i(mon_info->owner()),
                               p2i(obj));
      // Assume recursive case and fix up highest lock below
      markWord mark = markWord::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i(mon_info->owner()),
                               p2i(obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    log_info(biasedlocking)("  Revoked bias of currently-locked object");
  } else {
    log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    // Store the unlocked value into the object's header.
    obj->set_mark(unbiased_prototype);
  }

  assert(!obj->mark().has_bias_pattern(), "must not be biased");
}

void BiasedLocking::revoke_own_lock(Handle obj, TRAPS) {
  assert(THREAD->is_Java_thread(), "must be called by a JavaThread");
  JavaThread* thread = (JavaThread*)THREAD;

  markWord mark = obj->mark();

  if (!mark.has_bias_pattern()) {
    return;
  }

  Klass *k = obj->klass();
  assert(mark.biased_locker() == thread &&
         k->prototype_header().bias_epoch() == mark.bias_epoch(), "Revoke failed, unhandled biased lock state");
  ResourceMark rm;
  log_info(biasedlocking)("Revoking bias by walking my own stack:");
  EventBiasedLockSelfRevocation event;
  BiasedLocking::walk_stack_and_revoke(obj(), (JavaThread*) thread);
  thread->set_cached_monitor_info(NULL);
  assert(!obj->mark().has_bias_pattern(), "invariant");
  if (event.should_commit()) {
    post_self_revocation_event(&event, k);
  }
}

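// Revoke the bias of a single object on behalf of a running Java thread,
// escalating as needed: a lock-free CAS for anonymous or stale biases, a walk
// of the current thread's own stack, a handshake with the bias owner, or a
// bulk VM operation when the heuristics call for it. Typical triggers include
// installing an identity hash code or contention from another thread.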
void BiasedLocking::revoke(Handle obj, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  while (true) {
    // We can revoke the biases of anonymously-biased objects
    // efficiently enough that we should not cause these revocations to
    // update the heuristics because doing so may cause unwanted bulk
    // revocations (which are expensive) to occur.
    markWord mark = obj->mark();

    if (!mark.has_bias_pattern()) {
      return;
    }

    if (mark.is_biased_anonymously()) {
      // We are probably trying to revoke the bias of this object due to
      // an identity hash code computation. Try to revoke the bias
      // without a safepoint. This is possible if we can successfully
      // compare-and-exchange an unbiased header into the mark word of
      // the object, meaning that no other thread has raced to acquire
      // the bias of the object.
      markWord biased_value       = mark;
      markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
      markWord res_mark = obj->cas_set_mark(unbiased_prototype, mark);
      if (res_mark == biased_value) {
        return;
      }
      mark = res_mark;  // Refresh mark with the latest value.
    } else {
      Klass* k = obj->klass();
      markWord prototype_header = k->prototype_header();
      if (!prototype_header.has_bias_pattern()) {
        // This object has a stale bias from before the bulk revocation
        // for this data type occurred. It's pointless to update the
        // heuristics at this point so simply update the header with a
        // CAS. If we fail this race, the object's bias has been revoked
        // by another thread so we simply return and let the caller deal
        // with it.
        obj->cas_set_mark(prototype_header.set_age(mark.age()), mark);
        assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked");
        return;
      } else if (prototype_header.bias_epoch() != mark.bias_epoch()) {
        // The epoch of this biasing has expired indicating that the
        // object is effectively unbiased. We can revoke the bias of this
        // object efficiently enough with a CAS that we shouldn't update the
        // heuristics. This is normally done in the assembly code but we
        // can reach this point due to various points in the runtime
        // needing to revoke biases.
        markWord res_mark;
        markWord biased_value       = mark;
        markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
        res_mark = obj->cas_set_mark(unbiased_prototype, mark);
        if (res_mark == biased_value) {
          return;
        }
        mark = res_mark;  // Refresh mark with the latest value.
      }
    }

    HeuristicsResult heuristics = update_heuristics(obj());
    if (heuristics == HR_NOT_BIASED) {
      return;
    } else if (heuristics == HR_SINGLE_REVOKE) {
      JavaThread *blt = mark.biased_locker();
      assert(blt != NULL, "invariant");
      if (blt == THREAD) {
        // A thread is trying to revoke the bias of an object biased
        // toward it, again likely due to an identity hash code
        // computation. We can again avoid a safepoint/handshake in this case
        // since we are only going to walk our own stack. There are no
        // races with revocations occurring in other threads because we
        // reach no safepoints in the revocation path.
        EventBiasedLockSelfRevocation event;
        ResourceMark rm;
        walk_stack_and_revoke(obj(), blt);
        blt->set_cached_monitor_info(NULL);
        assert(!obj->mark().has_bias_pattern(), "invariant");
        if (event.should_commit()) {
          post_self_revocation_event(&event, obj->klass());
        }
        return;
      } else {
        BiasedLocking::Condition cond = single_revoke_with_handshake(obj, (JavaThread*)THREAD, blt);
        if (cond != NOT_REVOKED) {
          return;
        }
      }
    } else {
      assert((heuristics == HR_BULK_REVOKE) ||
             (heuristics == HR_BULK_REBIAS), "?");
      EventBiasedLockClassRevocation event;
      VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*)THREAD,
                                    (heuristics == HR_BULK_REBIAS));
      VMThread::execute(&bulk_revoke);
      if (event.should_commit()) {
        post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
      }
      return;
    }
  }
}

// All objects in objs should be locked by biaser
void BiasedLocking::revoke(GrowableArray<Handle>* objs, JavaThread *biaser) {
  bool clean_my_cache = false;
  for (int i = 0; i < objs->length(); i++) {
    oop obj = (objs->at(i))();
    markWord mark = obj->mark();
    if (mark.has_bias_pattern()) {
      walk_stack_and_revoke(obj, biaser);
      clean_my_cache = true;
    }
  }
  if (clean_my_cache) {
    clean_up_cached_monitor_info(biaser);
  }
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj);
  if (heuristics == HR_SINGLE_REVOKE) {
    JavaThread* biased_locker = NULL;
    single_revoke_at_safepoint(obj, false, NULL, &biased_locker);
    if (biased_locker) {
      clean_up_cached_monitor_info(biased_locker);
    }
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), NULL);
    clean_up_cached_monitor_info();
  }
}


void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack  == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*> *monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markWord mark = owner->mark();
              if (mark.has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}


void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack  != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markWord mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}


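// Addresses of the individual counters. These are handed to the interpreter
// and compiler stubs so the generated locking code can bump the statistics
// directly when PrintBiasedLockingStatistics is enabled.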
int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::handshakes_count_addr()                    { return _counters.handshakes_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }


// BiasedLockingCounters

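// If the slow-path counter was not maintained directly, derive it as the
// total entry count minus every specifically counted kind of entry.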
int BiasedLockingCounters::slow_path_entry_count() const {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count   + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) const {
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# handshake entries: %d", _handshakes_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}

void BiasedLockingCounters::print() const { print_on(tty); }