/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "jfr/jfrEvents.hpp"
#include "jfr/support/jfrThreadId.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markWord.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/task.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vmOperations.hpp"


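// A quick sketch of the mark word states involved here, assuming the 64-bit
// layout documented in markWord.hpp:
//
//   [JavaThread* | epoch:2 | unused:1 | age:4 | 1 | 01]  biased toward the given thread
//   [0           | epoch:2 | unused:1 | age:4 | 1 | 01]  anonymously biased
//   [header bits                      | age:4 | 0 | 01]  unlocked, unbiased
//
// The low three bits "101" form the bias pattern tested by
// markWord::has_bias_pattern(); revocation rewrites the low bits to the
// unlocked ("01") or stack-locked ("00") state.
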
static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*   _preserved_oop_stack  = NULL;
static GrowableArray<markWord>* _preserved_mark_stack = NULL;

static void enable_biased_locking(InstanceKlass* k) {
  k->set_prototype_header(markWord::biased_locking_prototype());
}

static void enable_biased_locking() {
  _biased_locking_enabled = true;
  log_info(biasedlocking)("Biased locking enabled");
}

class VM_EnableBiasedLocking: public VM_Operation {
 public:
  VM_EnableBiasedLocking() {}
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _async_safepoint; }
  bool is_cheap_allocated() const { return true; }

  void doit() {
    // Iterate the class loader data dictionaries enabling biased locking for all
    // currently loaded classes.
    ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    enable_biased_locking();
  }

  bool allow_nested_vm_operations() const        { return false; }
};


// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
    VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking();
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourselves
    delete this;
  }
};


void BiasedLocking::init() {
  // If biased locking is enabled and BiasedLockingStartupDelay is set,
  // schedule a task to fire after the specified delay which turns on
  // biased locking for all currently loaded classes as well as future
  // ones. The delay works around startup time regressions caused by the
  // large number of safepoints that would otherwise be taken during VM
  // startup for bias revocation.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      enable_biased_locking();
    }
  }
}
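
// A usage sketch (the flag names are the real product flags; the delay value
// is purely illustrative): running with
//
//   -XX:+UseBiasedLocking -XX:BiasedLockingStartupDelay=4000
//
// enrolls the one-shot task above, so biased locking switches on for all
// loaded classes roughly four seconds after startup (the delay is a
// PeriodicTask interval in milliseconds); with a delay of 0 it is enabled
// immediately in init().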


bool BiasedLocking::enabled() {
  assert(UseBiasedLocking, "precondition");
  // We check "BiasedLockingStartupDelay == 0" here to cover the
  // possibility of calls to BiasedLocking::enabled() before
  // BiasedLocking::init().
  return _biased_locking_enabled || BiasedLockingStartupDelay == 0;
}


// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*> *monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}
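
// Note: the cached MonitorInfos go stale as soon as the corresponding mark
// words are rewritten, so every revocation path below clears the cache via
// clean_up_cached_monitor_info() when it is done.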


// After this call, *biased_locker will be set to obj->mark().biased_locker() if biased_locker != NULL
// and the biased locker is a live thread. Otherwise it is left unchanged, i.e. the caller is
// responsible for initializing it.
BiasedLocking::Condition BiasedLocking::single_revoke_at_safepoint(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  markWord mark = obj->mark();
  if (!mark.has_bias_pattern()) {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
                              ", mark " INTPTR_FORMAT ", type %s"
                              ", requesting thread " INTPTR_FORMAT
                              " because it's no longer biased)",
                              p2i((void *)obj), mark.value(),
                              obj->klass()->external_name(),
                              (intptr_t) requesting_thread);
    }
    return NOT_BIASED;
  }

  uint age = mark.age();
  markWord   biased_prototype = markWord::biased_locking_prototype().set_age(age);
  markWord unbiased_prototype = markWord::prototype().set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                            ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            mark.value(),
                            obj->klass()->external_name(),
                            obj->klass()->prototype_header().value(),
                            (allow_rebias ? 1 : 0),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                             INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                             " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             mark.value(),
                             obj->klass()->external_name(),
                             obj->klass()->prototype_header().value(),
                             (allow_rebias ? 1 : 0),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark.biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
    }
    return BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    ThreadsListHandle tlh;
    thread_is_alive = tlh.includes(biased_thread);
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                              PTR_FORMAT ")", p2i(biased_thread));
    } else {
      log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                               PTR_FORMAT ")", p2i(biased_thread));
    }
    return BIAS_REVOKED;
  }

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
                            PTR_FORMAT ")", p2i(biased_thread));
  } else {
    log_trace(biasedlocking)("  Revoked bias of object biased toward live thread ("
                             PTR_FORMAT ")", p2i(biased_thread));
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (oopDesc::equals(mon_info->owner(), obj)) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
      // Assume recursive case and fix up highest lock below
      markWord mark = markWord::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-locked object");
    }
  } else {
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }

  return BIAS_REVOKED;
}


enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


static HeuristicsResult update_heuristics(oop o) {
  markWord mark = o->mark();
  if (!mark.has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
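
// Worked example (illustrative, assuming the default flag values from
// globals.hpp: BiasedLockingBulkRebiasThreshold=20,
// BiasedLockingBulkRevokeThreshold=40, BiasedLockingDecayTime=25000):
// the 20th recorded revocation for a type returns HR_BULK_REBIAS and the
// 40th returns HR_BULK_REVOKE; if at least 25 seconds pass between a bulk
// rebias and the next revocation of that type, the count is reset, so
// widely spaced revocations do not push a type toward bulk revocation.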


BiasedLocking::Condition BiasedLocking::bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          o->mark().value(),
                          o->klass()->external_name());

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);

  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header().has_bias_pattern()) {
        int prev_epoch = klass->prototype_header().bias_epoch();
        klass->set_prototype_header(klass->prototype_header().incr_bias_epoch());
        int cur_epoch = klass->prototype_header().bias_epoch();
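
        // Note that the epoch field is only a few bits wide (epoch_bits in
        // markWord.hpp), so incr_bias_epoch() wraps around. Correctness does
        // not require unique epochs: the walk below moves every live, locked,
        // biased instance of this type to the new epoch, so an instance still
        // carrying an older epoch is necessarily unlocked and is treated as
        // rebiasable rather than biased.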

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markWord mark = owner->mark();
            if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark.set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      single_revoke_at_safepoint(o, attempt_rebias_of_object && klass->prototype_header().has_bias_pattern(), true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markWord::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markWord mark = owner->mark();
          if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
            single_revoke_at_safepoint(owner, false, true, requesting_thread, NULL);
          }
        }
      }

      // The bias of the passed object must be forcibly revoked as well,
      // to uphold the guarantees made to our callers
      single_revoke_at_safepoint(o, false, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark().has_bias_pattern() &&
      klass->prototype_header().has_bias_pattern()) {
    markWord new_mark = markWord::encode(requesting_thread, o->mark().age(),
                                         klass->prototype_header().bias_epoch());
    o->set_mark(new_mark);
    status_code = BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  assert(!o->mark().has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark().biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}


static void clean_up_cached_monitor_info(JavaThread* thread = NULL) {
  if (thread != NULL) {
    thread->set_cached_monitor_info(NULL);
  } else {
    // Walk the thread list clearing out the cached monitors
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
      thr->set_cached_monitor_info(NULL);
    }
  }
}


class VM_BulkRevokeBias : public VM_Operation {
private:
  Handle* _obj;
  JavaThread* _requesting_thread;
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;
  BiasedLocking::Condition _status_code;
  uint64_t _safepoint_id;

public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : _obj(obj)
    , _requesting_thread(requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _safepoint_id(0) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }

  virtual void doit() {
    _status_code = BiasedLocking::bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    _safepoint_id = SafepointSynchronize::safepoint_id();
    clean_up_cached_monitor_info();
  }

  bool is_bulk_rebias() const {
    return _bulk_rebias;
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  uint64_t safepoint_id() const {
    return _safepoint_id;
  }
};


class RevokeOneBias : public ThreadClosure {
protected:
  Handle _obj;
  JavaThread* _requesting_thread;
  JavaThread* _biased_locker;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;

public:
  RevokeOneBias(Handle obj, JavaThread* requesting_thread, JavaThread* biased_locker)
    : _obj(obj)
    , _requesting_thread(requesting_thread)
    , _biased_locker(biased_locker)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  void do_thread(Thread* target) {
    assert(target == _biased_locker, "Wrong thread");

    oop o = _obj();
    markWord mark = o->mark();

    if (!mark.has_bias_pattern()) {
      return;
    }

    markWord prototype = o->klass()->prototype_header();
    if (!prototype.has_bias_pattern()) {
      // This object has a stale bias from before the handshake
      // was requested. If we fail this race, the object's bias
      // has been revoked by another thread so we simply return.
      markWord biased_value = mark;
      mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
      assert(!o->mark().has_bias_pattern(), "even if we raced, should still be revoked");
      if (biased_value == mark) {
        _status_code = BiasedLocking::BIAS_REVOKED;
      }
      return;
    }

    if (_biased_locker == mark.biased_locker()) {
      if (mark.bias_epoch() == prototype.bias_epoch()) {
        // Epoch is still valid. This means the biaser could be currently
        // synchronized on this object. We must walk its stack looking
        // for monitor records associated with this object and change
        // them to stack locks if any are found.
        ResourceMark rm;
        BiasedLocking::walk_stack_and_revoke(o, _biased_locker);
        _biased_locker->set_cached_monitor_info(NULL);
        assert(!o->mark().has_bias_pattern(), "invariant");
        _biased_locker_id = JFR_THREAD_ID(_biased_locker);
        _status_code = BiasedLocking::BIAS_REVOKED;
        return;
      } else {
        markWord biased_value = mark;
        mark = o->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
        if (mark == biased_value || !mark.has_bias_pattern()) {
          assert(!o->mark().has_bias_pattern(), "should be revoked");
          _status_code = (biased_value == mark) ? BiasedLocking::BIAS_REVOKED : BiasedLocking::NOT_BIASED;
          return;
        }
      }
    }

    _status_code = BiasedLocking::NOT_REVOKED;
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  traceid biased_locker() const {
    return _biased_locker_id;
  }
};


static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_lockClass(k);
  event->commit();
}

static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, RevokeOneBias* op) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(op != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_lockClass(k);
  event->set_safepointId(0);
  event->set_previousOwner(op->biased_locker());
  event->commit();
}

static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, VM_BulkRevokeBias* op) {
  assert(event != NULL, "invariant");
  assert(k != NULL, "invariant");
  assert(op != NULL, "invariant");
  assert(event->should_commit(), "invariant");
  event->set_revokedClass(k);
  event->set_disableBiasing(!op->is_bulk_rebias());
  event->set_safepointId(op->safepoint_id());
  event->commit();
}


BiasedLocking::Condition BiasedLocking::single_revoke_with_handshake(Handle obj, JavaThread *requester, JavaThread *biaser) {

  EventBiasedLockRevocation event;
  if (PrintBiasedLockingStatistics) {
    Atomic::inc(handshakes_count_addr());
  }
  log_info(biasedlocking, handshake)("JavaThread " INTPTR_FORMAT " handshaking JavaThread "
                                     INTPTR_FORMAT " to revoke object " INTPTR_FORMAT, p2i(requester),
                                     p2i(biaser), p2i(obj()));

  RevokeOneBias revoke(obj, requester, biaser);
  bool executed = Handshake::execute(&revoke, biaser);
  if (revoke.status_code() == NOT_REVOKED) {
    return NOT_REVOKED;
  }
  if (executed) {
    log_info(biasedlocking, handshake)("Handshake revocation for object " INTPTR_FORMAT " succeeded. Bias was %srevoked",
                                       p2i(obj()), (revoke.status_code() == BIAS_REVOKED ? "" : "already "));
    if (event.should_commit() && revoke.status_code() == BIAS_REVOKED) {
      post_revocation_event(&event, obj->klass(), &revoke);
    }
    assert(!obj->mark().has_bias_pattern(), "invariant");
    return revoke.status_code();
  } else {
    // Thread was not alive.
    // Grab Threads_lock before manually trying to revoke bias. This avoids a race with a newly
    // created JavaThread (that happens to get the same memory address as biaser) synchronizing
    // on this object.
    {
      MutexLocker ml(Threads_lock);
      markWord mark = obj->mark();
      // Check if somebody else was able to revoke it before the biased thread exited.
      if (!mark.has_bias_pattern()) {
        return NOT_BIASED;
      }
      ThreadsListHandle tlh;
      markWord prototype = obj->klass()->prototype_header();
      if (!prototype.has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark.biased_locker() &&
                                            prototype.bias_epoch() == mark.bias_epoch())) {
        obj->cas_set_mark(markWord::prototype().set_age(mark.age()), mark);
        if (event.should_commit()) {
          post_revocation_event(&event, obj->klass(), &revoke);
        }
        assert(!obj->mark().has_bias_pattern(), "bias should be revoked by now");
        return BIAS_REVOKED;
      }
    }
  }

  return NOT_REVOKED;
}


// Caller should have instantiated a ResourceMark object before calling this method
void BiasedLocking::walk_stack_and_revoke(oop obj, JavaThread* biased_locker) {
  assert(!SafepointSynchronize::is_at_safepoint() || !ThreadLocalHandshakes,
         "if ThreadLocalHandshakes is enabled this should always be executed outside safepoints");
  assert(Thread::current() == biased_locker || Thread::current()->is_VM_thread(), "wrong thread");

  markWord mark = obj->mark();
  assert(mark.biased_locker() == biased_locker &&
         obj->klass()->prototype_header().bias_epoch() == mark.bias_epoch(), "invariant");

  log_trace(biasedlocking)("%s(" INTPTR_FORMAT ") revoking object " INTPTR_FORMAT ", mark "
                           INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                           ", biaser " INTPTR_FORMAT " %s",
                           Thread::current()->is_VM_thread() ? "VMThread" : "JavaThread",
                           p2i(Thread::current()),
                           p2i(obj),
                           mark.value(),
                           obj->klass()->external_name(),
                           obj->klass()->prototype_header().value(),
                           p2i(biased_locker),
                           Thread::current()->is_VM_thread() ? "" : "(walking own stack)");

  markWord unbiased_prototype = markWord::prototype().set_age(obj->mark().age());

  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_locker);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (oopDesc::equals(mon_info->owner(), obj)) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i(mon_info->owner()),
                               p2i(obj));
      // Assume recursive case and fix up highest lock below
      markWord mark = markWord::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i(mon_info->owner()),
                               p2i(obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    log_info(biasedlocking)("  Revoked bias of currently-locked object");
  } else {
    log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    // Store the unlocked value into the object's header.
    obj->set_mark(unbiased_prototype);
  }

  assert(!obj->mark().has_bias_pattern(), "must not be biased");
}


BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  while (true) {
    // We can revoke the biases of anonymously-biased objects
    // efficiently enough that we should not cause these revocations to
    // update the heuristics because doing so may cause unwanted bulk
    // revocations (which are expensive) to occur.
    markWord mark = obj->mark();
    if (mark.is_biased_anonymously() && !attempt_rebias) {
      // We are probably trying to revoke the bias of this object due to
      // an identity hash code computation. Try to revoke the bias
      // without a safepoint. This is possible if we can successfully
      // compare-and-exchange an unbiased header into the mark word of
      // the object, meaning that no other thread has raced to acquire
      // the bias of the object.
      markWord biased_value       = mark;
      markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
      markWord res_mark = obj->cas_set_mark(unbiased_prototype, mark);
      if (res_mark == biased_value) {
        return BIAS_REVOKED;
      }
      mark = res_mark;  // Refresh mark with the latest value.
    } else if (mark.has_bias_pattern()) {
      Klass* k = obj->klass();
      markWord prototype_header = k->prototype_header();
      if (!prototype_header.has_bias_pattern()) {
        // This object has a stale bias from before the bulk revocation
        // for this data type occurred. It's pointless to update the
        // heuristics at this point so simply update the header with a
        // CAS. If we fail this race, the object's bias has been revoked
        // by another thread so we simply return and let the caller deal
        // with it.
        obj->cas_set_mark(prototype_header.set_age(mark.age()), mark);
        assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked");
        return BIAS_REVOKED;
      } else if (prototype_header.bias_epoch() != mark.bias_epoch()) {
        // The epoch of this biasing has expired, indicating that the
        // object is effectively unbiased. Depending on whether we need
        // to rebias or revoke the bias of this object we can do it
        // efficiently enough with a CAS that we shouldn't update the
        // heuristics. This is normally done in the assembly code but we
        // can reach this point due to various points in the runtime
        // needing to revoke biases.
        markWord res_mark;
        if (attempt_rebias) {
          assert(THREAD->is_Java_thread(), "");
          markWord biased_value       = mark;
          markWord rebiased_prototype = markWord::encode((JavaThread*) THREAD, mark.age(), prototype_header.bias_epoch());
          res_mark = obj->cas_set_mark(rebiased_prototype, mark);
          if (res_mark == biased_value) {
            return BIAS_REVOKED_AND_REBIASED;
          }
        } else {
          markWord biased_value       = mark;
          markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
          res_mark = obj->cas_set_mark(unbiased_prototype, mark);
          if (res_mark == biased_value) {
            return BIAS_REVOKED;
          }
        }
        mark = res_mark;  // Refresh mark with the latest value.
      }
    }

    HeuristicsResult heuristics = update_heuristics(obj());
    if (heuristics == HR_NOT_BIASED) {
      return NOT_BIASED;
    } else if (heuristics == HR_SINGLE_REVOKE) {
      JavaThread *blt = mark.biased_locker();
      assert(blt != NULL, "invariant");
      if (blt == THREAD) {
        // A thread is trying to revoke the bias of an object biased
        // toward it, again likely due to an identity hash code
        // computation. We can again avoid a safepoint/handshake in this case
        // since we are only going to walk our own stack. There are no
        // races with revocations occurring in other threads because we
        // reach no safepoints in the revocation path.
        EventBiasedLockSelfRevocation event;
        ResourceMark rm;
        walk_stack_and_revoke(obj(), blt);
        blt->set_cached_monitor_info(NULL);
        assert(!obj->mark().has_bias_pattern(), "invariant");
        if (event.should_commit()) {
          post_self_revocation_event(&event, obj->klass());
        }
        return BIAS_REVOKED;
      } else {
        BiasedLocking::Condition cond = single_revoke_with_handshake(obj, (JavaThread*)THREAD, blt);
        if (cond != NOT_REVOKED) {
          return cond;
        }
      }
    } else {
      assert((heuristics == HR_BULK_REVOKE) ||
             (heuristics == HR_BULK_REBIAS), "?");
      EventBiasedLockClassRevocation event;
      VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*)THREAD,
                                    (heuristics == HR_BULK_REBIAS),
                                    attempt_rebias);
      VMThread::execute(&bulk_revoke);
      if (event.should_commit()) {
        post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
      }
      return bulk_revoke.status_code();
    }
  }
}
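
// A call-site sketch (assumed, modeled on ObjectSynchronizer::fast_enter in
// synchronizer.cpp): callers revoke or rebias before falling back to the
// regular stack-locking path, e.g.
//
//   if (UseBiasedLocking && !SafepointSynchronize::is_at_safepoint()) {
//     BiasedLocking::Condition cond =
//         BiasedLocking::revoke_and_rebias(obj, attempt_rebias, THREAD);
//     if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
//       return;  // the requesting thread now owns the bias, hence the lock
//     }
//   }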

// All objects in objs should be locked by biaser
void BiasedLocking::revoke(GrowableArray<Handle>* objs, JavaThread *biaser) {
  bool clean_my_cache = false;
  for (int i = 0; i < objs->length(); i++) {
    oop obj = (objs->at(i))();
    markWord mark = obj->mark();
    if (mark.has_bias_pattern()) {
      walk_stack_and_revoke(obj, biaser);
      clean_my_cache = true;
    }
  }
  if (clean_my_cache) {
    clean_up_cached_monitor_info(biaser);
  }
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj);
  if (heuristics == HR_SINGLE_REVOKE) {
    JavaThread* biased_locker = NULL;
    single_revoke_at_safepoint(obj, false, false, NULL, &biased_locker);
    if (biased_locker) {
      clean_up_cached_monitor_info(biased_locker);
    }
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    clean_up_cached_monitor_info();
  }
}


void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj);
    if (heuristics == HR_SINGLE_REVOKE) {
      single_revoke_at_safepoint(obj, false, false, NULL, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack  == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markWord>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*> *monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markWord mark = owner->mark();
              if (mark.has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}
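
// preserve_marks()/restore_marks() are intended to bracket a collection that
// resets mark words to the klass prototype (a full GC): preserve_marks() runs
// before the marks are clobbered and restore_marks() afterwards, replaying
// the saved (oop, mark) pairs in order.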


void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack  != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markWord mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}


int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::handshakes_count_addr()                    { return _counters.handshakes_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }


// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() const {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count   + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) const {
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# handshake entries: %d", _handshakes_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}

void BiasedLockingCounters::print() const { print_on(tty); }