src/hotspot/share/runtime/biasedLocking.cpp

 102   // bias revocation.
 103   if (UseBiasedLocking) {
 104     if (BiasedLockingStartupDelay > 0) {
 105       EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
 106       task->enroll();
 107     } else {
 108       enable_biased_locking();
 109     }
 110   }
 111 }
 112 
 113 
 114 bool BiasedLocking::enabled() {
 115   assert(UseBiasedLocking, "precondition");
 116   // We check "BiasedLockingStartupDelay == 0" here to cover the
 117   // possibility of calls to BiasedLocking::enabled() before
 118   // BiasedLocking::init().
 119   return _biased_locking_enabled || BiasedLockingStartupDelay == 0;
 120 }
 121 

 122 // Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
 123 static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
 124   GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
 125   if (info != NULL) {
 126     return info;
 127   }
 128 
 129   info = new GrowableArray<MonitorInfo*>();
 130 
 131   // It's possible for the thread to not have any Java frames on it,
 132   // i.e., if it's the main thread and it's already returned from main()
 133   if (thread->has_last_Java_frame()) {
 134     RegisterMap rm(thread);
 135     for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
 136       GrowableArray<MonitorInfo*> *monitors = vf->monitors();
 137       if (monitors != NULL) {
 138         int len = monitors->length();
 139         // Walk monitors youngest to oldest
 140         for (int i = len - 1; i >= 0; i--) {
 141           MonitorInfo* mon_info = monitors->at(i);
 142           if (mon_info->eliminated()) continue;
 143           oop owner = mon_info->owner();
 144           if (owner != NULL) {
 145             info->append(mon_info);
 146           }
 147         }
 148       }
 149     }
 150   }
 151 
 152   thread->set_cached_monitor_info(info);
 153   return info;
 154 }
 155 
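The youngest-to-oldest order matters to the consumer below: when revocation code scans this list, the last matching entry it sees for an object is the oldest (first-acquired) lock, which is the one that must end up holding the real displaced header. A small worked example, for a hypothetical locking sequence:

    // Thread T:  synchronized(a) { synchronized(b) { synchronized(a) { ... } } }
    // Cached list for T (youngest -> oldest):  a_inner, b, a_outer
    // A scan that overwrites highest_lock on every match for 'a' therefore
    // finishes with highest_lock == a_outer; the inner recursive entry keeps
    // a NULL displaced header, as the revocation code below relies on.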

 156 // After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL,
 157 // AND it is a living thread. Otherwise it will not be updated (i.e., the caller is responsible for initialization).
 158 static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {



 159   markOop mark = obj->mark();
 160   if (!mark->has_bias_pattern()) {
 161     if (log_is_enabled(Info, biasedlocking)) {
 162       ResourceMark rm;
 163       log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
 164                               ", mark " INTPTR_FORMAT ", type %s"
 165                               ", requesting thread " INTPTR_FORMAT
 166                               " because it's no longer biased)",
 167                               p2i((void *)obj), (intptr_t) mark,
 168                               obj->klass()->external_name(),
 169                               (intptr_t) requesting_thread);
 170     }
 171     return BiasedLocking::NOT_BIASED;
 172   }
 173 
 174   uint age = mark->age();
 175   markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
 176   markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
 177 
 178   // Log at "info" level if not bulk, else "trace" level
 179   if (!is_bulk) {
 180     ResourceMark rm;
 181     log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
 182                             INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
 183                             ", allow rebias %d, requesting thread " INTPTR_FORMAT,
 184                             p2i((void *)obj),
 185                             (intptr_t) mark,
 186                             obj->klass()->external_name(),
 187                             (intptr_t) obj->klass()->prototype_header(),
 188                             (allow_rebias ? 1 : 0),
 189                             (intptr_t) requesting_thread);
 190   } else {
 191     ResourceMark rm;


 197                              obj->klass()->external_name(),
 198                              (intptr_t) obj->klass()->prototype_header(),
 199                              (allow_rebias ? 1 : 0),
 200                              (intptr_t) requesting_thread);
 201   }
 202 
 203   JavaThread* biased_thread = mark->biased_locker();
 204   if (biased_thread == NULL) {
 205     // Object is anonymously biased. We can get here if, for
 206     // example, we revoke the bias due to an identity hash code
 207     // being computed for an object.
 208     if (!allow_rebias) {
 209       obj->set_mark(unbiased_prototype);
 210     }
 211     // Log at "info" level if not bulk, else "trace" level
 212     if (!is_bulk) {
 213       log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
 214     } else {
 215       log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
 216     }
 217     return BiasedLocking::BIAS_REVOKED;
 218   }
 219 
 220   // Handle case where the thread toward which the object was biased has exited
 221   bool thread_is_alive = false;
 222   if (requesting_thread == biased_thread) {
 223     thread_is_alive = true;
 224   } else {
 225     ThreadsListHandle tlh;
 226     thread_is_alive = tlh.includes(biased_thread);
 227   }
 228   if (!thread_is_alive) {
 229     if (allow_rebias) {
 230       obj->set_mark(biased_prototype);
 231     } else {
 232       obj->set_mark(unbiased_prototype);
 233     }
 234     // Log at "info" level if not bulk, else "trace" level
 235     if (!is_bulk) {
 236       log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
 237                               PTR_FORMAT ")", p2i(biased_thread));
 238     } else {
 239       log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
 240                                PTR_FORMAT ")", p2i(biased_thread));
 241     }
 242     return BiasedLocking::BIAS_REVOKED;
 243   }
 244 
 245   // Log at "info" level if not bulk, else "trace" level
 246   if (!is_bulk) {
 247     log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
 248                             PTR_FORMAT ")", p2i(biased_thread));
 249   } else {
 250     log_trace(biasedlocking)("  Revoked bias of object biased toward live thread ("
 251                                PTR_FORMAT ")", p2i(biased_thread));
 252   }
 253 
 254   // Thread owning bias is alive.
 255   // Check to see whether it currently owns the lock and, if so,
 256   // write down the needed displaced headers to the thread's stack.
 257   // Otherwise, restore the object's header either to the unlocked
 258   // or unbiased state.
 259   GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
 260   BasicLock* highest_lock = NULL;
 261   for (int i = 0; i < cached_monitor_info->length(); i++) {
 262     MonitorInfo* mon_info = cached_monitor_info->at(i);
 263     if (oopDesc::equals(mon_info->owner(), obj)) {
 264       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
 265                                p2i((void *) mon_info->owner()),
 266                                p2i((void *) obj));
 267       // Assume recursive case and fix up highest lock later
 268       markOop mark = markOopDesc::encode((BasicLock*) NULL);
 269       highest_lock = mon_info->lock();
 270       highest_lock->set_displaced_header(mark);
 271     } else {
 272       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
 273                                p2i((void *) mon_info->owner()),
 274                                p2i((void *) obj));
 275     }
 276   }
 277   if (highest_lock != NULL) {
 278     // Fix up highest lock to contain displaced header and point
 279     // object at it
 280     highest_lock->set_displaced_header(unbiased_prototype);
 281     // Reset object header to point to displaced mark.
 282     // Must release-store the lock address for platforms without TSO
 283     // ordering (e.g. ppc).
 284     obj->release_set_mark(markOopDesc::encode(highest_lock));
 285     assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
 286     // Log at "info" level if not bulk, else "trace" level
 287     if (!is_bulk) {
 288       log_info(biasedlocking)("  Revoked bias of currently-locked object");
 289     } else {
 290       log_trace(biasedlocking)("  Revoked bias of currently-locked object");
 291     }
 292   } else {
 293     // Log at "info" level if not bulk, else "trace" level
 294     if (!is_bulk) {
 295       log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
 296     } else {
 297       log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
 298     }
 299     if (allow_rebias) {
 300       obj->set_mark(biased_prototype);
 301     } else {
 302       // Store the unlocked value into the object's header.
 303       obj->set_mark(unbiased_prototype);
 304     }
 305   }
 306 
 307   // If requested, return information on which thread held the bias
 308   if (biased_locker != NULL) {
 309     *biased_locker = biased_thread;
 310   }
 311 
 312   return BiasedLocking::BIAS_REVOKED;
 313 }
 314 
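As a reading aid for the state transitions above, here is a rough sketch of the mark-word encodings being swapped in and out. The authoritative bit layout lives in markOop.hpp; field widths are omitted here and the ordering is illustrative:

    // biased toward thread T:  [ JavaThread* T | epoch | age | 1 | 01 ]
    // anonymously biased:      [ 0 (no thread) | epoch | age | 1 | 01 ]  <- biased_locking_prototype()
    // unlocked / unbiased:     [ hash          |         age | 0 | 01 ]  <- prototype(), "unbiased_prototype"
    // stack-locked:            [ pointer to displaced header     | 00 ]  <- markOopDesc::encode(highest_lock)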
 315 
 316 enum HeuristicsResult {
 317   HR_NOT_BIASED    = 1,
 318   HR_SINGLE_REVOKE = 2,
 319   HR_BULK_REBIAS   = 3,
 320   HR_BULK_REVOKE   = 4
 321 };
 322 
 323 
 324 static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
 325   markOop mark = o->mark();
 326   if (!mark->has_bias_pattern()) {
 327     return HR_NOT_BIASED;
 328   }
 329 
 330   // Heuristics to attempt to throttle the number of revocations.
 331   // Stages:
 332   // 1. Revoke the biases of all objects in the heap of this type,
 333   //    but allow rebiasing of those objects if unlocked.
 334   // 2. Revoke the biases of all objects in the heap of this type
 335   //    and don't allow rebiasing of these objects. Disable
 336   //    allocation of objects of that type with the bias bit set.
 337   Klass* k = o->klass();
 338   jlong cur_time = os::javaTimeMillis();
 339   jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
 340   int revocation_count = k->biased_lock_revocation_count();
 341   if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
 342       (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
 343       (last_bulk_revocation_time != 0) &&
 344       (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {


 357     revocation_count = 0;
 358   }
 359 
 360   // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
 361   if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
 362     revocation_count = k->atomic_incr_biased_lock_revocation_count();
 363   }
 364 
 365   if (revocation_count == BiasedLockingBulkRevokeThreshold) {
 366     return HR_BULK_REVOKE;
 367   }
 368 
 369   if (revocation_count == BiasedLockingBulkRebiasThreshold) {
 370     return HR_BULK_REBIAS;
 371   }
 372 
 373   return HR_SINGLE_REVOKE;
 374 }
 375 
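To make the staging concrete, here is a worked walk-through of the counter logic above, assuming the default flag values (BiasedLockingBulkRebiasThreshold = 20, BiasedLockingBulkRevokeThreshold = 40, BiasedLockingDecayTime = 25000 ms); the flags are tunable, so treat the numbers as illustrative:

    // revocations 1..19 for a type -> HR_SINGLE_REVOKE (one object at a time)
    // revocation 20                -> HR_BULK_REBIAS   (stage 1: bump the type's epoch)
    // revocations 21..39           -> HR_SINGLE_REVOKE, except that if 25 s have
    //                                 passed since the last bulk operation the
    //                                 count decays back to 0 first
    // revocation 40                -> HR_BULK_REVOKE   (stage 2: disable biasing
    //                                 for the type entirely)
    // beyond 40                    -> the counter saturates and no further bulk
    //                                 operations are triggered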
 376 
 377 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
 378                                                                    bool bulk_rebias,
 379                                                                    bool attempt_rebias_of_object,
 380                                                                    JavaThread* requesting_thread) {
 381   assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

 382 
 383   log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
 384                           INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
 385                           (bulk_rebias ? "rebias" : "revoke"),
 386                           p2i((void *) o),
 387                           (intptr_t) o->mark(),
 388                           o->klass()->external_name());
 389 
 390   jlong cur_time = os::javaTimeMillis();
 391   o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
 392 
 393 
 394   Klass* k_o = o->klass();
 395   Klass* klass = k_o;
 396 
 397   {
 398     JavaThreadIteratorWithHandle jtiwh;
 399 
 400     if (bulk_rebias) {
 401       // Use the epoch in the klass of the object to implicitly revoke
 402       // all biases of objects of this data type and force them to be
 403       // reacquired. However, we also need to walk the stacks of all
 404       // threads and update the headers of lightweight locked objects
 405       // with biases to have the current epoch.
 406 
 407       // If the prototype header doesn't have the bias pattern, don't
 408       // try to update the epoch -- assume another VM operation came in
 409       // and reset the header to the unbiased state, which will
 410       // implicitly cause all existing biases to be revoked
 411       if (klass->prototype_header()->has_bias_pattern()) {
 412         int prev_epoch = klass->prototype_header()->bias_epoch();
 413         klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());


 415 
 416         // Now walk all threads' stacks and adjust epochs of any biased
 417         // and locked objects of this data type we encounter
 418         for (; JavaThread *thr = jtiwh.next(); ) {
 419           GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
 420           for (int i = 0; i < cached_monitor_info->length(); i++) {
 421             MonitorInfo* mon_info = cached_monitor_info->at(i);
 422             oop owner = mon_info->owner();
 423             markOop mark = owner->mark();
 424             if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
 425               // We might have encountered this object already in the case of recursive locking
 426               assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
 427               owner->set_mark(mark->set_bias_epoch(cur_epoch));
 428             }
 429           }
 430         }
 431       }
 432 
 433       // At this point we're done. All we have to do is potentially
 434       // adjust the header of the given object to revoke its bias.
 435       revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
 436     } else {
 437       if (log_is_enabled(Info, biasedlocking)) {
 438         ResourceMark rm;
 439         log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
 440       }
 441 
 442       // Disable biased locking for this data type. Not only will this
 443       // cause future instances to not be biased, but existing biased
 444       // instances will notice that this implicitly caused their biases
 445       // to be revoked.
 446       klass->set_prototype_header(markOopDesc::prototype());
 447 
 448       // Now walk all threads' stacks and forcibly revoke the biases of
 449       // any locked and biased objects of this data type we encounter.
 450       for (; JavaThread *thr = jtiwh.next(); ) {
 451         GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
 452         for (int i = 0; i < cached_monitor_info->length(); i++) {
 453           MonitorInfo* mon_info = cached_monitor_info->at(i);
 454           oop owner = mon_info->owner();
 455           markOop mark = owner->mark();
 456           if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
 457             revoke_bias(owner, false, true, requesting_thread, NULL);
 458           }
 459         }
 460       }
 461 
 462       // The bias of the passed object must be forcibly revoked as well
 463       // to ensure guarantees to callers
 464       revoke_bias(o, false, true, requesting_thread, NULL);
 465     }
 466   } // ThreadsListHandle is destroyed here.
 467 
 468   log_info(biasedlocking)("* Ending bulk revocation");
 469 
 470   BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
 471 
 472   if (attempt_rebias_of_object &&
 473       o->mark()->has_bias_pattern() &&
 474       klass->prototype_header()->has_bias_pattern()) {
 475     markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
 476                                            klass->prototype_header()->bias_epoch());
 477     o->set_mark(new_mark);
 478     status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
 479     log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
 480   }
 481 
 482   assert(!o->mark()->has_bias_pattern() ||
 483          (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
 484          "bug in bulk bias revocation");
 485 
 486   return status_code;
 487 }
 488 
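The epoch trick in the bulk-rebias arm is worth spelling out: a bias is only honored while the epoch in the object's mark word matches the epoch in its klass's prototype header, so incrementing the prototype epoch invalidates every outstanding bias of that type at once, without touching the objects. A minimal sketch of the validity test, written with the accessors used above (an illustration, not code from this file):

    static bool bias_is_currently_valid(oop obj) {
      markOop mark  = obj->mark();
      markOop proto = obj->klass()->prototype_header();
      return mark->has_bias_pattern()                   // object still carries a bias
          && proto->has_bias_pattern()                  // biasing not disabled for the type
          && mark->bias_epoch() == proto->bias_epoch(); // epoch not bumped since biasing
    }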
 489 
 490 static void clean_up_cached_monitor_info() {



 491   // Walk the thread list clearing out the cached monitors
 492   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
 493     thr->set_cached_monitor_info(NULL);
 494   }

 495 }
 496 
 497 
 498 class VM_RevokeBias : public VM_Operation {
 499 protected:
 500   Handle* _obj;
 501   GrowableArray<Handle>* _objs;
 502   JavaThread* _requesting_thread;


 503   BiasedLocking::Condition _status_code;
 504   traceid _biased_locker_id;
 505   uint64_t _safepoint_id;
 506 
 507 public:
 508   VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)


 509     : _obj(obj)
 510     , _objs(NULL)
 511     , _requesting_thread(requesting_thread)
 512     , _status_code(BiasedLocking::NOT_BIASED)
 513     , _biased_locker_id(0)
 514     , _safepoint_id(0) {}
 515 
 516   VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
 517     : _obj(NULL)
 518     , _objs(objs)
 519     , _requesting_thread(requesting_thread)


 520     , _status_code(BiasedLocking::NOT_BIASED)
 521     , _biased_locker_id(0)
 522     , _safepoint_id(0) {}
 523 
 524   virtual VMOp_Type type() const { return VMOp_RevokeBias; }
 525 
 526   virtual bool doit_prologue() {
 527     // Verify that there is actual work to do since the callers just
 528     // give us locked object(s). If we don't find any biased objects
 529     // there is nothing to do and we avoid a safepoint.
 530     if (_obj != NULL) {
 531       markOop mark = (*_obj)()->mark();
 532       if (mark->has_bias_pattern()) {
 533         return true;
 534       }
 535     } else {
 536       for ( int i = 0 ; i < _objs->length(); i++ ) {
 537         markOop mark = (_objs->at(i))()->mark();
 538         if (mark->has_bias_pattern()) {
 539           return true;
 540         }
 541       }
 542     }
 543     return false;
 544   }
 545 
 546   virtual void doit() {
 547     if (_obj != NULL) {
 548       log_info(biasedlocking)("Revoking bias with potentially per-thread safepoint:");
 549       JavaThread* biased_locker = NULL;
 550       _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
 551       if (biased_locker != NULL) {
 552         _biased_locker_id = JFR_THREAD_ID(biased_locker);
 553       }
 554       _safepoint_id = SafepointSynchronize::safepoint_id();
 555       clean_up_cached_monitor_info();
 556       return;
 557     } else {
 558       log_info(biasedlocking)("Revoking bias with global safepoint:");
 559       BiasedLocking::revoke_at_safepoint(_objs);
 560     }



 561   }
 562 
 563   BiasedLocking::Condition status_code() const {
 564     return _status_code;
 565   }
 566 
 567   traceid biased_locker() const {
 568     return _biased_locker_id;
 569   }
 570 
 571   uint64_t safepoint_id() const {
 572     return _safepoint_id;
 573   }
 574 };
 575 
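Note the pattern in doit_prologue above: it runs in the requesting thread before any safepoint is scheduled, so returning false cancels the operation without stopping the VM. The shape generalizes to any VM_Operation whose work might turn out to be a no-op; a schematic sketch (hypothetical operation, with work_exists standing in for the biased-pattern check):

    static bool work_exists() { return false; }  // hypothetical placeholder predicate

    class VM_MaybeWork : public VM_Operation {   // hypothetical operation
     public:
      virtual VMOp_Type type() const { return VMOp_RevokeBias; }  // placeholder type
      virtual bool doit_prologue() {
        // Requesting thread, no safepoint yet: returning false skips it entirely.
        return work_exists();
      }
      virtual void doit() {
        // Reached only if doit_prologue() returned true; runs at a safepoint.
      }
    };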
 576 
 577 class VM_BulkRevokeBias : public VM_RevokeBias {
 578 private:
 579   bool _bulk_rebias;
 580   bool _attempt_rebias_of_object;



 581 
 582 public:
 583   VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
 584                     bool bulk_rebias,
 585                     bool attempt_rebias_of_object)
 586     : VM_RevokeBias(obj, requesting_thread)
 587     , _bulk_rebias(bulk_rebias)
 588     , _attempt_rebias_of_object(attempt_rebias_of_object) {}
 589 
 590   virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
 591   virtual bool doit_prologue()   { return true; }
 592 
 593   virtual void doit() {
 594     _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
 595     _safepoint_id = SafepointSynchronize::safepoint_id();
 596     clean_up_cached_monitor_info();

 597   }
 598 
 599   bool is_bulk_rebias() const {
 600     return _bulk_rebias;
 601   }
 602 };
 603 

 604 static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) {
 605   assert(event != NULL, "invariant");
 606   assert(k != NULL, "invariant");
 607   assert(event->should_commit(), "invariant");
 608   event->set_lockClass(k);
 609   event->commit();
 610 }
 611 
 612 static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, VM_RevokeBias* op) {
 613   assert(event != NULL, "invariant");
 614   assert(k != NULL, "invariant");
 615   assert(op != NULL, "invariant");
 616   assert(event->should_commit(), "invariant");
 617   event->set_lockClass(k);
 618   event->set_safepointId(op->safepoint_id());
 619   event->set_previousOwner(op->biased_locker());
 620   event->commit();
 621 }
 622 
 623 static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, VM_BulkRevokeBias* op) {
 624   assert(event != NULL, "invariant");
 625   assert(k != NULL, "invariant");
 626   assert(op != NULL, "invariant");
 627   assert(event->should_commit(), "invariant");
 628   event->set_revokedClass(k);
 629   event->set_disableBiasing(!op->is_bulk_rebias());
 630   event->set_safepointId(op->safepoint_id());
 631   event->commit();
 632 }
 633 
 634 BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
 635   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
 636 

 637   // We can revoke the biases of anonymously-biased objects
 638   // efficiently enough that we should not cause these revocations to
 639   // update the heuristics because doing so may cause unwanted bulk
 640   // revocations (which are expensive) to occur.
 641   markOop mark = obj->mark();
 642   if (mark->is_biased_anonymously() && !attempt_rebias) {
 643     // We are probably trying to revoke the bias of this object due to
 644     // an identity hash code computation. Try to revoke the bias
 645     // without a safepoint. This is possible if we can successfully
 646     // compare-and-exchange an unbiased header into the mark word of
 647     // the object, meaning that no other thread has raced to acquire
 648     // the bias of the object.
 649     markOop biased_value       = mark;
 650     markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
 651     markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
 652     if (res_mark == biased_value) {
 653       return BIAS_REVOKED;
 654     }

 655   } else if (mark->has_bias_pattern()) {
 656     Klass* k = obj->klass();
 657     markOop prototype_header = k->prototype_header();
 658     if (!prototype_header->has_bias_pattern()) {
 659       // This object has a stale bias from before the bulk revocation
 660       // for this data type occurred. It's pointless to update the
 661       // heuristics at this point so simply update the header with a
 662       // CAS. If we fail this race, the object's bias has been revoked
 663       // by another thread so we simply return and let the caller deal
 664       // with it.
 665       markOop biased_value       = mark;
 666       markOop res_mark = obj->cas_set_mark(prototype_header, mark);
 667       assert(!obj->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
 668       return BIAS_REVOKED;
 669     } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
 670       // The epoch of this biasing has expired indicating that the
 671       // object is effectively unbiased. Depending on whether we need
 672       // to rebias or revoke the bias of this object we can do it
 673       // efficiently enough with a CAS that we shouldn't update the
 674       // heuristics. This is normally done in the assembly code but we
 675       // can reach this point due to various points in the runtime
 676       // needing to revoke biases.

 677       if (attempt_rebias) {
 678         assert(THREAD->is_Java_thread(), "");
 679         markOop biased_value       = mark;
 680         markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
 681         markOop res_mark = obj->cas_set_mark(rebiased_prototype, mark);
 682         if (res_mark == biased_value) {
 683           return BIAS_REVOKED_AND_REBIASED;
 684         }
 685       } else {
 686         markOop biased_value       = mark;
 687         markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
 688         markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
 689         if (res_mark == biased_value) {
 690           return BIAS_REVOKED;
 691         }
 692       }

 693     }
 694   }
 695 
 696   HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
 697   if (heuristics == HR_NOT_BIASED) {
 698     return NOT_BIASED;
 699   } else if (heuristics == HR_SINGLE_REVOKE) {
 700     Klass *k = obj->klass();
 701     markOop prototype_header = k->prototype_header();
 702     if (mark->biased_locker() == THREAD &&
 703         prototype_header->bias_epoch() == mark->bias_epoch()) {
 704       // A thread is trying to revoke the bias of an object biased
 705       // toward it, again likely due to an identity hash code
 706       // computation. We can again avoid a safepoint in this case
 707       // since we are only going to walk our own stack. There are no
 708       // races with revocations occurring in other threads because we
 709       // reach no safepoints in the revocation path.
 710       // Also check the epoch because even if threads match, another thread
 711       // can come in with a CAS to steal the bias of an object that has a
 712       // stale epoch.
 713       ResourceMark rm;
 714       log_info(biasedlocking)("Revoking bias by walking my own stack:");
 715       EventBiasedLockSelfRevocation event;
 716       BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
 717       ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
 718       assert(cond == BIAS_REVOKED, "why not?");

 719       if (event.should_commit()) {
 720         post_self_revocation_event(&event, k);
 721       }
 722       return cond;
 723     } else {
 724       EventBiasedLockRevocation event;
 725       VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
 726       VMThread::execute(&revoke);
 727       if (event.should_commit() && revoke.status_code() != NOT_BIASED) {
 728         post_revocation_event(&event, k, &revoke);
 729       }
 730       return revoke.status_code();
 731     }
 732   }
 733 
 734   assert((heuristics == HR_BULK_REVOKE) ||
 735          (heuristics == HR_BULK_REBIAS), "?");
 736   EventBiasedLockClassRevocation event;
 737   VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
 738                                 (heuristics == HR_BULK_REBIAS),
 739                                 attempt_rebias);
 740   VMThread::execute(&bulk_revoke);
 741   if (event.should_commit()) {
 742     post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
 743   }
 744   return bulk_revoke.status_code();


 745 }
 746 
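All three fast paths above lean on the same compare-and-swap idiom: install a new header only if the mark word still holds the value we originally read, so a successful CAS proves that no other thread revoked, rebiased, or locked the object in the meantime. Stripped to its core (a hypothetical helper using the same accessors as above):

    // Returns true iff this thread revoked the bias itself; false means the
    // mark word changed first and the caller must re-examine the object.
    static bool try_revoke_with_cas(oop obj, markOop observed) {
      markOop unbiased = markOopDesc::prototype()->set_age(observed->age());
      return obj->cas_set_mark(unbiased, observed) == observed;
    }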
 747 
 748 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
 749   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
 750   if (objs->length() == 0) {
 751     return;
 752   }
 753   VM_RevokeBias revoke(objs, JavaThread::current());
 754   VMThread::execute(&revoke);
 755 }
 756 
 757 
 758 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
 759   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
 760   oop obj = h_obj();
 761   HeuristicsResult heuristics = update_heuristics(obj, false);
 762   if (heuristics == HR_SINGLE_REVOKE) {
 763     revoke_bias(obj, false, false, NULL, NULL);
 764   } else if ((heuristics == HR_BULK_REBIAS) ||
 765              (heuristics == HR_BULK_REVOKE)) {
 766     bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
 767   }
 768   clean_up_cached_monitor_info();

 769 }
 770 
 771 
 772 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
 773   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
 774   int len = objs->length();
 775   for (int i = 0; i < len; i++) {
 776     oop obj = (objs->at(i))();
 777     HeuristicsResult heuristics = update_heuristics(obj, false);
 778     if (heuristics == HR_SINGLE_REVOKE) {
 779       revoke_bias(obj, false, false, NULL, NULL);
 780     } else if ((heuristics == HR_BULK_REBIAS) ||
 781                (heuristics == HR_BULK_REVOKE)) {
 782       bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
 783     }
 784   }
 785   clean_up_cached_monitor_info();
 786 }
 787 
 788 
 789 void BiasedLocking::preserve_marks() {
 790   if (!UseBiasedLocking)
 791     return;
 792 
 793   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
 794 
 795   assert(_preserved_oop_stack  == NULL, "double initialization");
 796   assert(_preserved_mark_stack == NULL, "double initialization");
 797 
 798   // In order to reduce the number of mark words preserved during GC
 799   // due to the presence of biased locking, we reinitialize most mark


 845 
 846   int len = _preserved_oop_stack->length();
 847   for (int i = 0; i < len; i++) {
 848     Handle owner = _preserved_oop_stack->at(i);
 849     markOop mark = _preserved_mark_stack->at(i);
 850     owner->set_mark(mark);
 851   }
 852 
 853   delete _preserved_oop_stack;
 854   _preserved_oop_stack = NULL;
 855   delete _preserved_mark_stack;
 856   _preserved_mark_stack = NULL;
 857 }
 858 
 859 
 860 int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
 861 int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
 862 int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
 863 int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
 864 int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }

 865 int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
 866 int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }
 867 
 868 
 869 // BiasedLockingCounters
 870 
 871 int BiasedLockingCounters::slow_path_entry_count() const {
 872   if (_slow_path_entry_count != 0) {
 873     return _slow_path_entry_count;
 874   }
 875   int sum = _biased_lock_entry_count   + _anonymously_biased_lock_entry_count +
 876             _rebiased_lock_entry_count + _revoked_lock_entry_count +
 877             _fast_path_entry_count;
 878 
 879   return _total_entry_count - sum;
 880 }
 881 
 882 void BiasedLockingCounters::print_on(outputStream* st) const {
 883   st->print_cr("# total entries: %d", _total_entry_count);
 884   st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
 885   st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
 886   st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
 887   st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);

 888   st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
 889   st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
 890 }
 891 
 892 void BiasedLockingCounters::print() const { print_on(tty); }

src/hotspot/share/runtime/biasedLocking.cpp (new version)

 102   // bias revocation.
 103   if (UseBiasedLocking) {
 104     if (BiasedLockingStartupDelay > 0) {
 105       EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
 106       task->enroll();
 107     } else {
 108       enable_biased_locking();
 109     }
 110   }
 111 }
 112 
 113 
 114 bool BiasedLocking::enabled() {
 115   assert(UseBiasedLocking, "precondition");
 116   // We check "BiasedLockingStartupDelay == 0" here to cover the
 117   // possibility of calls to BiasedLocking::enabled() before
 118   // BiasedLocking::init().
 119   return _biased_locking_enabled || BiasedLockingStartupDelay == 0;
 120 }
 121 
 122 
 123 // Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
 124 static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
 125   GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
 126   if (info != NULL) {
 127     return info;
 128   }
 129 
 130   info = new GrowableArray<MonitorInfo*>();
 131 
 132   // It's possible for the thread to not have any Java frames on it,
 133   // i.e., if it's the main thread and it's already returned from main()
 134   if (thread->has_last_Java_frame()) {
 135     RegisterMap rm(thread);
 136     for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
 137       GrowableArray<MonitorInfo*> *monitors = vf->monitors();
 138       if (monitors != NULL) {
 139         int len = monitors->length();
 140         // Walk monitors youngest to oldest
 141         for (int i = len - 1; i >= 0; i--) {
 142           MonitorInfo* mon_info = monitors->at(i);
 143           if (mon_info->eliminated()) continue;
 144           oop owner = mon_info->owner();
 145           if (owner != NULL) {
 146             info->append(mon_info);
 147           }
 148         }
 149       }
 150     }
 151   }
 152 
 153   thread->set_cached_monitor_info(info);
 154   return info;
 155 }
 156 
 157 
 158 // After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL,
 159 // AND it is a living thread. Otherwise it will not be updated (i.e., the caller is responsible for initialization).
 160 BiasedLocking::Condition BiasedLocking::single_revoke_at_safepoint(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
 161   assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
 162   assert(Thread::current()->is_VM_thread(), "must be VMThread");
 163 
 164   markOop mark = obj->mark();
 165   if (!mark->has_bias_pattern()) {
 166     if (log_is_enabled(Info, biasedlocking)) {
 167       ResourceMark rm;
 168       log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
 169                               ", mark " INTPTR_FORMAT ", type %s"
 170                               ", requesting thread " INTPTR_FORMAT
 171                               " because it's no longer biased)",
 172                               p2i((void *)obj), (intptr_t) mark,
 173                               obj->klass()->external_name(),
 174                               (intptr_t) requesting_thread);
 175     }
 176     return NOT_BIASED;
 177   }
 178 
 179   uint age = mark->age();
 180   markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
 181   markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
 182 
 183   // Log at "info" level if not bulk, else "trace" level
 184   if (!is_bulk) {
 185     ResourceMark rm;
 186     log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
 187                             INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
 188                             ", allow rebias %d, requesting thread " INTPTR_FORMAT,
 189                             p2i((void *)obj),
 190                             (intptr_t) mark,
 191                             obj->klass()->external_name(),
 192                             (intptr_t) obj->klass()->prototype_header(),
 193                             (allow_rebias ? 1 : 0),
 194                             (intptr_t) requesting_thread);
 195   } else {
 196     ResourceMark rm;


 202                              obj->klass()->external_name(),
 203                              (intptr_t) obj->klass()->prototype_header(),
 204                              (allow_rebias ? 1 : 0),
 205                              (intptr_t) requesting_thread);
 206   }
 207 
 208   JavaThread* biased_thread = mark->biased_locker();
 209   if (biased_thread == NULL) {
 210     // Object is anonymously biased. We can get here if, for
 211     // example, we revoke the bias due to an identity hash code
 212     // being computed for an object.
 213     if (!allow_rebias) {
 214       obj->set_mark(unbiased_prototype);
 215     }
 216     // Log at "info" level if not bulk, else "trace" level
 217     if (!is_bulk) {
 218       log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
 219     } else {
 220       log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
 221     }
 222     return BIAS_REVOKED;
 223   }
 224 
 225   // Handle case where the thread toward which the object was biased has exited
 226   bool thread_is_alive = false;
 227   if (requesting_thread == biased_thread) {
 228     thread_is_alive = true;
 229   } else {
 230     ThreadsListHandle tlh;
 231     thread_is_alive = tlh.includes(biased_thread);
 232   }
 233   if (!thread_is_alive) {
 234     if (allow_rebias) {
 235       obj->set_mark(biased_prototype);
 236     } else {
 237       obj->set_mark(unbiased_prototype);
 238     }
 239     // Log at "info" level if not bulk, else "trace" level
 240     if (!is_bulk) {
 241       log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
 242                               PTR_FORMAT ")", p2i(biased_thread));
 243     } else {
 244       log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
 245                                PTR_FORMAT ")", p2i(biased_thread));
 246     }
 247     return BIAS_REVOKED;
 248   }
 249 
 250   // Log at "info" level if not bulk, else "trace" level
 251   if (!is_bulk) {
 252     log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
 253                             PTR_FORMAT ")", p2i(biased_thread));
 254   } else {
 255     log_trace(biasedlocking)("  Revoked bias of object biased toward live thread ("
 256                                PTR_FORMAT ")", p2i(biased_thread));
 257   }
 258 
 259   // Thread owning bias is alive.
 260   // Check to see whether it currently owns the lock and, if so,
 261   // write down the needed displaced headers to the thread's stack.
 262   // Otherwise, restore the object's header either to the unlocked
 263   // or unbiased state.
 264   GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
 265   BasicLock* highest_lock = NULL;
 266   for (int i = 0; i < cached_monitor_info->length(); i++) {
 267     MonitorInfo* mon_info = cached_monitor_info->at(i);
 268     if (oopDesc::equals(mon_info->owner(), obj)) {
 269       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
 270                                p2i((void *) mon_info->owner()),
 271                                p2i((void *) obj));
 272       // Assume recursive case and fix up highest lock below
 273       markOop mark = markOopDesc::encode((BasicLock*) NULL);
 274       highest_lock = mon_info->lock();
 275       highest_lock->set_displaced_header(mark);
 276     } else {
 277       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
 278                                p2i((void *) mon_info->owner()),
 279                                p2i((void *) obj));
 280     }
 281   }
 282   if (highest_lock != NULL) {
 283     // Fix up highest lock to contain displaced header and point
 284     // object at it
 285     highest_lock->set_displaced_header(unbiased_prototype);
 286     // Reset object header to point to displaced mark.
 287     // Must release-store the lock address for platforms without TSO
 288     // ordering (e.g. ppc).
 289     obj->release_set_mark(markOopDesc::encode(highest_lock));
 290     assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
 291     // Log at "info" level if not bulk, else "trace" level
 292     if (!is_bulk) {
 293       log_info(biasedlocking)("  Revoked bias of currently-locked object");
 294     } else {
 295       log_trace(biasedlocking)("  Revoked bias of currently-locked object");
 296     }
 297   } else {
 298     // Log at "info" level if not bulk, else "trace" level
 299     if (!is_bulk) {
 300       log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
 301     } else {
 302       log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
 303     }
 304     if (allow_rebias) {
 305       obj->set_mark(biased_prototype);
 306     } else {
 307       // Store the unlocked value into the object's header.
 308       obj->set_mark(unbiased_prototype);
 309     }
 310   }
 311 
 312   // If requested, return information on which thread held the bias
 313   if (biased_locker != NULL) {
 314     *biased_locker = biased_thread;
 315   }
 316 
 317   return BIAS_REVOKED;
 318 }
 319 
 320 
 321 enum HeuristicsResult {
 322   HR_NOT_BIASED    = 1,
 323   HR_SINGLE_REVOKE = 2,
 324   HR_BULK_REBIAS   = 3,
 325   HR_BULK_REVOKE   = 4
 326 };
 327 
 328 
 329 static HeuristicsResult update_heuristics(oop o) {
 330   markOop mark = o->mark();
 331   if (!mark->has_bias_pattern()) {
 332     return HR_NOT_BIASED;
 333   }
 334 
 335   // Heuristics to attempt to throttle the number of revocations.
 336   // Stages:
 337   // 1. Revoke the biases of all objects in the heap of this type,
 338   //    but allow rebiasing of those objects if unlocked.
 339   // 2. Revoke the biases of all objects in the heap of this type
 340   //    and don't allow rebiasing of these objects. Disable
 341   //    allocation of objects of that type with the bias bit set.
 342   Klass* k = o->klass();
 343   jlong cur_time = os::javaTimeMillis();
 344   jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
 345   int revocation_count = k->biased_lock_revocation_count();
 346   if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
 347       (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
 348       (last_bulk_revocation_time != 0) &&
 349       (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {


 362     revocation_count = 0;
 363   }
 364 
 365   // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
 366   if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
 367     revocation_count = k->atomic_incr_biased_lock_revocation_count();
 368   }
 369 
 370   if (revocation_count == BiasedLockingBulkRevokeThreshold) {
 371     return HR_BULK_REVOKE;
 372   }
 373 
 374   if (revocation_count == BiasedLockingBulkRebiasThreshold) {
 375     return HR_BULK_REBIAS;
 376   }
 377 
 378   return HR_SINGLE_REVOKE;
 379 }
 380 
 381 
 382 BiasedLocking::Condition BiasedLocking::bulk_revoke_or_rebias_at_safepoint(oop o,
 383                                                                    bool bulk_rebias,
 384                                                                    bool attempt_rebias_of_object,
 385                                                                    JavaThread* requesting_thread) {
 386   assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
 387   assert(Thread::current()->is_VM_thread(), "must be VMThread");
 388 
 389   log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
 390                           INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
 391                           (bulk_rebias ? "rebias" : "revoke"),
 392                           p2i((void *) o),
 393                           (intptr_t) o->mark(),
 394                           o->klass()->external_name());
 395 
 396   jlong cur_time = os::javaTimeMillis();
 397   o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
 398 

 399   Klass* k_o = o->klass();
 400   Klass* klass = k_o;
 401 
 402   {
 403     JavaThreadIteratorWithHandle jtiwh;
 404 
 405     if (bulk_rebias) {
 406       // Use the epoch in the klass of the object to implicitly revoke
 407       // all biases of objects of this data type and force them to be
 408       // reacquired. However, we also need to walk the stacks of all
 409       // threads and update the headers of lightweight locked objects
 410       // with biases to have the current epoch.
 411 
 412       // If the prototype header doesn't have the bias pattern, don't
 413       // try to update the epoch -- assume another VM operation came in
 414       // and reset the header to the unbiased state, which will
 415       // implicitly cause all existing biases to be revoked
 416       if (klass->prototype_header()->has_bias_pattern()) {
 417         int prev_epoch = klass->prototype_header()->bias_epoch();
 418         klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());


 420 
 421         // Now walk all threads' stacks and adjust epochs of any biased
 422         // and locked objects of this data type we encounter
 423         for (; JavaThread *thr = jtiwh.next(); ) {
 424           GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
 425           for (int i = 0; i < cached_monitor_info->length(); i++) {
 426             MonitorInfo* mon_info = cached_monitor_info->at(i);
 427             oop owner = mon_info->owner();
 428             markOop mark = owner->mark();
 429             if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
 430               // We might have encountered this object already in the case of recursive locking
 431               assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
 432               owner->set_mark(mark->set_bias_epoch(cur_epoch));
 433             }
 434           }
 435         }
 436       }
 437 
 438       // At this point we're done. All we have to do is potentially
 439       // adjust the header of the given object to revoke its bias.
 440       single_revoke_at_safepoint(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
 441     } else {
 442       if (log_is_enabled(Info, biasedlocking)) {
 443         ResourceMark rm;
 444         log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
 445       }
 446 
 447       // Disable biased locking for this data type. Not only will this
 448       // cause future instances to not be biased, but existing biased
 449       // instances will notice that this implicitly caused their biases
 450       // to be revoked.
 451       klass->set_prototype_header(markOopDesc::prototype());
 452 
 453       // Now walk all threads' stacks and forcibly revoke the biases of
 454       // any locked and biased objects of this data type we encounter.
 455       for (; JavaThread *thr = jtiwh.next(); ) {
 456         GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
 457         for (int i = 0; i < cached_monitor_info->length(); i++) {
 458           MonitorInfo* mon_info = cached_monitor_info->at(i);
 459           oop owner = mon_info->owner();
 460           markOop mark = owner->mark();
 461           if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
 462             single_revoke_at_safepoint(owner, false, true, requesting_thread, NULL);
 463           }
 464         }
 465       }
 466 
 467       // The bias of the passed object must be forcibly revoked as well
 468       // to ensure guarantees to callers
 469       single_revoke_at_safepoint(o, false, true, requesting_thread, NULL);
 470     }
 471   } // ThreadsListHandle is destroyed here.
 472 
 473   log_info(biasedlocking)("* Ending bulk revocation");
 474 
 475   BiasedLocking::Condition status_code = BIAS_REVOKED;
 476 
 477   if (attempt_rebias_of_object &&
 478       o->mark()->has_bias_pattern() &&
 479       klass->prototype_header()->has_bias_pattern()) {
 480     markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
 481                                            klass->prototype_header()->bias_epoch());
 482     o->set_mark(new_mark);
 483     status_code = BIAS_REVOKED_AND_REBIASED;
 484     log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
 485   }
 486 
 487   assert(!o->mark()->has_bias_pattern() ||
 488          (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
 489          "bug in bulk bias revocation");
 490 
 491   return status_code;
 492 }
 493 
 494 
 495 static void clean_up_cached_monitor_info(JavaThread* thread = NULL) {
 496   if (thread != NULL) {
 497     thread->set_cached_monitor_info(NULL);
 498   } else {
 499     // Walk the thread list clearing out the cached monitors
 500     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
 501       thr->set_cached_monitor_info(NULL);
 502     }
 503   }
 504 }
 505 
 506 
 507 class VM_BulkRevokeBias : public VM_Operation {
 508 private:
 509   Handle* _obj;

 510   JavaThread* _requesting_thread;
 511   bool _bulk_rebias;
 512   bool _attempt_rebias_of_object;
 513   BiasedLocking::Condition _status_code;

 514   uint64_t _safepoint_id;
 515 
 516 public:
 517   VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
 518                     bool bulk_rebias,
 519                     bool attempt_rebias_of_object)
 520     : _obj(obj)
 521     , _requesting_thread(requesting_thread)
 522     , _bulk_rebias(bulk_rebias)
 523     , _attempt_rebias_of_object(attempt_rebias_of_object)
 524     , _status_code(BiasedLocking::NOT_BIASED)

 525     , _safepoint_id(0) {}
 526 
 527   virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
 528 
 529   virtual void doit() {
 530     _status_code = BiasedLocking::bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
 531     _safepoint_id = SafepointSynchronize::safepoint_id();
 532     clean_up_cached_monitor_info();
 533   }
 534 
 535   bool is_bulk_rebias() const {
 536     return _bulk_rebias;
 537   }
 538 
 539   BiasedLocking::Condition status_code() const {
 540     return _status_code;
 541   }
 542 
 543   uint64_t safepoint_id() const {
 544     return _safepoint_id;
 545   }
 546 };
 547 
 548 
 549 class RevokeOneBias : public ThreadClosure {
 550 protected:
 551   Handle _obj;
 552   JavaThread* _requesting_thread;
 553   JavaThread* _biased_locker;
 554   BiasedLocking::Condition _status_code;
 555   traceid _biased_locker_id;
 556 
 557 public:
 558   RevokeOneBias(Handle obj, JavaThread* requesting_thread, JavaThread* biased_locker)
 559     : _obj(obj)
 560     , _requesting_thread(requesting_thread)
 561     , _biased_locker(biased_locker)
 562     , _status_code(BiasedLocking::NOT_BIASED)
 563     , _biased_locker_id(0) {}
 564 
 565   void do_thread(Thread* target) {
 566     assert(target == _biased_locker, "Wrong thread");
 567 
 568     oop o = _obj();
 569     markOop mark = o->mark();
 570 
 571     if (!mark->has_bias_pattern()) {
 572       return;
 573     }
 574 
 575     markOop prototype = o->klass()->prototype_header();
 576     if (!prototype->has_bias_pattern()) {
 577       // This object has a stale bias from before the handshake
 578       // was requested. If we fail this race, the object's bias
 579       // has been revoked by another thread so we simply return.
 580       markOop biased_value = mark;
 581       mark = o->cas_set_mark(markOopDesc::prototype()->set_age(mark->age()), mark);
 582       assert(!o->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
 583       if (biased_value == mark) {
 584         _status_code = BiasedLocking::BIAS_REVOKED;
 585       }
 586       return;
 587     }
 588 
 589     if (_biased_locker == mark->biased_locker()) {
 590       if (mark->bias_epoch() == prototype->bias_epoch()) {
 591         // Epoch is still valid. This means biaser could be currently
 592         // synchronized on this object. We must walk its stack looking
 593         // for monitor records associated with this object and change
 594         // them to be stack locks if any are found.
 595         ResourceMark rm;
 596         BiasedLocking::walk_stack_and_revoke(o, _biased_locker);
 597         _biased_locker->set_cached_monitor_info(NULL);
 598         assert(!o->mark()->has_bias_pattern(), "invariant");
 599         _biased_locker_id = JFR_THREAD_ID(_biased_locker);
 600         _status_code = BiasedLocking::BIAS_REVOKED;
 601         return;
 602       } else {
 603         markOop biased_value = mark;
 604         mark = o->cas_set_mark(markOopDesc::prototype()->set_age(mark->age()), mark);
 605         if (mark == biased_value || !mark->has_bias_pattern()) {
 606           assert(!o->mark()->has_bias_pattern(), "should be revoked");
 607           _status_code = (biased_value == mark) ? BiasedLocking::BIAS_REVOKED : BiasedLocking::NOT_BIASED;
 608           return;
 609         }
 610       }
 611     }
 612 
 613     _status_code = BiasedLocking::NOT_REVOKED;
 614   }
 615 
 616   BiasedLocking::Condition status_code() const {
 617     return _status_code;
 618   }
 619 
 620   traceid biased_locker() const {
 621     return _biased_locker_id;
 622   }
 623 };
 624 
 625 
 626 static void post_self_revocation_event(EventBiasedLockSelfRevocation* event, Klass* k) {
 627   assert(event != NULL, "invariant");
 628   assert(k != NULL, "invariant");
 629   assert(event->should_commit(), "invariant");
 630   event->set_lockClass(k);
 631   event->commit();
 632 }
 633 
 634 static void post_revocation_event(EventBiasedLockRevocation* event, Klass* k, RevokeOneBias* op) {
 635   assert(event != NULL, "invariant");
 636   assert(k != NULL, "invariant");
 637   assert(op != NULL, "invariant");
 638   assert(event->should_commit(), "invariant");
 639   event->set_lockClass(k);
 640   event->set_safepointId(0);
 641   event->set_previousOwner(op->biased_locker());
 642   event->commit();
 643 }
 644 
 645 static void post_class_revocation_event(EventBiasedLockClassRevocation* event, Klass* k, VM_BulkRevokeBias* op) {
 646   assert(event != NULL, "invariant");
 647   assert(k != NULL, "invariant");
 648   assert(op != NULL, "invariant");
 649   assert(event->should_commit(), "invariant");
 650   event->set_revokedClass(k);
 651   event->set_disableBiasing(!op->is_bulk_rebias());
 652   event->set_safepointId(op->safepoint_id());
 653   event->commit();
 654 }
 655 
 656 
 657 BiasedLocking::Condition BiasedLocking::single_revoke_with_handshake(Handle obj, JavaThread *requester, JavaThread *biaser) {
 658 
 659   EventBiasedLockRevocation event;
 660   if (PrintBiasedLockingStatistics) {
 661     Atomic::inc(handshakes_count_addr());
 662   }
 663   log_info(biasedlocking, handshake)("JavaThread " INTPTR_FORMAT " handshaking JavaThread "
 664                                      INTPTR_FORMAT " to revoke object " INTPTR_FORMAT, p2i(requester),
 665                                      p2i(biaser), p2i(obj()));
 666 
 667   RevokeOneBias revoke(obj, requester, biaser);
 668   bool executed = Handshake::execute(&revoke, biaser);
 669   if (revoke.status_code() == NOT_REVOKED) {
 670     return NOT_REVOKED;
 671   }
 672   if (executed) {
 673     log_info(biasedlocking, handshake)("Handshake revocation for object " INTPTR_FORMAT " succeeded. Bias was %srevoked",
 674                                        p2i(obj()), (revoke.status_code() == BIAS_REVOKED ? "" : "already "));
 675     if (event.should_commit() && revoke.status_code() == BIAS_REVOKED) {
 676       post_revocation_event(&event, obj->klass(), &revoke);
 677     }
 678     assert(!obj->mark()->has_bias_pattern(), "invariant");
 679     return revoke.status_code();
 680   } else {
 681     // Thread was not alive.
 682     // Grab Threads_lock before manually trying to revoke the bias. This avoids a race with
 683     // a newly created JavaThread (one that happens to get the same memory address as biaser)
 684     // synchronizing on this object.
 685     {
 686       MutexLocker ml(Threads_lock);
 687       markOop mark = obj->mark();
 688       // Check if somebody else was able to revoke it before biased thread exited.
 689       if (!mark->has_bias_pattern()) {
 690         return NOT_BIASED;
 691       }
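            // ThreadsListHandle provides a stable snapshot of the live
            // JavaThreads. Since we hold Threads_lock, no new JavaThread can be
            // added (and reuse biaser's address) while we inspect the mark.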
 692       ThreadsListHandle tlh;
 693       markOop prototype = obj->klass()->prototype_header();
 694       if (!prototype->has_bias_pattern() || (!tlh.includes(biaser) && biaser == mark->biased_locker() &&
 695                                               prototype->bias_epoch() == mark->bias_epoch())) {
 696         obj->cas_set_mark(markOopDesc::prototype()->set_age(mark->age()), mark);
 697         if (event.should_commit()) {
 698           post_revocation_event(&event, obj->klass(), &revoke);
 699         }
 700         assert(!obj->mark()->has_bias_pattern(), "bias should be revoked by now");
 701         return BIAS_REVOKED;
 702       }
 703     }
 704   }
 705 
 706   return NOT_REVOKED;
 707 }
 708 
 709 
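      // For reference, the 64-bit biased mark word layout (see markOop.hpp) is
      //   [JavaThread*:54 | epoch:2 | unused:1 | age:4 | biased_lock:1 | lock:2]
      // where biased_lock:1 together with lock:01 forms the 0b101 bias pattern.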
 710 // Caller should have instantiated a ResourceMark object before calling this method
 711 void BiasedLocking::walk_stack_and_revoke(oop obj, JavaThread* biased_locker) {
 712   assert(!SafepointSynchronize::is_at_safepoint() || !ThreadLocalHandshakes,
 713          "if ThreadLocalHandshakes is enabled this should always be executed outside safepoints");
 714   assert(Thread::current() == biased_locker || Thread::current()->is_VM_thread(), "wrong thread");
 715 
 716   markOop mark = obj->mark();
 717   assert(mark->biased_locker() == biased_locker &&
 718          obj->klass()->prototype_header()->bias_epoch() == mark->bias_epoch(), "invariant");
 719 
 720   log_trace(biasedlocking)("%s(" INTPTR_FORMAT ") revoking object " INTPTR_FORMAT ", mark "
 721                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
 722                            ", biaser " INTPTR_FORMAT " %s",
 723                            Thread::current()->is_VM_thread() ? "VMThread" : "JavaThread",
 724                            p2i(Thread::current()),
 725                            p2i(obj),
 726                            p2i(mark),
 727                            obj->klass()->external_name(),
 728                            p2i(obj->klass()->prototype_header()),
 729                            p2i(biased_locker),
 730                            Thread::current()->is_VM_thread() ? "" : "(walking own stack)");
 731 
 732   markOop unbiased_prototype = markOopDesc::prototype()->set_age(obj->mark()->age());
 733 
 734   GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_locker);
 735   BasicLock* highest_lock = NULL;
 736   for (int i = 0; i < cached_monitor_info->length(); i++) {
 737     MonitorInfo* mon_info = cached_monitor_info->at(i);
 738     if (oopDesc::equals(mon_info->owner(), obj)) {
 739       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
 740                                p2i(mon_info->owner()),
 741                                p2i(obj));
 742       // Assume recursive case and fix up highest lock below
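            // (a NULL displaced header is how a recursive stack lock is marked)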
 743       markOop displaced = markOopDesc::encode((BasicLock*) NULL);
 744       highest_lock = mon_info->lock();
 745       highest_lock->set_displaced_header(displaced);
 746     } else {
 747       log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
 748                                p2i(mon_info->owner()),
 749                                p2i(obj));
 750     }
 751   }
 752   if (highest_lock != NULL) {
 753     // Fix up highest lock to contain displaced header and point
 754     // object at it
 755     highest_lock->set_displaced_header(unbiased_prototype);
 756     // Reset the object header to point to the displaced mark.
 757     // Must release-store the lock address for platforms without TSO
 758     // ordering (e.g. ppc).
 759     obj->release_set_mark(markOopDesc::encode(highest_lock));
 760     assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
 761     log_info(biasedlocking)("  Revoked bias of currently-locked object");
 762   } else {
 763     log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
 764     // Store the unlocked value into the object's header.
 765     obj->set_mark(unbiased_prototype);
 766   }
 767 
 768   assert(!obj->mark()->has_bias_pattern(), "must not be biased");
 769 }
 770 
 771 
 772 BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
 773   assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
 774 
 775   while (true) {
 776     // We can revoke the bias of an anonymously-biased object cheaply
 777     // enough that such a revocation should not update the heuristics;
 778     // updating them could trigger unwanted (and expensive) bulk
 779     // revocations.
 780     markOop mark = obj->mark();
 781     if (mark->is_biased_anonymously() && !attempt_rebias) {
 782       // We are probably trying to revoke the bias of this object due to
 783       // an identity hash code computation. Try to revoke the bias
 784       // without a safepoint. This is possible if we can successfully
 785       // compare-and-exchange an unbiased header into the mark word of
 786       // the object, meaning that no other thread has raced to acquire
 787       // the bias of the object.
 788       markOop biased_value       = mark;
 789       markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
 790       markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
 791       if (res_mark == biased_value) {
 792         return BIAS_REVOKED;
 793       }
 794       mark = res_mark;  // Refresh mark with the latest value.
 795     } else if (mark->has_bias_pattern()) {
 796       Klass* k = obj->klass();
 797       markOop prototype_header = k->prototype_header();
 798       if (!prototype_header->has_bias_pattern()) {
 799         // This object has a stale bias from before the bulk revocation
 800         // for this data type occurred. It's pointless to update the
 801         // heuristics at this point so simply update the header with a
 802         // CAS. If we fail this race, the object's bias has been revoked
 803         // by another thread so we simply return and let the caller deal
 804         // with it.
 805         obj->cas_set_mark(prototype_header->set_age(mark->age()), mark);
 806         assert(!obj->mark()->has_bias_pattern(), "even if we raced, should still be revoked");
 807         return BIAS_REVOKED;
 808       } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
 809         // The epoch of this biasing has expired, indicating that the
 810         // object is effectively unbiased. Whether we need to rebias or
 811         // revoke the bias of this object, we can do it cheaply enough
 812         // with a CAS that we should not update the heuristics. This is
 813         // normally done in assembly code, but various places in the
 814         // runtime also need to revoke biases, so this path can be
 815         // reached from there as well.
 816         markOop res_mark;
 817         if (attempt_rebias) {
 818           assert(THREAD->is_Java_thread(), "");
 819           markOop biased_value       = mark;
 820           markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
 821           res_mark = obj->cas_set_mark(rebiased_prototype, mark);
 822           if (res_mark == biased_value) {
 823             return BIAS_REVOKED_AND_REBIASED;
 824           }
 825         } else {
 826           markOop biased_value       = mark;
 827           markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
 828           res_mark = obj->cas_set_mark(unbiased_prototype, mark);
 829           if (res_mark == biased_value) {
 830             return BIAS_REVOKED;
 831           }
 832         }
 833         mark = res_mark;  // Refresh mark with the latest value.
 834       }
 835     }
 836 
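          // At this point the object is either unbiased, biased with a valid
          // epoch, or a CAS above lost a race. Consult the heuristics to choose
          // between a single revocation and a bulk revocation or rebias.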
 837     HeuristicsResult heuristics = update_heuristics(obj());
 838     if (heuristics == HR_NOT_BIASED) {
 839       return NOT_BIASED;
 840     } else if (heuristics == HR_SINGLE_REVOKE) {
 841       JavaThread *blt = mark->biased_locker();
 842       assert(blt != NULL, "invariant");
 843       if (blt == THREAD) {
 844         // A thread is trying to revoke the bias of an object biased
 845         // toward it, again likely due to an identity hash code
 846         // computation. We can again avoid a safepoint/handshake in this case
 847         // since we are only going to walk our own stack. There are no
 848         // races with revocations occurring in other threads because we
 849         // reach no safepoints in the revocation path.
 850         EventBiasedLockSelfRevocation event;
 851         ResourceMark rm;
 852         walk_stack_and_revoke(obj(), blt);
 853         blt->set_cached_monitor_info(NULL);
 854         assert(!obj->mark()->has_bias_pattern(), "invariant");
 855         if (event.should_commit()) {
 856           post_self_revocation_event(&event, obj->klass());
 857         }
 858         return BIAS_REVOKED;
 859       } else {
 860         BiasedLocking::Condition cond = single_revoke_with_handshake(obj, (JavaThread*)THREAD, blt);
 861         if (cond != NOT_REVOKED) {
 862           return cond;
 863         }
 864       }
 865     } else {
 866       assert((heuristics == HR_BULK_REVOKE) ||
 867              (heuristics == HR_BULK_REBIAS), "unexpected heuristics result");
 868       EventBiasedLockClassRevocation event;
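            // Bulk revocation or rebiasing touches every instance of the
            // class, so it is performed as a VM operation at a safepoint.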
 869       VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*)THREAD,
 870                                     (heuristics == HR_BULK_REBIAS),
 871                                     attempt_rebias);
 872       VMThread::execute(&bulk_revoke);
 873       if (event.should_commit()) {
 874         post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
 875       }
 876       return bulk_revoke.status_code();
 877     }
 878   }
 879 }
 880 
 881 // All objects in objs should be locked by biaser
 882 void BiasedLocking::revoke(GrowableArray<Handle>* objs, JavaThread *biaser) {
 883   bool clean_my_cache = false;
 884   for (int i = 0; i < objs->length(); i++) {
 885     oop obj = (objs->at(i))();
 886     markOop mark = obj->mark();
 887     if (mark->has_bias_pattern()) {
 888       walk_stack_and_revoke(obj, biaser);
 889       clean_my_cache = true;
 890     }
 891   }
 892   if (clean_my_cache) {
 893     clean_up_cached_monitor_info(biaser);
 894   }
 895 }
 896 
 897 
 898 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
 899   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
 900   oop obj = h_obj();
 901   HeuristicsResult heuristics = update_heuristics(obj);
 902   if (heuristics == HR_SINGLE_REVOKE) {
 903     JavaThread* biased_locker = NULL;
 904     single_revoke_at_safepoint(obj, false, false, NULL, &biased_locker);
 905     if (biased_locker) {
 906       clean_up_cached_monitor_info(biased_locker);
 907     }
 908   } else if ((heuristics == HR_BULK_REBIAS) ||
 909              (heuristics == HR_BULK_REVOKE)) {
 910     bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
 911     clean_up_cached_monitor_info();
 912   }
 913 }
 914 
 915 
 916 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
 917   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
 918   int len = objs->length();
 919   for (int i = 0; i < len; i++) {
 920     oop obj = (objs->at(i))();
 921     HeuristicsResult heuristics = update_heuristics(obj);
 922     if (heuristics == HR_SINGLE_REVOKE) {
 923       single_revoke_at_safepoint(obj, false, false, NULL, NULL);
 924     } else if ((heuristics == HR_BULK_REBIAS) ||
 925                (heuristics == HR_BULK_REVOKE)) {
 926       bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
 927     }
 928   }
 929   clean_up_cached_monitor_info();
 930 }
 931 
 932 
 933 void BiasedLocking::preserve_marks() {
 934   if (!UseBiasedLocking)
 935     return;
 936 
 937   assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
 938 
 939   assert(_preserved_oop_stack  == NULL, "double initialization");
 940   assert(_preserved_mark_stack == NULL, "double initialization");
 941 
 942   // In order to reduce the number of mark words preserved during GC
 943   // due to the presence of biased locking, we reinitialize most mark


 989 
 990   int len = _preserved_oop_stack->length();
 991   for (int i = 0; i < len; i++) {
 992     Handle owner = _preserved_oop_stack->at(i);
 993     markOop mark = _preserved_mark_stack->at(i);
 994     owner->set_mark(mark);
 995   }
 996 
 997   delete _preserved_oop_stack;
 998   _preserved_oop_stack = NULL;
 999   delete _preserved_mark_stack;
1000   _preserved_mark_stack = NULL;
1001 }
1002 
1003 
1004 int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
1005 int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
1006 int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
1007 int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
1008 int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
1009 int* BiasedLocking::handshakes_count_addr()                    { return _counters.handshakes_count_addr(); }
1010 int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
1011 int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }
1012 
1013 
1014 // BiasedLockingCounters
1015 
1016 int BiasedLockingCounters::slow_path_entry_count() const {
1017   if (_slow_path_entry_count != 0) {
1018     return _slow_path_entry_count;
1019   }
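        // The slow path counter was not maintained directly. Every monitor
        // entry falls into exactly one category, so derive the slow path count
        // as the total entry count minus the sum of the other categories.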
1020   int sum = _biased_lock_entry_count   + _anonymously_biased_lock_entry_count +
1021             _rebiased_lock_entry_count + _revoked_lock_entry_count +
1022             _fast_path_entry_count;
1023 
1024   return _total_entry_count - sum;
1025 }
1026 
1027 void BiasedLockingCounters::print_on(outputStream* st) const {
1028   st->print_cr("# total entries: %d", _total_entry_count);
1029   st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
1030   st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
1031   st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
1032   st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
1033   st->print_cr("# handshake entries: %d", _handshakes_count);
1034   st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
1035   st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
1036 }
1037 
1038 void BiasedLockingCounters::print() const { print_on(tty); }