1 /* 2 * Copyright (c) 2005, 2014, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "oops/klass.inline.hpp" 27 #include "oops/markOop.hpp" 28 #include "runtime/basicLock.hpp" 29 #include "runtime/biasedLocking.hpp" 30 #include "runtime/task.hpp" 31 #include "runtime/vframe.hpp" 32 #include "runtime/vmThread.hpp" 33 #include "runtime/vm_operations.hpp" 34 #if INCLUDE_JFR 35 #include "jfr/support/jfrThreadId.hpp" 36 #include "jfr/jfrEvents.hpp" 37 #endif 38 39 static bool _biased_locking_enabled = false; 40 BiasedLockingCounters BiasedLocking::_counters; 41 42 static GrowableArray<Handle>* _preserved_oop_stack = NULL; 43 static GrowableArray<markOop>* _preserved_mark_stack = NULL; 44 45 static void enable_biased_locking(Klass* k) { 46 k->set_prototype_header(markOopDesc::biased_locking_prototype()); 47 } 48 49 class VM_EnableBiasedLocking: public VM_Operation { 50 private: 51 bool _is_cheap_allocated; 52 public: 53 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; } 54 VMOp_Type type() const { return VMOp_EnableBiasedLocking; } 55 Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; } 56 bool is_cheap_allocated() const { return _is_cheap_allocated; } 57 58 void doit() { 59 // Iterate the system dictionary enabling biased locking for all 60 // currently loaded classes 61 SystemDictionary::classes_do(enable_biased_locking); 62 // Indicate that future instances should enable it as well 63 _biased_locking_enabled = true; 64 65 if (TraceBiasedLocking) { 66 tty->print_cr("Biased locking enabled"); 67 } 68 } 69 70 bool allow_nested_vm_operations() const { return false; } 71 }; 72 73 74 // One-shot PeriodicTask subclass for enabling biased locking 75 class EnableBiasedLockingTask : public PeriodicTask { 76 public: 77 EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {} 78 79 virtual void task() { 80 // Use async VM operation to avoid blocking the Watcher thread. 81 // VM Thread will free C heap storage. 82 VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true); 83 VMThread::execute(op); 84 85 // Reclaim our storage and disenroll ourself 86 delete this; 87 } 88 }; 89 90 91 void BiasedLocking::init() { 92 // If biased locking is enabled, schedule a task to fire a few 93 // seconds into the run which turns on biased locking for all 94 // currently loaded classes as well as future ones. This is a 95 // workaround for startup time regressions due to a large number of 96 // safepoints being taken during VM startup for bias revocation. 97 // Ideally we would have a lower cost for individual bias revocation 98 // and not need a mechanism like this. 99 if (UseBiasedLocking) { 100 if (BiasedLockingStartupDelay > 0) { 101 EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay); 102 task->enroll(); 103 } else { 104 VM_EnableBiasedLocking op(false); 105 VMThread::execute(&op); 106 } 107 } 108 } 109 110 111 bool BiasedLocking::enabled() { 112 return _biased_locking_enabled; 113 } 114 115 // Returns MonitorInfos for all objects locked on this thread in youngest to oldest order 116 static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) { 117 GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info(); 118 if (info != NULL) { 119 return info; 120 } 121 122 info = new GrowableArray<MonitorInfo*>(); 123 124 // It's possible for the thread to not have any Java frames on it, 125 // i.e., if it's the main thread and it's already returned from main() 126 if (thread->has_last_Java_frame()) { 127 RegisterMap rm(thread); 128 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) { 129 GrowableArray<MonitorInfo*> *monitors = vf->monitors(); 130 if (monitors != NULL) { 131 int len = monitors->length(); 132 // Walk monitors youngest to oldest 133 for (int i = len - 1; i >= 0; i--) { 134 MonitorInfo* mon_info = monitors->at(i); 135 if (mon_info->eliminated()) continue; 136 oop owner = mon_info->owner(); 137 if (owner != NULL) { 138 info->append(mon_info); 139 } 140 } 141 } 142 } 143 } 144 145 thread->set_cached_monitor_info(info); 146 return info; 147 } 148 149 // After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL, 150 // AND it is a living thread. Otherwise it will not be updated, (i.e. the caller is responsible for initialization). 151 static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) { 152 markOop mark = obj->mark(); 153 if (!mark->has_bias_pattern()) { 154 if (TraceBiasedLocking) { 155 ResourceMark rm; 156 tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)", 157 obj->klass()->external_name()); 158 } 159 return BiasedLocking::NOT_BIASED; 160 } 161 162 uint age = mark->age(); 163 markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age); 164 markOop unbiased_prototype = markOopDesc::prototype()->set_age(age); 165 166 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 167 ResourceMark rm; 168 tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT, 169 p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread); 170 } 171 172 JavaThread* biased_thread = mark->biased_locker(); 173 if (biased_thread == NULL) { 174 // Object is anonymously biased. We can get here if, for 175 // example, we revoke the bias due to an identity hash code 176 // being computed for an object. 177 if (!allow_rebias) { 178 obj->set_mark(unbiased_prototype); 179 } 180 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 181 tty->print_cr(" Revoked bias of anonymously-biased object"); 182 } 183 return BiasedLocking::BIAS_REVOKED; 184 } 185 186 // Handle case where the thread toward which the object was biased has exited 187 bool thread_is_alive = false; 188 if (requesting_thread == biased_thread) { 189 thread_is_alive = true; 190 } else { 191 for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) { 192 if (cur_thread == biased_thread) { 193 thread_is_alive = true; 194 break; 195 } 196 } 197 } 198 if (!thread_is_alive) { 199 if (allow_rebias) { 200 obj->set_mark(biased_prototype); 201 } else { 202 obj->set_mark(unbiased_prototype); 203 } 204 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 205 tty->print_cr(" Revoked bias of object biased toward dead thread"); 206 } 207 return BiasedLocking::BIAS_REVOKED; 208 } 209 210 // Thread owning bias is alive. 211 // Check to see whether it currently owns the lock and, if so, 212 // write down the needed displaced headers to the thread's stack. 213 // Otherwise, restore the object's header either to the unlocked 214 // or unbiased state. 215 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread); 216 BasicLock* highest_lock = NULL; 217 for (int i = 0; i < cached_monitor_info->length(); i++) { 218 MonitorInfo* mon_info = cached_monitor_info->at(i); 219 if (mon_info->owner() == obj) { 220 if (TraceBiasedLocking && Verbose) { 221 tty->print_cr(" mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")", 222 p2i((void *) mon_info->owner()), 223 p2i((void *) obj)); 224 } 225 // Assume recursive case and fix up highest lock later 226 markOop mark = markOopDesc::encode((BasicLock*) NULL); 227 highest_lock = mon_info->lock(); 228 highest_lock->set_displaced_header(mark); 229 } else { 230 if (TraceBiasedLocking && Verbose) { 231 tty->print_cr(" mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")", 232 p2i((void *) mon_info->owner()), 233 p2i((void *) obj)); 234 } 235 } 236 } 237 if (highest_lock != NULL) { 238 // Fix up highest lock to contain displaced header and point 239 // object at it 240 highest_lock->set_displaced_header(unbiased_prototype); 241 // Reset object header to point to displaced mark. 242 // Must release storing the lock address for platforms without TSO 243 // ordering (e.g. ppc). 244 obj->release_set_mark(markOopDesc::encode(highest_lock)); 245 assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit"); 246 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 247 tty->print_cr(" Revoked bias of currently-locked object"); 248 } 249 } else { 250 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 251 tty->print_cr(" Revoked bias of currently-unlocked object"); 252 } 253 if (allow_rebias) { 254 obj->set_mark(biased_prototype); 255 } else { 256 // Store the unlocked value into the object's header. 257 obj->set_mark(unbiased_prototype); 258 } 259 } 260 261 // If requested, return information on which thread held the bias 262 if (biased_locker != NULL) { 263 *biased_locker = biased_thread; 264 } 265 266 return BiasedLocking::BIAS_REVOKED; 267 } 268 269 270 enum HeuristicsResult { 271 HR_NOT_BIASED = 1, 272 HR_SINGLE_REVOKE = 2, 273 HR_BULK_REBIAS = 3, 274 HR_BULK_REVOKE = 4 275 }; 276 277 278 static HeuristicsResult update_heuristics(oop o, bool allow_rebias) { 279 markOop mark = o->mark(); 280 if (!mark->has_bias_pattern()) { 281 return HR_NOT_BIASED; 282 } 283 284 // Heuristics to attempt to throttle the number of revocations. 285 // Stages: 286 // 1. Revoke the biases of all objects in the heap of this type, 287 // but allow rebiasing of those objects if unlocked. 288 // 2. Revoke the biases of all objects in the heap of this type 289 // and don't allow rebiasing of these objects. Disable 290 // allocation of objects of that type with the bias bit set. 291 Klass* k = o->klass(); 292 jlong cur_time = os::javaTimeMillis(); 293 jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time(); 294 int revocation_count = k->biased_lock_revocation_count(); 295 if ((revocation_count >= BiasedLockingBulkRebiasThreshold) && 296 (revocation_count < BiasedLockingBulkRevokeThreshold) && 297 (last_bulk_revocation_time != 0) && 298 (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) { 299 // This is the first revocation we've seen in a while of an 300 // object of this type since the last time we performed a bulk 301 // rebiasing operation. The application is allocating objects in 302 // bulk which are biased toward a thread and then handing them 303 // off to another thread. We can cope with this allocation 304 // pattern via the bulk rebiasing mechanism so we reset the 305 // klass's revocation count rather than allow it to increase 306 // monotonically. If we see the need to perform another bulk 307 // rebias operation later, we will, and if subsequently we see 308 // many more revocation operations in a short period of time we 309 // will completely disable biasing for this type. 310 k->set_biased_lock_revocation_count(0); 311 revocation_count = 0; 312 } 313 314 // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold 315 if (revocation_count <= BiasedLockingBulkRevokeThreshold) { 316 revocation_count = k->atomic_incr_biased_lock_revocation_count(); 317 } 318 319 if (revocation_count == BiasedLockingBulkRevokeThreshold) { 320 return HR_BULK_REVOKE; 321 } 322 323 if (revocation_count == BiasedLockingBulkRebiasThreshold) { 324 return HR_BULK_REBIAS; 325 } 326 327 return HR_SINGLE_REVOKE; 328 } 329 330 331 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o, 332 bool bulk_rebias, 333 bool attempt_rebias_of_object, 334 JavaThread* requesting_thread) { 335 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint"); 336 337 if (TraceBiasedLocking) { 338 tty->print_cr("* Beginning bulk revocation (kind == %s) because of object " 339 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", 340 (bulk_rebias ? "rebias" : "revoke"), 341 p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name()); 342 } 343 344 jlong cur_time = os::javaTimeMillis(); 345 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time); 346 347 348 Klass* k_o = o->klass(); 349 Klass* klass = k_o; 350 351 if (bulk_rebias) { 352 // Use the epoch in the klass of the object to implicitly revoke 353 // all biases of objects of this data type and force them to be 354 // reacquired. However, we also need to walk the stacks of all 355 // threads and update the headers of lightweight locked objects 356 // with biases to have the current epoch. 357 358 // If the prototype header doesn't have the bias pattern, don't 359 // try to update the epoch -- assume another VM operation came in 360 // and reset the header to the unbiased state, which will 361 // implicitly cause all existing biases to be revoked 362 if (klass->prototype_header()->has_bias_pattern()) { 363 int prev_epoch = klass->prototype_header()->bias_epoch(); 364 klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch()); 365 int cur_epoch = klass->prototype_header()->bias_epoch(); 366 367 // Now walk all threads' stacks and adjust epochs of any biased 368 // and locked objects of this data type we encounter 369 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) { 370 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr); 371 for (int i = 0; i < cached_monitor_info->length(); i++) { 372 MonitorInfo* mon_info = cached_monitor_info->at(i); 373 oop owner = mon_info->owner(); 374 markOop mark = owner->mark(); 375 if ((owner->klass() == k_o) && mark->has_bias_pattern()) { 376 // We might have encountered this object already in the case of recursive locking 377 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment"); 378 owner->set_mark(mark->set_bias_epoch(cur_epoch)); 379 } 380 } 381 } 382 } 383 384 // At this point we're done. All we have to do is potentially 385 // adjust the header of the given object to revoke its bias. 386 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL); 387 } else { 388 if (TraceBiasedLocking) { 389 ResourceMark rm; 390 tty->print_cr("* Disabling biased locking for type %s", klass->external_name()); 391 } 392 393 // Disable biased locking for this data type. Not only will this 394 // cause future instances to not be biased, but existing biased 395 // instances will notice that this implicitly caused their biases 396 // to be revoked. 397 klass->set_prototype_header(markOopDesc::prototype()); 398 399 // Now walk all threads' stacks and forcibly revoke the biases of 400 // any locked and biased objects of this data type we encounter. 401 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) { 402 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr); 403 for (int i = 0; i < cached_monitor_info->length(); i++) { 404 MonitorInfo* mon_info = cached_monitor_info->at(i); 405 oop owner = mon_info->owner(); 406 markOop mark = owner->mark(); 407 if ((owner->klass() == k_o) && mark->has_bias_pattern()) { 408 revoke_bias(owner, false, true, requesting_thread, NULL); 409 } 410 } 411 } 412 413 // Must force the bias of the passed object to be forcibly revoked 414 // as well to ensure guarantees to callers 415 revoke_bias(o, false, true, requesting_thread, NULL); 416 } 417 418 if (TraceBiasedLocking) { 419 tty->print_cr("* Ending bulk revocation"); 420 } 421 422 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED; 423 424 if (attempt_rebias_of_object && 425 o->mark()->has_bias_pattern() && 426 klass->prototype_header()->has_bias_pattern()) { 427 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(), 428 klass->prototype_header()->bias_epoch()); 429 o->set_mark(new_mark); 430 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED; 431 if (TraceBiasedLocking) { 432 tty->print_cr(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread); 433 } 434 } 435 436 assert(!o->mark()->has_bias_pattern() || 437 (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)), 438 "bug in bulk bias revocation"); 439 440 return status_code; 441 } 442 443 444 static void clean_up_cached_monitor_info() { 445 // Walk the thread list clearing out the cached monitors 446 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) { 447 thr->set_cached_monitor_info(NULL); 448 } 449 } 450 451 452 class VM_RevokeBias : public VM_Operation { 453 protected: 454 Handle* _obj; 455 GrowableArray<Handle>* _objs; 456 JavaThread* _requesting_thread; 457 BiasedLocking::Condition _status_code; 458 JFR_ONLY(traceid _biased_locker_id;) 459 460 public: 461 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread) 462 : _obj(obj) 463 , _objs(NULL) 464 , _requesting_thread(requesting_thread) 465 , _status_code(BiasedLocking::NOT_BIASED) 466 #if INCLUDE_JFR 467 , _biased_locker_id(0) 468 #endif 469 {} 470 471 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread) 472 : _obj(NULL) 473 , _objs(objs) 474 , _requesting_thread(requesting_thread) 475 , _status_code(BiasedLocking::NOT_BIASED) 476 #if INCLUDE_JFR 477 , _biased_locker_id(0) 478 #endif 479 {} 480 481 virtual VMOp_Type type() const { return VMOp_RevokeBias; } 482 483 virtual bool doit_prologue() { 484 // Verify that there is actual work to do since the callers just 485 // give us locked object(s). If we don't find any biased objects 486 // there is nothing to do and we avoid a safepoint. 487 if (_obj != NULL) { 488 markOop mark = (*_obj)()->mark(); 489 if (mark->has_bias_pattern()) { 490 return true; 491 } 492 } else { 493 for ( int i = 0 ; i < _objs->length(); i++ ) { 494 markOop mark = (_objs->at(i))()->mark(); 495 if (mark->has_bias_pattern()) { 496 return true; 497 } 498 } 499 } 500 return false; 501 } 502 503 virtual void doit() { 504 if (_obj != NULL) { 505 if (TraceBiasedLocking) { 506 tty->print_cr("Revoking bias with potentially per-thread safepoint:"); 507 } 508 JavaThread* biased_locker = NULL; 509 _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker); 510 #if INCLUDE_JFR 511 if (biased_locker != NULL) { 512 _biased_locker_id = JFR_THREAD_ID(biased_locker); 513 } 514 #endif 515 clean_up_cached_monitor_info(); 516 return; 517 } else { 518 if (TraceBiasedLocking) { 519 tty->print_cr("Revoking bias with global safepoint:"); 520 } 521 BiasedLocking::revoke_at_safepoint(_objs); 522 } 523 } 524 525 BiasedLocking::Condition status_code() const { 526 return _status_code; 527 } 528 529 #if INCLUDE_JFR 530 traceid biased_locker() const { 531 return _biased_locker_id; 532 } 533 #endif 534 }; 535 536 537 class VM_BulkRevokeBias : public VM_RevokeBias { 538 private: 539 bool _bulk_rebias; 540 bool _attempt_rebias_of_object; 541 542 public: 543 VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread, 544 bool bulk_rebias, 545 bool attempt_rebias_of_object) 546 : VM_RevokeBias(obj, requesting_thread) 547 , _bulk_rebias(bulk_rebias) 548 , _attempt_rebias_of_object(attempt_rebias_of_object) {} 549 550 virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; } 551 virtual bool doit_prologue() { return true; } 552 553 virtual void doit() { 554 _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread); 555 clean_up_cached_monitor_info(); 556 } 557 }; 558 559 560 BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) { 561 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint"); 562 563 // We can revoke the biases of anonymously-biased objects 564 // efficiently enough that we should not cause these revocations to 565 // update the heuristics because doing so may cause unwanted bulk 566 // revocations (which are expensive) to occur. 567 markOop mark = obj->mark(); 568 if (mark->is_biased_anonymously() && !attempt_rebias) { 569 // We are probably trying to revoke the bias of this object due to 570 // an identity hash code computation. Try to revoke the bias 571 // without a safepoint. This is possible if we can successfully 572 // compare-and-exchange an unbiased header into the mark word of 573 // the object, meaning that no other thread has raced to acquire 574 // the bias of the object. 575 markOop biased_value = mark; 576 markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); 577 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark); 578 if (res_mark == biased_value) { 579 return BIAS_REVOKED; 580 } 581 } else if (mark->has_bias_pattern()) { 582 Klass* k = obj->klass(); 583 markOop prototype_header = k->prototype_header(); 584 if (!prototype_header->has_bias_pattern()) { 585 // This object has a stale bias from before the bulk revocation 586 // for this data type occurred. It's pointless to update the 587 // heuristics at this point so simply update the header with a 588 // CAS. If we fail this race, the object's bias has been revoked 589 // by another thread so we simply return and let the caller deal 590 // with it. 591 markOop biased_value = mark; 592 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark); 593 assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked"); 594 return BIAS_REVOKED; 595 } else if (prototype_header->bias_epoch() != mark->bias_epoch()) { 596 // The epoch of this biasing has expired indicating that the 597 // object is effectively unbiased. Depending on whether we need 598 // to rebias or revoke the bias of this object we can do it 599 // efficiently enough with a CAS that we shouldn't update the 600 // heuristics. This is normally done in the assembly code but we 601 // can reach this point due to various points in the runtime 602 // needing to revoke biases. 603 if (attempt_rebias) { 604 assert(THREAD->is_Java_thread(), ""); 605 markOop biased_value = mark; 606 markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch()); 607 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark); 608 if (res_mark == biased_value) { 609 return BIAS_REVOKED_AND_REBIASED; 610 } 611 } else { 612 markOop biased_value = mark; 613 markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); 614 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark); 615 if (res_mark == biased_value) { 616 return BIAS_REVOKED; 617 } 618 } 619 } 620 } 621 622 HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias); 623 if (heuristics == HR_NOT_BIASED) { 624 return NOT_BIASED; 625 } else if (heuristics == HR_SINGLE_REVOKE) { 626 Klass *k = obj->klass(); 627 markOop prototype_header = k->prototype_header(); 628 if (mark->biased_locker() == THREAD && 629 prototype_header->bias_epoch() == mark->bias_epoch()) { 630 // A thread is trying to revoke the bias of an object biased 631 // toward it, again likely due to an identity hash code 632 // computation. We can again avoid a safepoint in this case 633 // since we are only going to walk our own stack. There are no 634 // races with revocations occurring in other threads because we 635 // reach no safepoints in the revocation path. 636 // Also check the epoch because even if threads match, another thread 637 // can come in with a CAS to steal the bias of an object that has a 638 // stale epoch. 639 ResourceMark rm; 640 if (TraceBiasedLocking) { 641 tty->print_cr("Revoking bias by walking my own stack:"); 642 } 643 #if INCLUDE_JFR 644 EventBiasedLockSelfRevocation event; 645 #endif 646 BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL); 647 ((JavaThread*) THREAD)->set_cached_monitor_info(NULL); 648 assert(cond == BIAS_REVOKED, "why not?"); 649 #if INCLUDE_JFR 650 if (event.should_commit()) { 651 event.set_lockClass(k); 652 event.commit(); 653 } 654 #endif 655 return cond; 656 } else { 657 #if INCLUDE_JFR 658 EventBiasedLockRevocation event; 659 #endif 660 VM_RevokeBias revoke(&obj, (JavaThread*) THREAD); 661 VMThread::execute(&revoke); 662 #if INCLUDE_JFR 663 if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) { 664 event.set_lockClass(k); 665 // Subtract 1 to match the id of events committed inside the safepoint 666 event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1); 667 event.set_previousOwner(revoke.biased_locker()); 668 event.commit(); 669 } 670 #endif 671 return revoke.status_code(); 672 } 673 } 674 675 assert((heuristics == HR_BULK_REVOKE) || 676 (heuristics == HR_BULK_REBIAS), "?"); 677 #if INCLUDE_JFR 678 EventBiasedLockClassRevocation event; 679 #endif 680 VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD, 681 (heuristics == HR_BULK_REBIAS), 682 attempt_rebias); 683 VMThread::execute(&bulk_revoke); 684 #if INCLUDE_JFR 685 if (event.should_commit()) { 686 event.set_revokedClass(obj->klass()); 687 event.set_disableBiasing((heuristics != HR_BULK_REBIAS)); 688 // Subtract 1 to match the id of events committed inside the safepoint 689 event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1); 690 event.commit(); 691 } 692 #endif 693 return bulk_revoke.status_code(); 694 } 695 696 697 void BiasedLocking::revoke(GrowableArray<Handle>* objs) { 698 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint"); 699 if (objs->length() == 0) { 700 return; 701 } 702 VM_RevokeBias revoke(objs, JavaThread::current()); 703 VMThread::execute(&revoke); 704 } 705 706 707 void BiasedLocking::revoke_at_safepoint(Handle h_obj) { 708 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); 709 oop obj = h_obj(); 710 HeuristicsResult heuristics = update_heuristics(obj, false); 711 if (heuristics == HR_SINGLE_REVOKE) { 712 revoke_bias(obj, false, false, NULL, NULL); 713 } else if ((heuristics == HR_BULK_REBIAS) || 714 (heuristics == HR_BULK_REVOKE)) { 715 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL); 716 } 717 clean_up_cached_monitor_info(); 718 } 719 720 721 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) { 722 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); 723 int len = objs->length(); 724 for (int i = 0; i < len; i++) { 725 oop obj = (objs->at(i))(); 726 HeuristicsResult heuristics = update_heuristics(obj, false); 727 if (heuristics == HR_SINGLE_REVOKE) { 728 revoke_bias(obj, false, false, NULL, NULL); 729 } else if ((heuristics == HR_BULK_REBIAS) || 730 (heuristics == HR_BULK_REVOKE)) { 731 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL); 732 } 733 } 734 clean_up_cached_monitor_info(); 735 } 736 737 738 void BiasedLocking::preserve_marks() { 739 if (!UseBiasedLocking) 740 return; 741 742 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); 743 744 assert(_preserved_oop_stack == NULL, "double initialization"); 745 assert(_preserved_mark_stack == NULL, "double initialization"); 746 747 // In order to reduce the number of mark words preserved during GC 748 // due to the presence of biased locking, we reinitialize most mark 749 // words to the class's prototype during GC -- even those which have 750 // a currently valid bias owner. One important situation where we 751 // must not clobber a bias is when a biased object is currently 752 // locked. To handle this case we iterate over the currently-locked 753 // monitors in a prepass and, if they are biased, preserve their 754 // mark words here. This should be a relatively small set of objects 755 // especially compared to the number of objects in the heap. 756 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true); 757 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true); 758 759 ResourceMark rm; 760 Thread* cur = Thread::current(); 761 for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) { 762 if (thread->has_last_Java_frame()) { 763 RegisterMap rm(thread); 764 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) { 765 GrowableArray<MonitorInfo*> *monitors = vf->monitors(); 766 if (monitors != NULL) { 767 int len = monitors->length(); 768 // Walk monitors youngest to oldest 769 for (int i = len - 1; i >= 0; i--) { 770 MonitorInfo* mon_info = monitors->at(i); 771 if (mon_info->owner_is_scalar_replaced()) continue; 772 oop owner = mon_info->owner(); 773 if (owner != NULL) { 774 markOop mark = owner->mark(); 775 if (mark->has_bias_pattern()) { 776 _preserved_oop_stack->push(Handle(cur, owner)); 777 _preserved_mark_stack->push(mark); 778 } 779 } 780 } 781 } 782 } 783 } 784 } 785 } 786 787 788 void BiasedLocking::restore_marks() { 789 if (!UseBiasedLocking) 790 return; 791 792 assert(_preserved_oop_stack != NULL, "double free"); 793 assert(_preserved_mark_stack != NULL, "double free"); 794 795 int len = _preserved_oop_stack->length(); 796 for (int i = 0; i < len; i++) { 797 Handle owner = _preserved_oop_stack->at(i); 798 markOop mark = _preserved_mark_stack->at(i); 799 owner->set_mark(mark); 800 } 801 802 delete _preserved_oop_stack; 803 _preserved_oop_stack = NULL; 804 delete _preserved_mark_stack; 805 _preserved_mark_stack = NULL; 806 } 807 808 809 int* BiasedLocking::total_entry_count_addr() { return _counters.total_entry_count_addr(); } 810 int* BiasedLocking::biased_lock_entry_count_addr() { return _counters.biased_lock_entry_count_addr(); } 811 int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); } 812 int* BiasedLocking::rebiased_lock_entry_count_addr() { return _counters.rebiased_lock_entry_count_addr(); } 813 int* BiasedLocking::revoked_lock_entry_count_addr() { return _counters.revoked_lock_entry_count_addr(); } 814 int* BiasedLocking::fast_path_entry_count_addr() { return _counters.fast_path_entry_count_addr(); } 815 int* BiasedLocking::slow_path_entry_count_addr() { return _counters.slow_path_entry_count_addr(); } 816 817 818 // BiasedLockingCounters 819 820 int BiasedLockingCounters::slow_path_entry_count() { 821 if (_slow_path_entry_count != 0) { 822 return _slow_path_entry_count; 823 } 824 int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count + 825 _rebiased_lock_entry_count + _revoked_lock_entry_count + 826 _fast_path_entry_count; 827 828 return _total_entry_count - sum; 829 } 830 831 void BiasedLockingCounters::print_on(outputStream* st) { 832 tty->print_cr("# total entries: %d", _total_entry_count); 833 tty->print_cr("# biased lock entries: %d", _biased_lock_entry_count); 834 tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count); 835 tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count); 836 tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count); 837 tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count); 838 tty->print_cr("# slow path lock entries: %d", slow_path_entry_count()); 839 }