/*
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/task.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "trace/tracing.hpp"

static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;

static void enable_biased_locking(InstanceKlass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}

class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the class loader data dictionaries enabling biased locking for all
    // currently loaded classes.
    ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    log_info(biasedlocking)("Biased locking enabled");
  }

  bool allow_nested_vm_operations() const { return false; }
};


// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
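    // (Passing 'true' marks the operation as cheap-allocated: per the class
    // above, evaluation_mode() then returns _async_safepoint, so execute()
    // does not block this thread, and the VM thread frees the operation
    // once it has been evaluated.)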
    VM_EnableBiasedLocking* op = new VM_EnableBiasedLocking(true);
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourselves
    delete this;
  }
};


void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
  // seconds into the run which turns on biased locking for all
  // currently loaded classes as well as future ones. This is a
  // workaround for startup time regressions due to a large number of
  // safepoints being taken during VM startup for bias revocation.
  // Ideally we would have a lower cost for individual bias revocation
  // and not need a mechanism like this.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op(false);
      VMThread::execute(&op);
    }
  }
}


bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*>* monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}

// After the call, *biased_locker will be set to obj->mark()->biased_locker()
// if biased_locker != NULL and the biased locker is a living thread.
// Otherwise it will not be updated (i.e., the caller is responsible for
// initializing it).
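// Note: revocation has to inspect the stack of the thread toward which the
// object is biased, so it is only safe either at a safepoint or when the
// requesting thread is revoking a bias toward itself (see the callers below).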
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)(" (Skipping revocation of object " INTPTR_FORMAT
                              ", mark " INTPTR_FORMAT ", type %s"
                              ", requesting thread " INTPTR_FORMAT
                              " because it's no longer biased)",
                              p2i((void *)obj), (intptr_t) mark,
                              obj->klass()->external_name(),
                              (intptr_t) requesting_thread);
    }
    return BiasedLocking::NOT_BIASED;
  }

  uint age = mark->age();
  markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                            ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            (intptr_t) mark,
                            obj->klass()->external_name(),
                            (intptr_t) obj->klass()->prototype_header(),
                            (allow_rebias ? 1 : 0),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                             INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                             ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             (intptr_t) mark,
                             obj->klass()->external_name(),
                             (intptr_t) obj->klass()->prototype_header(),
                             (allow_rebias ? 1 : 0),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
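    // (An anonymously biased object has the bias pattern set in its mark
    // word but no owner thread ID yet; the first thread to lock it would
    // normally have installed its own ID there with a CAS.)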
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    ThreadsListHandle tlh;
    thread_is_alive = tlh.includes(biased_thread);
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
                              PTR_FORMAT ")", p2i(biased_thread));
    } else {
      log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
                               PTR_FORMAT ")", p2i(biased_thread));
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    log_info(biasedlocking)(" Revoked bias of object biased toward live thread ("
                            PTR_FORMAT ")", p2i(biased_thread));
  } else {
    log_trace(biasedlocking)(" Revoked bias of object biased toward live thread ("
                             PTR_FORMAT ")", p2i(biased_thread));
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      log_trace(biasedlocking)(" mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)(" mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release-store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)(" Revoked bias of currently-locked object");
    }
  } else {
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)(" Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }

  return BiasedLocking::BIAS_REVOKED;
}


enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
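    // (For example, with the default flag values -- BiasedLockingBulkRebiasThreshold=20,
    // BiasedLockingBulkRevokeThreshold=40, BiasedLockingDecayTime=25000 ms -- the 20th
    // per-type revocation triggers a bulk rebias, and only if another 20 revocations
    // accumulate without a 25-second quiet period does the 40th trigger a bulk revoke.)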
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}


static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT ", mark " INTPTR_FORMAT ", type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header()->has_bias_pattern()) {
        int prev_epoch = klass->prototype_header()->bias_epoch();
        klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
        int cur_epoch = klass->prototype_header()->bias_epoch();

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markOop mark = owner->mark();
            if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark->set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markOopDesc::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            revoke_bias(owner, false, true, requesting_thread, NULL);
          }
        }
      }

      // The bias of the passed object must be forcibly revoked as well
      // to ensure guarantees to callers
      revoke_bias(o, false, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}


static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    thr->set_cached_monitor_info(NULL);
  }
}


class VM_RevokeBias : public VM_Operation {
 protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;

 public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }

  virtual bool doit_prologue() {
    // Verify that there is actual work to do since the callers just
    // give us locked object(s). If we don't find any biased objects
    // there is nothing to do and we avoid a safepoint.
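    // (doit_prologue is executed by the requesting thread inside
    // VMThread::execute() before the operation is queued; returning false
    // here skips the VM operation entirely.)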
    if (_obj != NULL) {
      markOop mark = (*_obj)()->mark();
      if (mark->has_bias_pattern()) {
        return true;
      }
    } else {
      for (int i = 0; i < _objs->length(); i++) {
        markOop mark = (_objs->at(i))()->mark();
        if (mark->has_bias_pattern()) {
          return true;
        }
      }
    }
    return false;
  }

  virtual void doit() {
    if (_obj != NULL) {
      log_info(biasedlocking)("Revoking bias with potentially per-thread safepoint:");
      JavaThread* biased_locker = NULL;
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
      if (biased_locker != NULL) {
        _biased_locker_id = THREAD_TRACE_ID(biased_locker);
      }
      clean_up_cached_monitor_info();
      return;
    } else {
      log_info(biasedlocking)("Revoking bias with global safepoint:");
      BiasedLocking::revoke_at_safepoint(_objs);
    }
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  traceid biased_locker() const {
    return _biased_locker_id;
  }
};


class VM_BulkRevokeBias : public VM_RevokeBias {
 private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue()   { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};


BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value       = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
  } else if (mark->has_bias_pattern()) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value = mark;
      markOop res_mark = obj->cas_set_mark(prototype_header, mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value       = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = obj->cas_set_mark(rebiased_prototype, mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value       = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
    }
  }

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      // Also check the epoch because even if threads match, another thread
      // can come in with a CAS to steal the bias of an object that has a
      // stale epoch.
      ResourceMark rm;
      log_info(biasedlocking)("Revoking bias by walking my own stack:");
      EventBiasedLockSelfRevocation event;
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      if (event.should_commit()) {
        event.set_lockClass(k);
        event.commit();
      }
      return cond;
    } else {
      EventBiasedLockRevocation event;
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
        event.set_lockClass(k);
        // Subtract 1 to match the id of events committed inside the safepoint
        event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
        event.set_previousOwner(revoke.biased_locker());
        event.commit();
      }
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  EventBiasedLockClassRevocation event;
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  if (event.should_commit()) {
    event.set_revokedClass(obj->klass());
    event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
    // Subtract 1 to match the id of events committed inside the safepoint
    event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
    event.commit();
  }
  return bulk_revoke.status_code();
}


void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL, NULL);
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
  _preserved_oop_stack  = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*>* monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}


void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack  != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markOop mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}


int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }


// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) {
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}