/*
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/task.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"

static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;

static void enable_biased_locking(InstanceKlass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}

class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the class loader data dictionaries enabling biased locking for all
    // currently loaded classes.
    ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    log_info(biasedlocking)("Biased locking enabled");
  }

  bool allow_nested_vm_operations() const { return false; }
};

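// Illustrative context (a sketch, not used by the code below): the mark
// word states this file manipulates, per the bit layout documented in
// oops/markOop.hpp (64-bit layout shown, low-order bits on the right):
//
//   [JavaThread* | epoch | age | 1 | 01]   biased toward the given thread
//   [0           | epoch | age | 1 | 01]   anonymously biased, no owner yet
//   [header bits | 0     | age | 0 | 01]   unlocked ("unbiased prototype")
//
// has_bias_pattern() tests the three low bits against 101; revoking a bias
// means rewriting a mark from one of the first two rows into the third (or
// into a stack-lock/monitor state if the object is currently locked).
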
// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
    VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourself
    delete this;
  }
};


void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
  // seconds into the run which turns on biased locking for all
  // currently loaded classes as well as future ones. This is a
  // workaround for startup time regressions due to a large number of
  // safepoints being taken during VM startup for bias revocation.
  // Ideally we would have a lower cost for individual bias revocation
  // and not need a mechanism like this.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op(false);
      VMThread::execute(&op);
    }
  }
}


bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

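// Usage note (a sketch; the flags are defined in runtime/globals.hpp, which
// holds the authoritative defaults): the delayed path above is selected by,
// e.g.,
//
//   java -XX:+UseBiasedLocking -XX:BiasedLockingStartupDelay=4000 ...
//
// which enrolls the one-shot task to flip biased locking on roughly four
// seconds after startup, while -XX:BiasedLockingStartupDelay=0 enables it
// synchronously from init().
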
// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*> *monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}

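// The cache above is only valid for the duration of a single revocation
// operation: every path in this file that walks stacks finishes by calling
// clean_up_cached_monitor_info(), which nulls the per-thread cache. A sketch
// of the expected pattern inside one safepoint (illustrative only):
//
//   GrowableArray<MonitorInfo*>* mons = get_or_compute_monitor_info(thr);
//   ... consult mons, possibly for many objects ...
//   clean_up_cached_monitor_info();   // before the operation completes
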
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
                              ", mark " INTPTR_FORMAT ", type %s"
                              ", requesting thread " INTPTR_FORMAT
                              " because it's no longer biased)",
                              p2i((void *)obj), (intptr_t) mark,
                              obj->klass()->external_name(),
                              (intptr_t) requesting_thread);
    }
    return BiasedLocking::NOT_BIASED;
  }

  uint age = mark->age();
  markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                            ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            (intptr_t) mark,
                            obj->klass()->external_name(),
                            (intptr_t) obj->klass()->prototype_header(),
                            (allow_rebias ? 1 : 0),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                             INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                             " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             (intptr_t) mark,
                             obj->klass()->external_name(),
                             (intptr_t) obj->klass()->prototype_header(),
                             (allow_rebias ? 1 : 0),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    ThreadsListHandle tlh;
    JavaThreadIterator jti(tlh.list());
    for (JavaThread* cur_thread = jti.first(); cur_thread != NULL; cur_thread = jti.next()) {
      if (cur_thread == biased_thread) {
        thread_is_alive = true;
        break;
      }
    }
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                              PTR_FORMAT ")", p2i(biased_thread));
    } else {
      log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                               PTR_FORMAT ")", p2i(biased_thread));
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
                            PTR_FORMAT ")", p2i(biased_thread));
  } else {
    log_trace(biasedlocking)("  Revoked bias of object biased toward live thread ("
                             PTR_FORMAT ")", p2i(biased_thread));
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release storing the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-locked object");
    }
  } else {
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  return BiasedLocking::BIAS_REVOKED;
}

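// Worked example of the stack-lock reconstruction above (illustrative;
// assumes thread T had biased-locked obj recursively, lock1 acquired before
// lock2). The cached monitor info is ordered youngest to oldest, so the
// loop leaves highest_lock pointing at the oldest BasicLock:
//
//   before:  obj->mark() = [T | epoch | age | 1 | 01]   (biased toward T)
//            lock2.displaced_header = unused
//            lock1.displaced_header = unused
//
//   after:   obj->mark() = encode(lock1)                (stack-locked, low bits 00)
//            lock2.displaced_header = NULL              (recursive re-entry)
//            lock1.displaced_header = [unlocked | age]  (restored on final exit)
//
// i.e. the object ends up looking exactly as if T had stack-locked it the
// ordinary way from the start.
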
enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}

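// Worked example for update_heuristics() (a sketch assuming the usual
// HotSpot defaults of BiasedLockingBulkRebiasThreshold = 20,
// BiasedLockingBulkRevokeThreshold = 40 and BiasedLockingDecayTime =
// 25000 ms; runtime/globals.hpp holds the authoritative values):
//
//   revocations 1..19 of a type -> HR_SINGLE_REVOKE each time
//   revocation 20               -> HR_BULK_REBIAS (epoch bump for the type)
//   revocations 21..39          -> HR_SINGLE_REVOKE again
//   revocation 40               -> HR_BULK_REVOKE (biasing disabled for type)
//
// If the next revocation arrives 25 s or more after the last bulk rebias
// while the count is in [20, 40), the count decays back to zero and the
// cycle may repeat instead of escalating to a bulk revoke.
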
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    ThreadsListHandle tlh;
    JavaThreadIterator jti(tlh.list());

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header()->has_bias_pattern()) {
        int prev_epoch = klass->prototype_header()->bias_epoch();
        klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
        int cur_epoch = klass->prototype_header()->bias_epoch();

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (JavaThread* thr = jti.first(); thr != NULL; thr = jti.next()) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markOop mark = owner->mark();
            if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark->set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markOopDesc::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (JavaThread* thr = jti.first(); thr != NULL; thr = jti.next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            revoke_bias(owner, false, true, requesting_thread);
          }
        }
      }

      // Must force the bias of the passed object to be forcibly revoked
      // as well to ensure guarantees to callers
      revoke_bias(o, false, true, requesting_thread);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}

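// Illustrative epoch walk-through for the bulk-rebias branch above (a
// sketch, not executable code). Suppose klass K has prototype epoch 4 and
// three biased instances a, b and c, with only b currently locked:
//
//   before bump:  K.prototype.epoch = 4;  a, b, c all carry epoch 4
//   after bump:   K.prototype.epoch = 5;  the stack walk fixes the locked
//                 instance, so b now carries epoch 5
//   result:       a and c still carry epoch 4 != 5, so their biases are
//                 implicitly revoked; the next thread to lock them may
//                 CAS-rebias without a safepoint (see the expired-epoch
//                 path in revoke_and_rebias() below).
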
static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  ThreadsListHandle tlh;
  JavaThreadIterator jti(tlh.list());
  for (JavaThread* thr = jti.first(); thr != NULL; thr = jti.next()) {
    thr->set_cached_monitor_info(NULL);
  }
}


class VM_RevokeBias : public VM_Operation {
 protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;

 public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }

  virtual bool doit_prologue() {
    // Verify that there is actual work to do since the callers just
    // give us locked object(s). If we don't find any biased objects
    // there is nothing to do and we avoid a safepoint.
    if (_obj != NULL) {
      markOop mark = (*_obj)()->mark();
      if (mark->has_bias_pattern()) {
        return true;
      }
    } else {
      for ( int i = 0 ; i < _objs->length(); i++ ) {
        markOop mark = (_objs->at(i))()->mark();
        if (mark->has_bias_pattern()) {
          return true;
        }
      }
    }
    return false;
  }

  virtual void doit() {
    if (_obj != NULL) {
      log_info(biasedlocking)("Revoking bias with potentially per-thread safepoint:");
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread);
      clean_up_cached_monitor_info();
      return;
    } else {
      log_info(biasedlocking)("Revoking bias with global safepoint:");
      BiasedLocking::revoke_at_safepoint(_objs);
    }
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }
};


class VM_BulkRevokeBias : public VM_RevokeBias {
 private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue() { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};

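// Road map for revoke_and_rebias() below (a summary sketch of its branches,
// in the order they are tried):
//
//   1. anonymously biased && !attempt_rebias -> CAS to unbiased, no safepoint
//   2. stale prototype (bulk revoke already happened for the type)
//                                            -> CAS to prototype, no safepoint
//   3. expired epoch                         -> CAS rebias or unbias, no safepoint
//   4. otherwise consult update_heuristics():
//        HR_SINGLE_REVOKE, biased to self -> walk own stack, no safepoint
//        HR_SINGLE_REVOKE, other thread   -> VM_RevokeBias (safepoint)
//        HR_BULK_REBIAS / HR_BULK_REVOKE  -> VM_BulkRevokeBias (safepoint)
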
BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value       = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
  } else if (mark->has_bias_pattern()) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
      markOop biased_value = mark;
      markOop res_mark = obj->cas_set_mark(prototype_header, mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value       = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = obj->cas_set_mark(rebiased_prototype, mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value       = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
    }
  }

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    Klass *k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      // Also check the epoch because even if threads match, another thread
      // can come in with a CAS to steal the bias of an object that has a
      // stale epoch.
      ResourceMark rm;
      log_info(biasedlocking)("Revoking bias by walking my own stack:");
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      return cond;
    } else {
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  return bulk_revoke.status_code();
}

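// Caller's-eye usage sketch (illustrative; the real call sites live in the
// synchronization runtime, e.g. ObjectSynchronizer in synchronizer.cpp):
//
//   BiasedLocking::Condition cond =
//       BiasedLocking::revoke_and_rebias(obj, /*attempt_rebias=*/true, THREAD);
//   if (cond == BiasedLocking::BIAS_REVOKED_AND_REBIASED) {
//     return;  // the requesting thread now owns the bias, i.e. the lock
//   }
//   // otherwise fall through to the stack-locking / inflation path
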
void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL);
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}

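// Sketch of the intended pairing for the mark preservation below, from a
// full collection's perspective (assumed caller shape; the actual calls
// live in the GC code):
//
//   BiasedLocking::preserve_marks();   // before headers may be clobbered
//   ... full GC; most mark words are reset to the klass prototype ...
//   BiasedLocking::restore_marks();    // reinstall the preserved biased marks
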
void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  ThreadsListHandle tlh;
  JavaThreadIterator jti(tlh.list());
  for (JavaThread* thread = jti.first(); thread != NULL; thread = jti.next()) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*> *monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}


void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markOop mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}


int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }


// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) {
  // Print to the passed stream rather than unconditionally to tty, so the
  // outputStream* parameter is actually honored.
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}