/*
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/basicLock.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/task.hpp"
#include "runtime/threadSMR.hpp"
#include "runtime/vframe.hpp"
#include "runtime/vmThread.hpp"
#include "runtime/vm_operations.hpp"
#include "trace/tracing.hpp"

static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

static GrowableArray<Handle>*  _preserved_oop_stack  = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;

static void enable_biased_locking(InstanceKlass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}

class VM_EnableBiasedLocking: public VM_Operation {
 private:
  bool _is_cheap_allocated;
 public:
  VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
  VMOp_Type type() const          { return VMOp_EnableBiasedLocking; }
  Mode evaluation_mode() const    { return _is_cheap_allocated ? _async_safepoint : _safepoint; }
  bool is_cheap_allocated() const { return _is_cheap_allocated; }

  void doit() {
    // Iterate the class loader data dictionaries enabling biased locking for all
    // currently loaded classes.
    ClassLoaderDataGraph::dictionary_classes_do(enable_biased_locking);
    // Indicate that future instances should enable it as well
    _biased_locking_enabled = true;

    log_info(biasedlocking)("Biased locking enabled");
  }

  bool allow_nested_vm_operations() const { return false; }
};


// One-shot PeriodicTask subclass for enabling biased locking
class EnableBiasedLockingTask : public PeriodicTask {
 public:
  EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}

  virtual void task() {
    // Use async VM operation to avoid blocking the Watcher thread.
    // VM Thread will free C heap storage.
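    // (Illustrative note: passing true marks the operation as cheap-allocated,
    // so evaluation_mode() above returns _async_safepoint, VMThread::execute()
    // returns without waiting for the operation, and the VM thread deletes the
    // operation once it has been evaluated.)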
    VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true);
    VMThread::execute(op);

    // Reclaim our storage and disenroll ourselves
    delete this;
  }
};


void BiasedLocking::init() {
  // If biased locking is enabled, schedule a task to fire a few
  // seconds into the run which turns on biased locking for all
  // currently loaded classes as well as future ones. This is a
  // workaround for startup time regressions due to a large number of
  // safepoints being taken during VM startup for bias revocation.
  // Ideally we would have a lower cost for individual bias revocation
  // and not need a mechanism like this.
  if (UseBiasedLocking) {
    if (BiasedLockingStartupDelay > 0) {
      EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay);
      task->enroll();
    } else {
      VM_EnableBiasedLocking op(false);
      VMThread::execute(&op);
    }
  }
}


bool BiasedLocking::enabled() {
  return _biased_locking_enabled;
}

// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order
static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) {
  GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info();
  if (info != NULL) {
    return info;
  }

  info = new GrowableArray<MonitorInfo*>();

  // It's possible for the thread to not have any Java frames on it,
  // i.e., if it's the main thread and it's already returned from main()
  if (thread->has_last_Java_frame()) {
    RegisterMap rm(thread);
    for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
      GrowableArray<MonitorInfo*> *monitors = vf->monitors();
      if (monitors != NULL) {
        int len = monitors->length();
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}

// After the call, *biased_locker will be set to obj->mark()->biased_locker() if biased_locker != NULL
// AND it is a living thread. Otherwise it will not be updated (i.e., the caller is responsible for initialization).
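// For reference, a sketch of the mark word states this function transitions
// between (the authoritative bit layout lives in markOop.hpp):
//
//   [JavaThread* | epoch | age | 1 | 01]  biased toward the given thread
//   [0           | epoch | age | 1 | 01]  anonymously biased, no owner yet
//   [header bits         | age | 0 | 01]  unlocked, bias revoked
//
// The low bits "1 01" form the bias pattern tested by has_bias_pattern().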
static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  markOop mark = obj->mark();
  if (!mark->has_bias_pattern()) {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)("  (Skipping revocation of object " INTPTR_FORMAT
                              ", mark " INTPTR_FORMAT ", type %s"
                              ", requesting thread " INTPTR_FORMAT
                              " because it's no longer biased)",
                              p2i((void *)obj), (intptr_t) mark,
                              obj->klass()->external_name(),
                              (intptr_t) requesting_thread);
    }
    return BiasedLocking::NOT_BIASED;
  }

  uint age = mark->age();
  markOop   biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
  markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                            ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            (intptr_t) mark,
                            obj->klass()->external_name(),
                            (intptr_t) obj->klass()->prototype_header(),
                            (allow_rebias ? 1 : 0),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                             INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                             " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             (intptr_t) mark,
                             obj->klass()->external_name(),
                             (intptr_t) obj->klass()->prototype_header(),
                             (allow_rebias ? 1 : 0),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark->biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
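    // (The identity hash shares mark word space with the bias owner's thread
    // pointer, so a bias, even an anonymous one, must be cleared before a
    // hash can be installed.)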
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of anonymously-biased object");
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    ThreadsListHandle tlh;
    thread_is_alive = tlh.includes(biased_thread);
  }
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                              PTR_FORMAT ")", p2i(biased_thread));
    } else {
      log_trace(biasedlocking)("  Revoked bias of object biased toward dead thread ("
                               PTR_FORMAT ")", p2i(biased_thread));
    }
    return BiasedLocking::BIAS_REVOKED;
  }

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    log_info(biasedlocking)("  Revoked bias of object biased toward live thread ("
                            PTR_FORMAT ")", p2i(biased_thread));
  } else {
    log_trace(biasedlocking)("  Revoked bias of object biased toward live thread ("
                             PTR_FORMAT ")", p2i(biased_thread));
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    if (mon_info->owner() == obj) {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
      // Assume recursive case and fix up highest lock later
      markOop mark = markOopDesc::encode((BasicLock*) NULL);
      highest_lock = mon_info->lock();
      highest_lock->set_displaced_header(mark);
    } else {
      log_trace(biasedlocking)("   mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")",
                               p2i((void *) mon_info->owner()),
                               p2i((void *) obj));
    }
  }
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release-store the lock address for platforms without TSO
    // ordering (e.g. ppc).
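    // The release barrier guarantees that a thread which observes the new
    // mark word also observes the displaced header stored just above.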
    obj->release_set_mark(markOopDesc::encode(highest_lock));
    assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-locked object");
    }
  } else {
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)("  Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)("  Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }

  return BiasedLocking::BIAS_REVOKED;
}


enum HeuristicsResult {
  HR_NOT_BIASED    = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS   = 3,
  HR_BULK_REVOKE   = 4
};


static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->klass();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
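    // For example, with the default flag values (BiasedLockingBulkRebiasThreshold
    // = 20, BiasedLockingBulkRevokeThreshold = 40, BiasedLockingDecayTime = 25000
    // ms), a count between 20 and 39 is reset here once at least 25 seconds have
    // passed since the last bulk rebias of this type.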
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}


static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header()->has_bias_pattern()) {
        int prev_epoch = klass->prototype_header()->bias_epoch();
        klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
        int cur_epoch = klass->prototype_header()->bias_epoch();

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markOop mark = owner->mark();
            if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark->set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markOopDesc::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            revoke_bias(owner, false, true, requesting_thread, NULL);
          }
        }
      }

      // The bias of the passed object must also be forcibly revoked
      // to ensure the guarantees made to callers
      revoke_bias(o, false, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}


static void clean_up_cached_monitor_info() {
  // Walk the thread list clearing out the cached monitors
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
    thr->set_cached_monitor_info(NULL);
  }
}


class VM_RevokeBias : public VM_Operation {
 protected:
  Handle* _obj;
  GrowableArray<Handle>* _objs;
  JavaThread* _requesting_thread;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;

 public:
  VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
    : _obj(obj)
    , _objs(NULL)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
    : _obj(NULL)
    , _objs(objs)
    , _requesting_thread(requesting_thread)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _biased_locker_id(0) {}

  virtual VMOp_Type type() const { return VMOp_RevokeBias; }

  virtual bool doit_prologue() {
    // Verify that there is actual work to do since the callers just
    // give us locked object(s). If we don't find any biased objects
    // there is nothing to do and we avoid a safepoint.
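    // Returning false from doit_prologue() makes VMThread::execute() drop the
    // operation without evaluating it, so no safepoint is triggered.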
    if (_obj != NULL) {
      markOop mark = (*_obj)()->mark();
      if (mark->has_bias_pattern()) {
        return true;
      }
    } else {
      for (int i = 0; i < _objs->length(); i++) {
        markOop mark = (_objs->at(i))()->mark();
        if (mark->has_bias_pattern()) {
          return true;
        }
      }
    }
    return false;
  }

  virtual void doit() {
    if (_obj != NULL) {
      log_info(biasedlocking)("Revoking bias with potentially per-thread safepoint:");
      JavaThread* biased_locker = NULL;
      _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker);
      if (biased_locker != NULL) {
        _biased_locker_id = THREAD_TRACE_ID(biased_locker);
      }
      clean_up_cached_monitor_info();
      return;
    } else {
      log_info(biasedlocking)("Revoking bias with global safepoint:");
      BiasedLocking::revoke_at_safepoint(_objs);
    }
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  traceid biased_locker() const {
    return _biased_locker_id;
  }
};


class VM_BulkRevokeBias : public VM_RevokeBias {
 private:
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : VM_RevokeBias(obj, requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
  virtual bool doit_prologue() { return true; }

  virtual void doit() {
    _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    clean_up_cached_monitor_info();
  }
};


BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  // We can revoke the biases of anonymously-biased objects
  // efficiently enough that we should not cause these revocations to
  // update the heuristics because doing so may cause unwanted bulk
  // revocations (which are expensive) to occur.
  markOop mark = obj->mark();
  if (mark->is_biased_anonymously() && !attempt_rebias) {
    // We are probably trying to revoke the bias of this object due to
    // an identity hash code computation. Try to revoke the bias
    // without a safepoint. This is possible if we can successfully
    // compare-and-exchange an unbiased header into the mark word of
    // the object, meaning that no other thread has raced to acquire
    // the bias of the object.
    markOop biased_value       = mark;
    markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
    markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
    if (res_mark == biased_value) {
      return BIAS_REVOKED;
    }
  } else if (mark->has_bias_pattern()) {
    Klass* k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (!prototype_header->has_bias_pattern()) {
      // This object has a stale bias from before the bulk revocation
      // for this data type occurred. It's pointless to update the
      // heuristics at this point so simply update the header with a
      // CAS. If we fail this race, the object's bias has been revoked
      // by another thread so we simply return and let the caller deal
      // with it.
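      // Whether the CAS below succeeds or loses the race, the object ends up
      // unbiased, which is all the caller requires (see the assert below).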
      markOop biased_value       = mark;
      markOop res_mark = obj->cas_set_mark(prototype_header, mark);
      assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked");
      return BIAS_REVOKED;
    } else if (prototype_header->bias_epoch() != mark->bias_epoch()) {
      // The epoch of this biasing has expired indicating that the
      // object is effectively unbiased. Depending on whether we need
      // to rebias or revoke the bias of this object we can do it
      // efficiently enough with a CAS that we shouldn't update the
      // heuristics. This is normally done in the assembly code but we
      // can reach this point due to various points in the runtime
      // needing to revoke biases.
      if (attempt_rebias) {
        assert(THREAD->is_Java_thread(), "");
        markOop biased_value       = mark;
        markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch());
        markOop res_mark = obj->cas_set_mark(rebiased_prototype, mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED_AND_REBIASED;
        }
      } else {
        markOop biased_value       = mark;
        markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
        markOop res_mark = obj->cas_set_mark(unbiased_prototype, mark);
        if (res_mark == biased_value) {
          return BIAS_REVOKED;
        }
      }
    }
  }

  HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias);
  if (heuristics == HR_NOT_BIASED) {
    return NOT_BIASED;
  } else if (heuristics == HR_SINGLE_REVOKE) {
    Klass *k = obj->klass();
    markOop prototype_header = k->prototype_header();
    if (mark->biased_locker() == THREAD &&
        prototype_header->bias_epoch() == mark->bias_epoch()) {
      // A thread is trying to revoke the bias of an object biased
      // toward it, again likely due to an identity hash code
      // computation. We can again avoid a safepoint in this case
      // since we are only going to walk our own stack. There are no
      // races with revocations occurring in other threads because we
      // reach no safepoints in the revocation path.
      // Also check the epoch because even if threads match, another thread
      // can come in with a CAS to steal the bias of an object that has a
      // stale epoch.
      ResourceMark rm;
      log_info(biasedlocking)("Revoking bias by walking my own stack:");
      EventBiasedLockSelfRevocation event;
      BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL);
      ((JavaThread*) THREAD)->set_cached_monitor_info(NULL);
      assert(cond == BIAS_REVOKED, "why not?");
      if (event.should_commit()) {
        event.set_lockClass(k);
        event.commit();
      }
      return cond;
    } else {
      EventBiasedLockRevocation event;
      VM_RevokeBias revoke(&obj, (JavaThread*) THREAD);
      VMThread::execute(&revoke);
      if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) {
        event.set_lockClass(k);
        // Subtract 1 to match the id of events committed inside the safepoint
        event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
        event.set_previousOwner(revoke.biased_locker());
        event.commit();
      }
      return revoke.status_code();
    }
  }

  assert((heuristics == HR_BULK_REVOKE) ||
         (heuristics == HR_BULK_REBIAS), "?");
  EventBiasedLockClassRevocation event;
  VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD,
                                (heuristics == HR_BULK_REBIAS),
                                attempt_rebias);
  VMThread::execute(&bulk_revoke);
  if (event.should_commit()) {
    event.set_revokedClass(obj->klass());
    event.set_disableBiasing((heuristics != HR_BULK_REBIAS));
    // Subtract 1 to match the id of events committed inside the safepoint
    event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1);
    event.commit();
  }
  return bulk_revoke.status_code();
}


void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
  if (objs->length() == 0) {
    return;
  }
  VM_RevokeBias revoke(objs, JavaThread::current());
  VMThread::execute(&revoke);
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj, false);
  if (heuristics == HR_SINGLE_REVOKE) {
    revoke_bias(obj, false, false, NULL, NULL);
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj, false);
    if (heuristics == HR_SINGLE_REVOKE) {
      revoke_bias(obj, false, false, NULL, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack  == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // locked. To handle this case we iterate over the currently-locked
  // monitors in a prepass and, if they are biased, preserve their
  // mark words here. This should be a relatively small set of objects
  // especially compared to the number of objects in the heap.
  _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
  _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);

  ResourceMark rm;
  Thread* cur = Thread::current();
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
    if (thread->has_last_Java_frame()) {
      RegisterMap rm(thread);
      for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
        GrowableArray<MonitorInfo*> *monitors = vf->monitors();
        if (monitors != NULL) {
          int len = monitors->length();
          // Walk monitors youngest to oldest
          for (int i = len - 1; i >= 0; i--) {
            MonitorInfo* mon_info = monitors->at(i);
            if (mon_info->owner_is_scalar_replaced()) continue;
            oop owner = mon_info->owner();
            if (owner != NULL) {
              markOop mark = owner->mark();
              if (mark->has_bias_pattern()) {
                _preserved_oop_stack->push(Handle(cur, owner));
                _preserved_mark_stack->push(mark);
              }
            }
          }
        }
      }
    }
  }
}


void BiasedLocking::restore_marks() {
  if (!UseBiasedLocking)
    return;

  assert(_preserved_oop_stack  != NULL, "double free");
  assert(_preserved_mark_stack != NULL, "double free");

  int len = _preserved_oop_stack->length();
  for (int i = 0; i < len; i++) {
    Handle owner = _preserved_oop_stack->at(i);
    markOop mark = _preserved_mark_stack->at(i);
    owner->set_mark(mark);
  }

  delete _preserved_oop_stack;
  _preserved_oop_stack = NULL;
  delete _preserved_mark_stack;
  _preserved_mark_stack = NULL;
}


int* BiasedLocking::total_entry_count_addr()                   { return _counters.total_entry_count_addr(); }
int* BiasedLocking::biased_lock_entry_count_addr()             { return _counters.biased_lock_entry_count_addr(); }
int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); }
int* BiasedLocking::rebiased_lock_entry_count_addr()           { return _counters.rebiased_lock_entry_count_addr(); }
int* BiasedLocking::revoked_lock_entry_count_addr()            { return _counters.revoked_lock_entry_count_addr(); }
int* BiasedLocking::fast_path_entry_count_addr()               { return _counters.fast_path_entry_count_addr(); }
int* BiasedLocking::slow_path_entry_count_addr()               { return _counters.slow_path_entry_count_addr(); }


// BiasedLockingCounters

int BiasedLockingCounters::slow_path_entry_count() {
  if (_slow_path_entry_count != 0) {
    return _slow_path_entry_count;
  }
  int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count +
            _rebiased_lock_entry_count + _revoked_lock_entry_count +
            _fast_path_entry_count;

  return _total_entry_count - sum;
}

void BiasedLockingCounters::print_on(outputStream* st) {
  // Print to the supplied stream rather than unconditionally to tty.
  st->print_cr("# total entries: %d", _total_entry_count);
  st->print_cr("# biased lock entries: %d", _biased_lock_entry_count);
  st->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count);
  st->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count);
  st->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count);
  st->print_cr("# fast path lock entries: %d", _fast_path_entry_count);
  st->print_cr("# slow path lock entries: %d", slow_path_entry_count());
}