1 /* 2 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 22 * 23 */ 24 25 #include "precompiled.hpp" 26 #include "oops/klass.inline.hpp" 27 #include "oops/markOop.hpp" 28 #include "runtime/basicLock.hpp" 29 #include "runtime/biasedLocking.hpp" 30 #include "runtime/task.hpp" 31 #include "runtime/vframe.hpp" 32 #include "runtime/vmThread.hpp" 33 #include "runtime/vm_operations.hpp" 34 #include "trace/tracing.hpp" 35 36 static bool _biased_locking_enabled = false; 37 BiasedLockingCounters BiasedLocking::_counters; 38 39 static GrowableArray<Handle>* _preserved_oop_stack = NULL; 40 static GrowableArray<markOop>* _preserved_mark_stack = NULL; 41 42 static void enable_biased_locking(Klass* k) { 43 k->set_prototype_header(markOopDesc::biased_locking_prototype()); 44 } 45 46 class VM_EnableBiasedLocking: public VM_Operation { 47 private: 48 bool _is_cheap_allocated; 49 public: 50 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; } 51 VMOp_Type type() const { return VMOp_EnableBiasedLocking; } 52 Mode evaluation_mode() const { return _is_cheap_allocated ? _async_safepoint : _safepoint; } 53 bool is_cheap_allocated() const { return _is_cheap_allocated; } 54 55 void doit() { 56 // Iterate the system dictionary enabling biased locking for all 57 // currently loaded classes 58 SystemDictionary::classes_do(enable_biased_locking); 59 // Indicate that future instances should enable it as well 60 _biased_locking_enabled = true; 61 62 if (TraceBiasedLocking) { 63 tty->print_cr("Biased locking enabled"); 64 } 65 } 66 67 bool allow_nested_vm_operations() const { return false; } 68 }; 69 70 71 // One-shot PeriodicTask subclass for enabling biased locking 72 class EnableBiasedLockingTask : public PeriodicTask { 73 public: 74 EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {} 75 76 virtual void task() { 77 // Use async VM operation to avoid blocking the Watcher thread. 78 // VM Thread will free C heap storage. 79 VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking(true); 80 VMThread::execute(op); 81 82 // Reclaim our storage and disenroll ourself 83 delete this; 84 } 85 }; 86 87 88 void BiasedLocking::init() { 89 // If biased locking is enabled, schedule a task to fire a few 90 // seconds into the run which turns on biased locking for all 91 // currently loaded classes as well as future ones. This is a 92 // workaround for startup time regressions due to a large number of 93 // safepoints being taken during VM startup for bias revocation. 94 // Ideally we would have a lower cost for individual bias revocation 95 // and not need a mechanism like this. 96 if (UseBiasedLocking) { 97 if (BiasedLockingStartupDelay > 0) { 98 EnableBiasedLockingTask* task = new EnableBiasedLockingTask(BiasedLockingStartupDelay); 99 task->enroll(); 100 } else { 101 VM_EnableBiasedLocking op(false); 102 VMThread::execute(&op); 103 } 104 } 105 } 106 107 108 bool BiasedLocking::enabled() { 109 return _biased_locking_enabled; 110 } 111 112 // Returns MonitorInfos for all objects locked on this thread in youngest to oldest order 113 static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) { 114 GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info(); 115 if (info != NULL) { 116 return info; 117 } 118 119 info = new GrowableArray<MonitorInfo*>(); 120 121 // It's possible for the thread to not have any Java frames on it, 122 // i.e., if it's the main thread and it's already returned from main() 123 if (thread->has_last_Java_frame()) { 124 RegisterMap rm(thread); 125 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) { 126 GrowableArray<MonitorInfo*> *monitors = vf->monitors(); 127 if (monitors != NULL) { 128 int len = monitors->length(); 129 // Walk monitors youngest to oldest 130 for (int i = len - 1; i >= 0; i--) { 131 MonitorInfo* mon_info = monitors->at(i); 132 if (mon_info->eliminated()) continue; 133 oop owner = mon_info->owner(); 134 if (owner != NULL) { 135 info->append(mon_info); 136 } 137 } 138 } 139 } 140 } 141 142 thread->set_cached_monitor_info(info); 143 return info; 144 } 145 146 147 static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) { 148 markOop mark = obj->mark(); 149 if (!mark->has_bias_pattern()) { 150 if (TraceBiasedLocking) { 151 ResourceMark rm; 152 tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)", 153 obj->klass()->external_name()); 154 } 155 return BiasedLocking::NOT_BIASED; 156 } 157 158 uint age = mark->age(); 159 markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age); 160 markOop unbiased_prototype = markOopDesc::prototype()->set_age(age); 161 162 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 163 ResourceMark rm; 164 tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT, 165 p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread); 166 } 167 168 JavaThread* biased_thread = mark->biased_locker(); 169 if (biased_thread == NULL) { 170 // Object is anonymously biased. We can get here if, for 171 // example, we revoke the bias due to an identity hash code 172 // being computed for an object. 173 if (!allow_rebias) { 174 obj->set_mark(unbiased_prototype); 175 } 176 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 177 tty->print_cr(" Revoked bias of anonymously-biased object"); 178 } 179 return BiasedLocking::BIAS_REVOKED; 180 } 181 182 // Handle case where the thread toward which the object was biased has exited 183 bool thread_is_alive = false; 184 if (requesting_thread == biased_thread) { 185 thread_is_alive = true; 186 } else { 187 for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) { 188 if (cur_thread == biased_thread) { 189 thread_is_alive = true; 190 break; 191 } 192 } 193 } 194 if (!thread_is_alive) { 195 if (allow_rebias) { 196 obj->set_mark(biased_prototype); 197 } else { 198 obj->set_mark(unbiased_prototype); 199 } 200 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 201 tty->print_cr(" Revoked bias of object biased toward dead thread"); 202 } 203 return BiasedLocking::BIAS_REVOKED; 204 } 205 206 // Thread owning bias is alive. 207 // Check to see whether it currently owns the lock and, if so, 208 // write down the needed displaced headers to the thread's stack. 209 // Otherwise, restore the object's header either to the unlocked 210 // or unbiased state. 211 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread); 212 BasicLock* highest_lock = NULL; 213 for (int i = 0; i < cached_monitor_info->length(); i++) { 214 MonitorInfo* mon_info = cached_monitor_info->at(i); 215 if (mon_info->owner() == obj) { 216 if (TraceBiasedLocking && Verbose) { 217 tty->print_cr(" mon_info->owner (" PTR_FORMAT ") == obj (" PTR_FORMAT ")", 218 p2i((void *) mon_info->owner()), 219 p2i((void *) obj)); 220 } 221 // Assume recursive case and fix up highest lock later 222 markOop mark = markOopDesc::encode((BasicLock*) NULL); 223 highest_lock = mon_info->lock(); 224 highest_lock->set_displaced_header(mark); 225 } else { 226 if (TraceBiasedLocking && Verbose) { 227 tty->print_cr(" mon_info->owner (" PTR_FORMAT ") != obj (" PTR_FORMAT ")", 228 p2i((void *) mon_info->owner()), 229 p2i((void *) obj)); 230 } 231 } 232 } 233 if (highest_lock != NULL) { 234 // Fix up highest lock to contain displaced header and point 235 // object at it 236 highest_lock->set_displaced_header(unbiased_prototype); 237 // Reset object header to point to displaced mark. 238 // Must release storing the lock address for platforms without TSO 239 // ordering (e.g. ppc). 240 obj->release_set_mark(markOopDesc::encode(highest_lock)); 241 assert(!obj->mark()->has_bias_pattern(), "illegal mark state: stack lock used bias bit"); 242 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 243 tty->print_cr(" Revoked bias of currently-locked object"); 244 } 245 } else { 246 if (TraceBiasedLocking && (Verbose || !is_bulk)) { 247 tty->print_cr(" Revoked bias of currently-unlocked object"); 248 } 249 if (allow_rebias) { 250 obj->set_mark(biased_prototype); 251 } else { 252 // Store the unlocked value into the object's header. 253 obj->set_mark(unbiased_prototype); 254 } 255 } 256 257 // If requested, return information on which thread held the bias 258 if (EnableJFR && biased_locker != NULL) { 259 *biased_locker = biased_thread; 260 } 261 262 return BiasedLocking::BIAS_REVOKED; 263 } 264 265 266 enum HeuristicsResult { 267 HR_NOT_BIASED = 1, 268 HR_SINGLE_REVOKE = 2, 269 HR_BULK_REBIAS = 3, 270 HR_BULK_REVOKE = 4 271 }; 272 273 274 static HeuristicsResult update_heuristics(oop o, bool allow_rebias) { 275 markOop mark = o->mark(); 276 if (!mark->has_bias_pattern()) { 277 return HR_NOT_BIASED; 278 } 279 280 // Heuristics to attempt to throttle the number of revocations. 281 // Stages: 282 // 1. Revoke the biases of all objects in the heap of this type, 283 // but allow rebiasing of those objects if unlocked. 284 // 2. Revoke the biases of all objects in the heap of this type 285 // and don't allow rebiasing of these objects. Disable 286 // allocation of objects of that type with the bias bit set. 287 Klass* k = o->klass(); 288 jlong cur_time = os::javaTimeMillis(); 289 jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time(); 290 int revocation_count = k->biased_lock_revocation_count(); 291 if ((revocation_count >= BiasedLockingBulkRebiasThreshold) && 292 (revocation_count < BiasedLockingBulkRevokeThreshold) && 293 (last_bulk_revocation_time != 0) && 294 (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) { 295 // This is the first revocation we've seen in a while of an 296 // object of this type since the last time we performed a bulk 297 // rebiasing operation. The application is allocating objects in 298 // bulk which are biased toward a thread and then handing them 299 // off to another thread. We can cope with this allocation 300 // pattern via the bulk rebiasing mechanism so we reset the 301 // klass's revocation count rather than allow it to increase 302 // monotonically. If we see the need to perform another bulk 303 // rebias operation later, we will, and if subsequently we see 304 // many more revocation operations in a short period of time we 305 // will completely disable biasing for this type. 306 k->set_biased_lock_revocation_count(0); 307 revocation_count = 0; 308 } 309 310 // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold 311 if (revocation_count <= BiasedLockingBulkRevokeThreshold) { 312 revocation_count = k->atomic_incr_biased_lock_revocation_count(); 313 } 314 315 if (revocation_count == BiasedLockingBulkRevokeThreshold) { 316 return HR_BULK_REVOKE; 317 } 318 319 if (revocation_count == BiasedLockingBulkRebiasThreshold) { 320 return HR_BULK_REBIAS; 321 } 322 323 return HR_SINGLE_REVOKE; 324 } 325 326 327 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o, 328 bool bulk_rebias, 329 bool attempt_rebias_of_object, 330 JavaThread* requesting_thread) { 331 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint"); 332 333 if (TraceBiasedLocking) { 334 tty->print_cr("* Beginning bulk revocation (kind == %s) because of object " 335 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s", 336 (bulk_rebias ? "rebias" : "revoke"), 337 p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name()); 338 } 339 340 jlong cur_time = os::javaTimeMillis(); 341 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time); 342 343 344 Klass* k_o = o->klass(); 345 Klass* klass = k_o; 346 347 if (bulk_rebias) { 348 // Use the epoch in the klass of the object to implicitly revoke 349 // all biases of objects of this data type and force them to be 350 // reacquired. However, we also need to walk the stacks of all 351 // threads and update the headers of lightweight locked objects 352 // with biases to have the current epoch. 353 354 // If the prototype header doesn't have the bias pattern, don't 355 // try to update the epoch -- assume another VM operation came in 356 // and reset the header to the unbiased state, which will 357 // implicitly cause all existing biases to be revoked 358 if (klass->prototype_header()->has_bias_pattern()) { 359 int prev_epoch = klass->prototype_header()->bias_epoch(); 360 klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch()); 361 int cur_epoch = klass->prototype_header()->bias_epoch(); 362 363 // Now walk all threads' stacks and adjust epochs of any biased 364 // and locked objects of this data type we encounter 365 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) { 366 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr); 367 for (int i = 0; i < cached_monitor_info->length(); i++) { 368 MonitorInfo* mon_info = cached_monitor_info->at(i); 369 oop owner = mon_info->owner(); 370 markOop mark = owner->mark(); 371 if ((owner->klass() == k_o) && mark->has_bias_pattern()) { 372 // We might have encountered this object already in the case of recursive locking 373 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment"); 374 owner->set_mark(mark->set_bias_epoch(cur_epoch)); 375 } 376 } 377 } 378 } 379 380 // At this point we're done. All we have to do is potentially 381 // adjust the header of the given object to revoke its bias. 382 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL); 383 } else { 384 if (TraceBiasedLocking) { 385 ResourceMark rm; 386 tty->print_cr("* Disabling biased locking for type %s", klass->external_name()); 387 } 388 389 // Disable biased locking for this data type. Not only will this 390 // cause future instances to not be biased, but existing biased 391 // instances will notice that this implicitly caused their biases 392 // to be revoked. 393 klass->set_prototype_header(markOopDesc::prototype()); 394 395 // Now walk all threads' stacks and forcibly revoke the biases of 396 // any locked and biased objects of this data type we encounter. 397 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) { 398 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr); 399 for (int i = 0; i < cached_monitor_info->length(); i++) { 400 MonitorInfo* mon_info = cached_monitor_info->at(i); 401 oop owner = mon_info->owner(); 402 markOop mark = owner->mark(); 403 if ((owner->klass() == k_o) && mark->has_bias_pattern()) { 404 revoke_bias(owner, false, true, requesting_thread, NULL); 405 } 406 } 407 } 408 409 // Must force the bias of the passed object to be forcibly revoked 410 // as well to ensure guarantees to callers 411 revoke_bias(o, false, true, requesting_thread, NULL); 412 } 413 414 if (TraceBiasedLocking) { 415 tty->print_cr("* Ending bulk revocation"); 416 } 417 418 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED; 419 420 if (attempt_rebias_of_object && 421 o->mark()->has_bias_pattern() && 422 klass->prototype_header()->has_bias_pattern()) { 423 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(), 424 klass->prototype_header()->bias_epoch()); 425 o->set_mark(new_mark); 426 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED; 427 if (TraceBiasedLocking) { 428 tty->print_cr(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread); 429 } 430 } 431 432 assert(!o->mark()->has_bias_pattern() || 433 (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)), 434 "bug in bulk bias revocation"); 435 436 return status_code; 437 } 438 439 440 static void clean_up_cached_monitor_info() { 441 // Walk the thread list clearing out the cached monitors 442 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) { 443 thr->set_cached_monitor_info(NULL); 444 } 445 } 446 447 448 class VM_RevokeBias : public VM_Operation { 449 protected: 450 Handle* _obj; 451 GrowableArray<Handle>* _objs; 452 JavaThread* _requesting_thread; 453 BiasedLocking::Condition _status_code; 454 traceid _biased_locker_id; 455 456 public: 457 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread) 458 : _obj(obj) 459 , _objs(NULL) 460 , _requesting_thread(requesting_thread) 461 , _status_code(BiasedLocking::NOT_BIASED) 462 , _biased_locker_id(0) {} 463 464 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread) 465 : _obj(NULL) 466 , _objs(objs) 467 , _requesting_thread(requesting_thread) 468 , _status_code(BiasedLocking::NOT_BIASED) 469 , _biased_locker_id(0) {} 470 471 virtual VMOp_Type type() const { return VMOp_RevokeBias; } 472 473 virtual bool doit_prologue() { 474 // Verify that there is actual work to do since the callers just 475 // give us locked object(s). If we don't find any biased objects 476 // there is nothing to do and we avoid a safepoint. 477 if (_obj != NULL) { 478 markOop mark = (*_obj)()->mark(); 479 if (mark->has_bias_pattern()) { 480 return true; 481 } 482 } else { 483 for ( int i = 0 ; i < _objs->length(); i++ ) { 484 markOop mark = (_objs->at(i))()->mark(); 485 if (mark->has_bias_pattern()) { 486 return true; 487 } 488 } 489 } 490 return false; 491 } 492 493 virtual void doit() { 494 if (_obj != NULL) { 495 if (TraceBiasedLocking) { 496 tty->print_cr("Revoking bias with potentially per-thread safepoint:"); 497 } 498 JavaThread* biased_locker = NULL; 499 _status_code = revoke_bias((*_obj)(), false, false, _requesting_thread, &biased_locker); 500 if (biased_locker != NULL) { 501 _biased_locker_id = THREAD_TRACE_ID(biased_locker); 502 } 503 clean_up_cached_monitor_info(); 504 return; 505 } else { 506 if (TraceBiasedLocking) { 507 tty->print_cr("Revoking bias with global safepoint:"); 508 } 509 BiasedLocking::revoke_at_safepoint(_objs); 510 } 511 } 512 513 BiasedLocking::Condition status_code() const { 514 return _status_code; 515 } 516 517 traceid biased_locker() const { 518 return _biased_locker_id; 519 } 520 }; 521 522 523 class VM_BulkRevokeBias : public VM_RevokeBias { 524 private: 525 bool _bulk_rebias; 526 bool _attempt_rebias_of_object; 527 528 public: 529 VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread, 530 bool bulk_rebias, 531 bool attempt_rebias_of_object) 532 : VM_RevokeBias(obj, requesting_thread) 533 , _bulk_rebias(bulk_rebias) 534 , _attempt_rebias_of_object(attempt_rebias_of_object) {} 535 536 virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; } 537 virtual bool doit_prologue() { return true; } 538 539 virtual void doit() { 540 _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread); 541 clean_up_cached_monitor_info(); 542 } 543 }; 544 545 546 BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) { 547 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint"); 548 549 // We can revoke the biases of anonymously-biased objects 550 // efficiently enough that we should not cause these revocations to 551 // update the heuristics because doing so may cause unwanted bulk 552 // revocations (which are expensive) to occur. 553 markOop mark = obj->mark(); 554 if (mark->is_biased_anonymously() && !attempt_rebias) { 555 // We are probably trying to revoke the bias of this object due to 556 // an identity hash code computation. Try to revoke the bias 557 // without a safepoint. This is possible if we can successfully 558 // compare-and-exchange an unbiased header into the mark word of 559 // the object, meaning that no other thread has raced to acquire 560 // the bias of the object. 561 markOop biased_value = mark; 562 markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); 563 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark); 564 if (res_mark == biased_value) { 565 return BIAS_REVOKED; 566 } 567 } else if (mark->has_bias_pattern()) { 568 Klass* k = obj->klass(); 569 markOop prototype_header = k->prototype_header(); 570 if (!prototype_header->has_bias_pattern()) { 571 // This object has a stale bias from before the bulk revocation 572 // for this data type occurred. It's pointless to update the 573 // heuristics at this point so simply update the header with a 574 // CAS. If we fail this race, the object's bias has been revoked 575 // by another thread so we simply return and let the caller deal 576 // with it. 577 markOop biased_value = mark; 578 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(prototype_header, obj->mark_addr(), mark); 579 assert(!(*(obj->mark_addr()))->has_bias_pattern(), "even if we raced, should still be revoked"); 580 return BIAS_REVOKED; 581 } else if (prototype_header->bias_epoch() != mark->bias_epoch()) { 582 // The epoch of this biasing has expired indicating that the 583 // object is effectively unbiased. Depending on whether we need 584 // to rebias or revoke the bias of this object we can do it 585 // efficiently enough with a CAS that we shouldn't update the 586 // heuristics. This is normally done in the assembly code but we 587 // can reach this point due to various points in the runtime 588 // needing to revoke biases. 589 if (attempt_rebias) { 590 assert(THREAD->is_Java_thread(), ""); 591 markOop biased_value = mark; 592 markOop rebiased_prototype = markOopDesc::encode((JavaThread*) THREAD, mark->age(), prototype_header->bias_epoch()); 593 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(rebiased_prototype, obj->mark_addr(), mark); 594 if (res_mark == biased_value) { 595 return BIAS_REVOKED_AND_REBIASED; 596 } 597 } else { 598 markOop biased_value = mark; 599 markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age()); 600 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark); 601 if (res_mark == biased_value) { 602 return BIAS_REVOKED; 603 } 604 } 605 } 606 } 607 608 HeuristicsResult heuristics = update_heuristics(obj(), attempt_rebias); 609 if (heuristics == HR_NOT_BIASED) { 610 return NOT_BIASED; 611 } else if (heuristics == HR_SINGLE_REVOKE) { 612 Klass *k = obj->klass(); 613 markOop prototype_header = k->prototype_header(); 614 if (mark->biased_locker() == THREAD && 615 prototype_header->bias_epoch() == mark->bias_epoch()) { 616 // A thread is trying to revoke the bias of an object biased 617 // toward it, again likely due to an identity hash code 618 // computation. We can again avoid a safepoint in this case 619 // since we are only going to walk our own stack. There are no 620 // races with revocations occurring in other threads because we 621 // reach no safepoints in the revocation path. 622 // Also check the epoch because even if threads match, another thread 623 // can come in with a CAS to steal the bias of an object that has a 624 // stale epoch. 625 ResourceMark rm; 626 if (TraceBiasedLocking) { 627 tty->print_cr("Revoking bias by walking my own stack:"); 628 } 629 EventBiasedLockSelfRevocation event; 630 BiasedLocking::Condition cond = revoke_bias(obj(), false, false, (JavaThread*) THREAD, NULL); 631 ((JavaThread*) THREAD)->set_cached_monitor_info(NULL); 632 assert(cond == BIAS_REVOKED, "why not?"); 633 if (event.should_commit()) { 634 event.set_lockClass(k); 635 event.commit(); 636 } 637 return cond; 638 } else { 639 EventBiasedLockRevocation event; 640 VM_RevokeBias revoke(&obj, (JavaThread*) THREAD); 641 VMThread::execute(&revoke); 642 if (event.should_commit() && (revoke.status_code() != NOT_BIASED)) { 643 event.set_lockClass(k); 644 // Subtract 1 to match the id of events committed inside the safepoint 645 event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1); 646 event.set_previousOwner(revoke.biased_locker()); 647 event.commit(); 648 } 649 return revoke.status_code(); 650 } 651 } 652 653 assert((heuristics == HR_BULK_REVOKE) || 654 (heuristics == HR_BULK_REBIAS), "?"); 655 EventBiasedLockClassRevocation event; 656 VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*) THREAD, 657 (heuristics == HR_BULK_REBIAS), 658 attempt_rebias); 659 VMThread::execute(&bulk_revoke); 660 if (event.should_commit()) { 661 event.set_revokedClass(obj->klass()); 662 event.set_disableBiasing((heuristics != HR_BULK_REBIAS)); 663 // Subtract 1 to match the id of events committed inside the safepoint 664 event.set_safepointId(SafepointSynchronize::safepoint_counter() - 1); 665 event.commit(); 666 } 667 return bulk_revoke.status_code(); 668 } 669 670 671 void BiasedLocking::revoke(GrowableArray<Handle>* objs) { 672 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint"); 673 if (objs->length() == 0) { 674 return; 675 } 676 VM_RevokeBias revoke(objs, JavaThread::current()); 677 VMThread::execute(&revoke); 678 } 679 680 681 void BiasedLocking::revoke_at_safepoint(Handle h_obj) { 682 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); 683 oop obj = h_obj(); 684 HeuristicsResult heuristics = update_heuristics(obj, false); 685 if (heuristics == HR_SINGLE_REVOKE) { 686 revoke_bias(obj, false, false, NULL, NULL); 687 } else if ((heuristics == HR_BULK_REBIAS) || 688 (heuristics == HR_BULK_REVOKE)) { 689 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL); 690 } 691 clean_up_cached_monitor_info(); 692 } 693 694 695 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) { 696 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); 697 int len = objs->length(); 698 for (int i = 0; i < len; i++) { 699 oop obj = (objs->at(i))(); 700 HeuristicsResult heuristics = update_heuristics(obj, false); 701 if (heuristics == HR_SINGLE_REVOKE) { 702 revoke_bias(obj, false, false, NULL, NULL); 703 } else if ((heuristics == HR_BULK_REBIAS) || 704 (heuristics == HR_BULK_REVOKE)) { 705 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL); 706 } 707 } 708 clean_up_cached_monitor_info(); 709 } 710 711 712 void BiasedLocking::preserve_marks() { 713 if (!UseBiasedLocking) 714 return; 715 716 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint"); 717 718 assert(_preserved_oop_stack == NULL, "double initialization"); 719 assert(_preserved_mark_stack == NULL, "double initialization"); 720 721 // In order to reduce the number of mark words preserved during GC 722 // due to the presence of biased locking, we reinitialize most mark 723 // words to the class's prototype during GC -- even those which have 724 // a currently valid bias owner. One important situation where we 725 // must not clobber a bias is when a biased object is currently 726 // locked. To handle this case we iterate over the currently-locked 727 // monitors in a prepass and, if they are biased, preserve their 728 // mark words here. This should be a relatively small set of objects 729 // especially compared to the number of objects in the heap. 730 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true); 731 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true); 732 733 ResourceMark rm; 734 Thread* cur = Thread::current(); 735 for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) { 736 if (thread->has_last_Java_frame()) { 737 RegisterMap rm(thread); 738 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) { 739 GrowableArray<MonitorInfo*> *monitors = vf->monitors(); 740 if (monitors != NULL) { 741 int len = monitors->length(); 742 // Walk monitors youngest to oldest 743 for (int i = len - 1; i >= 0; i--) { 744 MonitorInfo* mon_info = monitors->at(i); 745 if (mon_info->owner_is_scalar_replaced()) continue; 746 oop owner = mon_info->owner(); 747 if (owner != NULL) { 748 markOop mark = owner->mark(); 749 if (mark->has_bias_pattern()) { 750 _preserved_oop_stack->push(Handle(cur, owner)); 751 _preserved_mark_stack->push(mark); 752 } 753 } 754 } 755 } 756 } 757 } 758 } 759 } 760 761 762 void BiasedLocking::restore_marks() { 763 if (!UseBiasedLocking) 764 return; 765 766 assert(_preserved_oop_stack != NULL, "double free"); 767 assert(_preserved_mark_stack != NULL, "double free"); 768 769 int len = _preserved_oop_stack->length(); 770 for (int i = 0; i < len; i++) { 771 Handle owner = _preserved_oop_stack->at(i); 772 markOop mark = _preserved_mark_stack->at(i); 773 owner->set_mark(mark); 774 } 775 776 delete _preserved_oop_stack; 777 _preserved_oop_stack = NULL; 778 delete _preserved_mark_stack; 779 _preserved_mark_stack = NULL; 780 } 781 782 783 int* BiasedLocking::total_entry_count_addr() { return _counters.total_entry_count_addr(); } 784 int* BiasedLocking::biased_lock_entry_count_addr() { return _counters.biased_lock_entry_count_addr(); } 785 int* BiasedLocking::anonymously_biased_lock_entry_count_addr() { return _counters.anonymously_biased_lock_entry_count_addr(); } 786 int* BiasedLocking::rebiased_lock_entry_count_addr() { return _counters.rebiased_lock_entry_count_addr(); } 787 int* BiasedLocking::revoked_lock_entry_count_addr() { return _counters.revoked_lock_entry_count_addr(); } 788 int* BiasedLocking::fast_path_entry_count_addr() { return _counters.fast_path_entry_count_addr(); } 789 int* BiasedLocking::slow_path_entry_count_addr() { return _counters.slow_path_entry_count_addr(); } 790 791 792 // BiasedLockingCounters 793 794 int BiasedLockingCounters::slow_path_entry_count() { 795 if (_slow_path_entry_count != 0) { 796 return _slow_path_entry_count; 797 } 798 int sum = _biased_lock_entry_count + _anonymously_biased_lock_entry_count + 799 _rebiased_lock_entry_count + _revoked_lock_entry_count + 800 _fast_path_entry_count; 801 802 return _total_entry_count - sum; 803 } 804 805 void BiasedLockingCounters::print_on(outputStream* st) { 806 tty->print_cr("# total entries: %d", _total_entry_count); 807 tty->print_cr("# biased lock entries: %d", _biased_lock_entry_count); 808 tty->print_cr("# anonymously biased lock entries: %d", _anonymously_biased_lock_entry_count); 809 tty->print_cr("# rebiased lock entries: %d", _rebiased_lock_entry_count); 810 tty->print_cr("# revoked lock entries: %d", _revoked_lock_entry_count); 811 tty->print_cr("# fast path lock entries: %d", _fast_path_entry_count); 812 tty->print_cr("# slow path lock entries: %d", slow_path_entry_count()); 813 }