15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/vframe.hpp"
36 #include "runtime/vmThread.hpp"
37 #include "runtime/vm_operations.hpp"
38
39 static bool _biased_locking_enabled = false;
40 BiasedLockingCounters BiasedLocking::_counters;
41
42 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
43 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
44
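// Installing the biased-locking prototype header in a klass makes newly
// allocated instances of that klass start out anonymously biased (bias
// pattern set, but no owning thread recorded yet).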
45 static void enable_biased_locking(InstanceKlass* k) {
46 k->set_prototype_header(markOopDesc::biased_locking_prototype());
47 }
48
49 class VM_EnableBiasedLocking: public VM_Operation {
50 private:
51 bool _is_cheap_allocated;
52 public:
53 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
54 VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
194 // Object is anonymously biased. We can get here if, for
195 // example, we revoke the bias due to an identity hash code
196 // being computed for an object.
197 if (!allow_rebias) {
198 obj->set_mark(unbiased_prototype);
199 }
200 // Log at "info" level if not bulk, else "trace" level
201 if (!is_bulk) {
202 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
203 } else {
204 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
205 }
206 return BiasedLocking::BIAS_REVOKED;
207 }
208
209 // Handle case where the thread toward which the object was biased has exited
210 bool thread_is_alive = false;
211 if (requesting_thread == biased_thread) {
212 thread_is_alive = true;
213 } else {
214 for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
215 if (cur_thread == biased_thread) {
216 thread_is_alive = true;
217 break;
218 }
219 }
220 }
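// A dead bias owner cannot be holding the lock, so no stack walk is needed:
// the header can simply be reset, either to the anonymously biased state
// (if rebiasing is still allowed) or to the neutral unbiased state.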
221 if (!thread_is_alive) {
222 if (allow_rebias) {
223 obj->set_mark(biased_prototype);
224 } else {
225 obj->set_mark(unbiased_prototype);
226 }
227 // Log at "info" level if not bulk, else "trace" level
228 if (!is_bulk) {
229 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
230 PTR_FORMAT ")", p2i(biased_thread));
231 } else {
232 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
233 PTR_FORMAT ")", p2i(biased_thread));
234 }
365 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
366 bool bulk_rebias,
367 bool attempt_rebias_of_object,
368 JavaThread* requesting_thread) {
369 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
370
371 log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
372 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
373 (bulk_rebias ? "rebias" : "revoke"),
374 p2i((void *) o),
375 (intptr_t) o->mark(),
376 o->klass()->external_name());
377
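// The bulk revocation timestamp recorded below is consulted by the
// revocation heuristics elsewhere in this file: if enough time passes
// (BiasedLockingDecayTime) without further revocations, the per-type
// revocation count is allowed to decay rather than escalate.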
378 jlong cur_time = os::javaTimeMillis();
379 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
380
381
382 Klass* k_o = o->klass();
383 Klass* klass = k_o;
384
385 if (bulk_rebias) {
386       // Use the epoch in the klass of the object to implicitly revoke
387       // all biases of objects of this data type and force them to be
388       // reacquired. However, we also need to walk the stacks of all
389       // threads and bump the epoch in the headers of currently locked,
390       // biased objects of this type so that those held biases stay valid.
391
392 // If the prototype header doesn't have the bias pattern, don't
393 // try to update the epoch -- assume another VM operation came in
394 // and reset the header to the unbiased state, which will
395 // implicitly cause all existing biases to be revoked
396 if (klass->prototype_header()->has_bias_pattern()) {
397 int prev_epoch = klass->prototype_header()->bias_epoch();
398 klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
399 int cur_epoch = klass->prototype_header()->bias_epoch();
400
401 // Now walk all threads' stacks and adjust epochs of any biased
402 // and locked objects of this data type we encounter
403 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
404 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
405 for (int i = 0; i < cached_monitor_info->length(); i++) {
406 MonitorInfo* mon_info = cached_monitor_info->at(i);
407 oop owner = mon_info->owner();
408 markOop mark = owner->mark();
409 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
410 // We might have encountered this object already in the case of recursive locking
411 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
412 owner->set_mark(mark->set_bias_epoch(cur_epoch));
413 }
414 }
415 }
416 }
417
418 // At this point we're done. All we have to do is potentially
419 // adjust the header of the given object to revoke its bias.
420 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
421 } else {
422 if (log_is_enabled(Info, biasedlocking)) {
423 ResourceMark rm;
424 log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
425 }
426
427     // Disable biased locking for this data type. Not only does this
428     // prevent future instances from being biased, but existing biased
429     // instances will observe, on their next lock attempt, that their
430     // bias has been implicitly revoked.
431 klass->set_prototype_header(markOopDesc::prototype());
432
433 // Now walk all threads' stacks and forcibly revoke the biases of
434 // any locked and biased objects of this data type we encounter.
435 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
436 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
437 for (int i = 0; i < cached_monitor_info->length(); i++) {
438 MonitorInfo* mon_info = cached_monitor_info->at(i);
439 oop owner = mon_info->owner();
440 markOop mark = owner->mark();
441 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
442 revoke_bias(owner, false, true, requesting_thread);
443 }
444 }
445 }
446
447     // The bias of the passed object must also be forcibly revoked here
448     // to preserve the guarantees made to callers.
449 revoke_bias(o, false, true, requesting_thread);
450 }
451
452 log_info(biasedlocking)("* Ending bulk revocation");
453
454 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
455
456 if (attempt_rebias_of_object &&
457 o->mark()->has_bias_pattern() &&
458 klass->prototype_header()->has_bias_pattern()) {
459 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
460 klass->prototype_header()->bias_epoch());
461 o->set_mark(new_mark);
462 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
463 log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
464 }
465
466 assert(!o->mark()->has_bias_pattern() ||
467 (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
468 "bug in bulk bias revocation");
469
470 return status_code;
471 }
472
473
474 static void clean_up_cached_monitor_info() {
475 // Walk the thread list clearing out the cached monitors
476 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
477 thr->set_cached_monitor_info(NULL);
478 }
479 }
480
481
482 class VM_RevokeBias : public VM_Operation {
483 protected:
484 Handle* _obj;
485 GrowableArray<Handle>* _objs;
486 JavaThread* _requesting_thread;
487 BiasedLocking::Condition _status_code;
488
489 public:
490 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
491 : _obj(obj)
492 , _objs(NULL)
493 , _requesting_thread(requesting_thread)
494 , _status_code(BiasedLocking::NOT_BIASED) {}
495
496 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
711
712 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
713
714 assert(_preserved_oop_stack == NULL, "double initialization");
715 assert(_preserved_mark_stack == NULL, "double initialization");
716
717   // To reduce the number of mark words that have to be preserved during
718   // GC due to the presence of biased locking, we reinitialize most mark
719   // words to the class's prototype during GC -- even those which have
720   // a currently valid bias owner. One important situation where we
721   // must not clobber a bias is when a biased object is currently
722   // locked. To handle this case we iterate over the currently-locked
723   // monitors in a prepass and, if they are biased, preserve their
724   // mark words here. This should be a relatively small set of objects,
725   // especially compared to the number of objects in the heap.
726 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
727 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
728
729 ResourceMark rm;
730 Thread* cur = Thread::current();
731 for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
732 if (thread->has_last_Java_frame()) {
733       RegisterMap reg_map(thread);
734       for (javaVFrame* vf = thread->last_java_vframe(&reg_map); vf != NULL; vf = vf->java_sender()) {
735 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
736 if (monitors != NULL) {
737 int len = monitors->length();
738 // Walk monitors youngest to oldest
739 for (int i = len - 1; i >= 0; i--) {
740 MonitorInfo* mon_info = monitors->at(i);
741 if (mon_info->owner_is_scalar_replaced()) continue;
742 oop owner = mon_info->owner();
743 if (owner != NULL) {
744 markOop mark = owner->mark();
745 if (mark->has_bias_pattern()) {
746 _preserved_oop_stack->push(Handle(cur, owner));
747 _preserved_mark_stack->push(mark);
748 }
749 }
750 }
751 }
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/threadSMR.hpp"
36 #include "runtime/vframe.hpp"
37 #include "runtime/vmThread.hpp"
38 #include "runtime/vm_operations.hpp"
39
40 static bool _biased_locking_enabled = false;
41 BiasedLockingCounters BiasedLocking::_counters;
42
43 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
44 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
45
46 static void enable_biased_locking(InstanceKlass* k) {
47 k->set_prototype_header(markOopDesc::biased_locking_prototype());
48 }
49
50 class VM_EnableBiasedLocking: public VM_Operation {
51 private:
52 bool _is_cheap_allocated;
53 public:
54 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
55 VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
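// An anonymously biased object has the bias pattern set in its mark word
// but a NULL thread, i.e. no thread has actually acquired the bias yet, so
// revoking it never requires walking a stack; the header is simply reset.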
195 // Object is anonymously biased. We can get here if, for
196 // example, we revoke the bias due to an identity hash code
197 // being computed for an object.
198 if (!allow_rebias) {
199 obj->set_mark(unbiased_prototype);
200 }
201 // Log at "info" level if not bulk, else "trace" level
202 if (!is_bulk) {
203 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
204 } else {
205 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
206 }
207 return BiasedLocking::BIAS_REVOKED;
208 }
209
210 // Handle case where the thread toward which the object was biased has exited
211 bool thread_is_alive = false;
212 if (requesting_thread == biased_thread) {
213 thread_is_alive = true;
214 } else {
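// ThreadsListHandle provides a Thread-SMR protected snapshot of the
// JavaThread list; threads on the snapshot cannot be freed out from under
// this walk while we check whether the bias owner is still alive.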
215 ThreadsListHandle tlh;
216 JavaThreadIterator jti(tlh.list());
217 for (JavaThread* cur_thread = jti.first(); cur_thread != NULL; cur_thread = jti.next()) {
218 if (cur_thread == biased_thread) {
219 thread_is_alive = true;
220 break;
221 }
222 }
223 }
224 if (!thread_is_alive) {
225 if (allow_rebias) {
226 obj->set_mark(biased_prototype);
227 } else {
228 obj->set_mark(unbiased_prototype);
229 }
230 // Log at "info" level if not bulk, else "trace" level
231 if (!is_bulk) {
232 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
233 PTR_FORMAT ")", p2i(biased_thread));
234 } else {
235 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
236 PTR_FORMAT ")", p2i(biased_thread));
237 }
368 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
369 bool bulk_rebias,
370 bool attempt_rebias_of_object,
371 JavaThread* requesting_thread) {
372 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
373
374 log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
375 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
376 (bulk_rebias ? "rebias" : "revoke"),
377 p2i((void *) o),
378 (intptr_t) o->mark(),
379 o->klass()->external_name());
380
381 jlong cur_time = os::javaTimeMillis();
382 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
383
384
385 Klass* k_o = o->klass();
386 Klass* klass = k_o;
387
388 {
389 ThreadsListHandle tlh;
390 JavaThreadIterator jti(tlh.list());
391
392 if (bulk_rebias) {
393       // Use the epoch in the klass of the object to implicitly revoke
394       // all biases of objects of this data type and force them to be
395       // reacquired. However, we also need to walk the stacks of all
396       // threads and bump the epoch in the headers of currently locked,
397       // biased objects of this type so that those held biases stay valid.
398
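// Conceptually, a biased mark word is laid out as
//   [ JavaThread* owner | epoch | age | biased_lock:1 | lock:01 ]
// and a bias is only considered valid while its epoch matches the epoch in
// the klass prototype header. Bumping the prototype epoch therefore makes
// every existing (unlocked) bias of this type look expired, so the next
// thread to lock such an object can rebias it toward itself with a simple
// CAS instead of requiring a per-object revocation.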
399 // If the prototype header doesn't have the bias pattern, don't
400 // try to update the epoch -- assume another VM operation came in
401 // and reset the header to the unbiased state, which will
402 // implicitly cause all existing biases to be revoked
403 if (klass->prototype_header()->has_bias_pattern()) {
404 int prev_epoch = klass->prototype_header()->bias_epoch();
405 klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
406 int cur_epoch = klass->prototype_header()->bias_epoch();
407
408 // Now walk all threads' stacks and adjust epochs of any biased
409 // and locked objects of this data type we encounter
410 for (JavaThread* thr = jti.first(); thr != NULL; thr = jti.next()) {
411 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
412 for (int i = 0; i < cached_monitor_info->length(); i++) {
413 MonitorInfo* mon_info = cached_monitor_info->at(i);
414 oop owner = mon_info->owner();
415 markOop mark = owner->mark();
416 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
417 // We might have encountered this object already in the case of recursive locking
418 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
419 owner->set_mark(mark->set_bias_epoch(cur_epoch));
420 }
421 }
422 }
423 }
424
425 // At this point we're done. All we have to do is potentially
426 // adjust the header of the given object to revoke its bias.
427 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
428 } else {
429 if (log_is_enabled(Info, biasedlocking)) {
430 ResourceMark rm;
431 log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
432 }
433
434       // Disable biased locking for this data type. Not only does this
435       // prevent future instances from being biased, but existing biased
436       // instances will observe, on their next lock attempt, that their
437       // bias has been implicitly revoked.
438 klass->set_prototype_header(markOopDesc::prototype());
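// markOopDesc::prototype() is the neutral, unbiased header, so from now on
// new instances of this type are allocated without the bias pattern set.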
439
440 // Now walk all threads' stacks and forcibly revoke the biases of
441 // any locked and biased objects of this data type we encounter.
442 for (JavaThread* thr = jti.first(); thr != NULL; thr = jti.next()) {
443 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
444 for (int i = 0; i < cached_monitor_info->length(); i++) {
445 MonitorInfo* mon_info = cached_monitor_info->at(i);
446 oop owner = mon_info->owner();
447 markOop mark = owner->mark();
448 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
449 revoke_bias(owner, false, true, requesting_thread);
450 }
451 }
452 }
453
454       // The bias of the passed object must also be forcibly revoked here
455       // to preserve the guarantees made to callers.
456 revoke_bias(o, false, true, requesting_thread);
457 }
458 } // ThreadsListHandle is destroyed here.
459
460 log_info(biasedlocking)("* Ending bulk revocation");
461
462 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
463
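// If the caller asked for it and the type still permits biasing, hand the
// bias straight to the requesting thread by encoding a fresh biased header
// for it, carrying the object's age and the current prototype epoch.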
464 if (attempt_rebias_of_object &&
465 o->mark()->has_bias_pattern() &&
466 klass->prototype_header()->has_bias_pattern()) {
467 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
468 klass->prototype_header()->bias_epoch());
469 o->set_mark(new_mark);
470 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
471 log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
472 }
473
474 assert(!o->mark()->has_bias_pattern() ||
475 (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
476 "bug in bulk bias revocation");
477
478 return status_code;
479 }
480
481
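// The monitor info cached on each JavaThread by get_or_compute_monitor_info()
// is only valid for the duration of a single revocation operation, so it is
// cleared here once the operation completes.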
482 static void clean_up_cached_monitor_info() {
483 // Walk the thread list clearing out the cached monitors
484 ThreadsListHandle tlh;
485 JavaThreadIterator jti(tlh.list());
486 for (JavaThread* thr = jti.first(); thr != NULL; thr = jti.next()) {
487 thr->set_cached_monitor_info(NULL);
488 }
489 }
490
491
492 class VM_RevokeBias : public VM_Operation {
493 protected:
494 Handle* _obj;
495 GrowableArray<Handle>* _objs;
496 JavaThread* _requesting_thread;
497 BiasedLocking::Condition _status_code;
498
499 public:
500 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
501 : _obj(obj)
502 , _objs(NULL)
503 , _requesting_thread(requesting_thread)
504 , _status_code(BiasedLocking::NOT_BIASED) {}
505
506 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
721
722 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
723
724 assert(_preserved_oop_stack == NULL, "double initialization");
725 assert(_preserved_mark_stack == NULL, "double initialization");
726
727   // To reduce the number of mark words that have to be preserved during
728   // GC due to the presence of biased locking, we reinitialize most mark
729   // words to the class's prototype during GC -- even those which have
730   // a currently valid bias owner. One important situation where we
731   // must not clobber a bias is when a biased object is currently
732   // locked. To handle this case we iterate over the currently-locked
733   // monitors in a prepass and, if they are biased, preserve their
734   // mark words here. This should be a relatively small set of objects,
735   // especially compared to the number of objects in the heap.
736 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
737 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
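// These arrays are C-heap allocated (rather than resource allocated) because
// they must survive until the corresponding restore step after the
// collection, which reinstates the preserved headers and frees the arrays.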
738
739 ResourceMark rm;
740 Thread* cur = Thread::current();
741 ThreadsListHandle tlh;
742 JavaThreadIterator jti(tlh.list());
743 for (JavaThread* thread = jti.first(); thread != NULL; thread = jti.next()) {
744 if (thread->has_last_Java_frame()) {
745       RegisterMap reg_map(thread);
746       for (javaVFrame* vf = thread->last_java_vframe(&reg_map); vf != NULL; vf = vf->java_sender()) {
747 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
748 if (monitors != NULL) {
749 int len = monitors->length();
750 // Walk monitors youngest to oldest
751 for (int i = len - 1; i >= 0; i--) {
752 MonitorInfo* mon_info = monitors->at(i);
753 if (mon_info->owner_is_scalar_replaced()) continue;
754 oop owner = mon_info->owner();
755 if (owner != NULL) {
756 markOop mark = owner->mark();
757 if (mark->has_bias_pattern()) {
758 _preserved_oop_stack->push(Handle(cur, owner));
759 _preserved_mark_stack->push(mark);
760 }
761 }
762 }
763 }