195 // Object is anonymously biased. We can get here if, for
196 // example, we revoke the bias due to an identity hash code
197 // being computed for an object.
198 if (!allow_rebias) {
199 obj->set_mark(unbiased_prototype);
200 }
201 // Log at "info" level if not bulk, else "trace" level
202 if (!is_bulk) {
203 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
204 } else {
205 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
206 }
207 return BiasedLocking::BIAS_REVOKED;
208 }
209
210 // Handle case where the thread toward which the object was biased has exited
211 bool thread_is_alive = false;
212 if (requesting_thread == biased_thread) {
213 thread_is_alive = true;
214 } else {
215 ThreadsListHandle tlh;
216 JavaThreadIterator jti(tlh.list());
217 for (JavaThread* cur_thread = jti.first(); cur_thread != NULL; cur_thread = jti.next()) {
218 if (cur_thread == biased_thread) {
219 thread_is_alive = true;
220 break;
221 }
222 }
223 }
224 if (!thread_is_alive) {
225 if (allow_rebias) {
226 obj->set_mark(biased_prototype);
227 } else {
228 obj->set_mark(unbiased_prototype);
229 }
230 // Log at "info" level if not bulk, else "trace" level
231 if (!is_bulk) {
232 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
233 PTR_FORMAT ")", p2i(biased_thread));
234 } else {
235 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
236 PTR_FORMAT ")", p2i(biased_thread));
237 }
369 bool bulk_rebias,
370 bool attempt_rebias_of_object,
371 JavaThread* requesting_thread) {
372 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
373
374 log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
375 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
376 (bulk_rebias ? "rebias" : "revoke"),
377 p2i((void *) o),
378 (intptr_t) o->mark(),
379 o->klass()->external_name());
380
381 jlong cur_time = os::javaTimeMillis();
382 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
383
384
385 Klass* k_o = o->klass();
386 Klass* klass = k_o;
387
388 {
389 ThreadsListHandle tlh;
390 JavaThreadIterator jti(tlh.list());
391
392 if (bulk_rebias) {
393 // Use the epoch in the klass of the object to implicitly revoke
394 // all biases of objects of this data type and force them to be
395 // reacquired. However, we also need to walk the stacks of all
396 // threads and update the headers of lightweight locked objects
397 // with biases to have the current epoch.
398
399 // If the prototype header doesn't have the bias pattern, don't
400 // try to update the epoch -- assume another VM operation came in
401 // and reset the header to the unbiased state, which will
402 // implicitly cause all existing biases to be revoked
403 if (klass->prototype_header()->has_bias_pattern()) {
404 int prev_epoch = klass->prototype_header()->bias_epoch();
405 klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
406 int cur_epoch = klass->prototype_header()->bias_epoch();
407
408 // Now walk all threads' stacks and adjust epochs of any biased
409 // and locked objects of this data type we encounter
410 for (JavaThread* thr = jti.first(); thr != NULL; thr = jti.next()) {
411 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
412 for (int i = 0; i < cached_monitor_info->length(); i++) {
413 MonitorInfo* mon_info = cached_monitor_info->at(i);
414 oop owner = mon_info->owner();
415 markOop mark = owner->mark();
416 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
417 // We might have encountered this object already in the case of recursive locking
418 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
419 owner->set_mark(mark->set_bias_epoch(cur_epoch));
420 }
421 }
422 }
423 }
424
425 // At this point we're done. All we have to do is potentially
426 // adjust the header of the given object to revoke its bias.
427 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
428 } else {
429 if (log_is_enabled(Info, biasedlocking)) {
430 ResourceMark rm;
431 log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
432 }
433
434 // Disable biased locking for this data type. Not only will this
435 // cause future instances to not be biased, but existing biased
436 // instances will notice that this implicitly caused their biases
437 // to be revoked.
438 klass->set_prototype_header(markOopDesc::prototype());
439
440 // Now walk all threads' stacks and forcibly revoke the biases of
441 // any locked and biased objects of this data type we encounter.
442 for (JavaThread* thr = jti.first(); thr != NULL; thr = jti.next()) {
443 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
444 for (int i = 0; i < cached_monitor_info->length(); i++) {
445 MonitorInfo* mon_info = cached_monitor_info->at(i);
446 oop owner = mon_info->owner();
447 markOop mark = owner->mark();
448 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
449 revoke_bias(owner, false, true, requesting_thread);
450 }
451 }
452 }
453
454 // Must force the bias of the passed object to be forcibly revoked
455 // as well to ensure guarantees to callers
456 revoke_bias(o, false, true, requesting_thread);
457 }
458 } // ThreadsListHandle is destroyed here.
459
460 log_info(biasedlocking)("* Ending bulk revocation");
461
462 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
464 if (attempt_rebias_of_object &&
465 o->mark()->has_bias_pattern() &&
466 klass->prototype_header()->has_bias_pattern()) {
467 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
468 klass->prototype_header()->bias_epoch());
469 o->set_mark(new_mark);
470 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
471 log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
472 }
473
474 assert(!o->mark()->has_bias_pattern() ||
475 (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
476 "bug in bulk bias revocation");
477
478 return status_code;
479 }
480
481
// Clears each JavaThread's cached monitor-info list. These caches are filled
// in by get_or_compute_monitor_info() during the bias-revocation stack walks
// (see the bulk revocation code above); NULLing the pointer here forces the
// next revocation to recompute the monitor info from the thread's stack.
// NOTE(review): the ThreadsListHandle presumably keeps the thread list stable
// (Thread-SMR) for the duration of the iteration -- confirm against the
// threadSMR protocol. The GrowableArray storage being dropped is presumably
// resource-area allocated, so no explicit free is needed -- confirm.
482 static void clean_up_cached_monitor_info() {
483 // Walk the thread list clearing out the cached monitors
484 ThreadsListHandle tlh;
485 JavaThreadIterator jti(tlh.list());
486 for (JavaThread* thr = jti.first(); thr != NULL; thr = jti.next()) {
487 thr->set_cached_monitor_info(NULL);
488 }
489 }
490
491
492 class VM_RevokeBias : public VM_Operation {
493 protected:
494 Handle* _obj;
495 GrowableArray<Handle>* _objs;
496 JavaThread* _requesting_thread;
497 BiasedLocking::Condition _status_code;
498
499 public:
500 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
501 : _obj(obj)
502 , _objs(NULL)
503 , _requesting_thread(requesting_thread)
504 , _status_code(BiasedLocking::NOT_BIASED) {}
505
506 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
721
722 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
723
724 assert(_preserved_oop_stack == NULL, "double initialization");
725 assert(_preserved_mark_stack == NULL, "double initialization");
726
727 // In order to reduce the number of mark words preserved during GC
728 // due to the presence of biased locking, we reinitialize most mark
729 // words to the class's prototype during GC -- even those which have
730 // a currently valid bias owner. One important situation where we
731 // must not clobber a bias is when a biased object is currently
732 // locked. To handle this case we iterate over the currently-locked
733 // monitors in a prepass and, if they are biased, preserve their
734 // mark words here. This should be a relatively small set of objects
735 // especially compared to the number of objects in the heap.
736 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
737 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
738
739 ResourceMark rm;
740 Thread* cur = Thread::current();
741 ThreadsListHandle tlh;
742 JavaThreadIterator jti(tlh.list());
743 for (JavaThread* thread = jti.first(); thread != NULL; thread = jti.next()) {
744 if (thread->has_last_Java_frame()) {
745 RegisterMap rm(thread);
746 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
747 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
748 if (monitors != NULL) {
749 int len = monitors->length();
750 // Walk monitors youngest to oldest
751 for (int i = len - 1; i >= 0; i--) {
752 MonitorInfo* mon_info = monitors->at(i);
753 if (mon_info->owner_is_scalar_replaced()) continue;
754 oop owner = mon_info->owner();
755 if (owner != NULL) {
756 markOop mark = owner->mark();
757 if (mark->has_bias_pattern()) {
758 _preserved_oop_stack->push(Handle(cur, owner));
759 _preserved_mark_stack->push(mark);
760 }
761 }
762 }
763 }
|
195 // Object is anonymously biased. We can get here if, for
196 // example, we revoke the bias due to an identity hash code
197 // being computed for an object.
198 if (!allow_rebias) {
199 obj->set_mark(unbiased_prototype);
200 }
201 // Log at "info" level if not bulk, else "trace" level
202 if (!is_bulk) {
203 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
204 } else {
205 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
206 }
207 return BiasedLocking::BIAS_REVOKED;
208 }
209
210 // Handle case where the thread toward which the object was biased has exited
211 bool thread_is_alive = false;
212 if (requesting_thread == biased_thread) {
213 thread_is_alive = true;
214 } else {
215 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) {
216 if (cur_thread == biased_thread) {
217 thread_is_alive = true;
218 break;
219 }
220 }
221 }
222 if (!thread_is_alive) {
223 if (allow_rebias) {
224 obj->set_mark(biased_prototype);
225 } else {
226 obj->set_mark(unbiased_prototype);
227 }
228 // Log at "info" level if not bulk, else "trace" level
229 if (!is_bulk) {
230 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
231 PTR_FORMAT ")", p2i(biased_thread));
232 } else {
233 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
234 PTR_FORMAT ")", p2i(biased_thread));
235 }
367 bool bulk_rebias,
368 bool attempt_rebias_of_object,
369 JavaThread* requesting_thread) {
370 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
371
372 log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
373 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
374 (bulk_rebias ? "rebias" : "revoke"),
375 p2i((void *) o),
376 (intptr_t) o->mark(),
377 o->klass()->external_name());
378
379 jlong cur_time = os::javaTimeMillis();
380 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
381
382
383 Klass* k_o = o->klass();
384 Klass* klass = k_o;
385
386 {
387 JavaThreadIteratorWithHandle jtiwh;
388
389 if (bulk_rebias) {
390 // Use the epoch in the klass of the object to implicitly revoke
391 // all biases of objects of this data type and force them to be
392 // reacquired. However, we also need to walk the stacks of all
393 // threads and update the headers of lightweight locked objects
394 // with biases to have the current epoch.
395
396 // If the prototype header doesn't have the bias pattern, don't
397 // try to update the epoch -- assume another VM operation came in
398 // and reset the header to the unbiased state, which will
399 // implicitly cause all existing biases to be revoked
400 if (klass->prototype_header()->has_bias_pattern()) {
401 int prev_epoch = klass->prototype_header()->bias_epoch();
402 klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
403 int cur_epoch = klass->prototype_header()->bias_epoch();
404
405 // Now walk all threads' stacks and adjust epochs of any biased
406 // and locked objects of this data type we encounter
407 for (; JavaThread *thr = jtiwh.next(); ) {
408 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
409 for (int i = 0; i < cached_monitor_info->length(); i++) {
410 MonitorInfo* mon_info = cached_monitor_info->at(i);
411 oop owner = mon_info->owner();
412 markOop mark = owner->mark();
413 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
414 // We might have encountered this object already in the case of recursive locking
415 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
416 owner->set_mark(mark->set_bias_epoch(cur_epoch));
417 }
418 }
419 }
420 }
421
422 // At this point we're done. All we have to do is potentially
423 // adjust the header of the given object to revoke its bias.
424 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
425 } else {
426 if (log_is_enabled(Info, biasedlocking)) {
427 ResourceMark rm;
428 log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
429 }
430
431 // Disable biased locking for this data type. Not only will this
432 // cause future instances to not be biased, but existing biased
433 // instances will notice that this implicitly caused their biases
434 // to be revoked.
435 klass->set_prototype_header(markOopDesc::prototype());
436
437 // Now walk all threads' stacks and forcibly revoke the biases of
438 // any locked and biased objects of this data type we encounter.
439 for (; JavaThread *thr = jtiwh.next(); ) {
440 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
441 for (int i = 0; i < cached_monitor_info->length(); i++) {
442 MonitorInfo* mon_info = cached_monitor_info->at(i);
443 oop owner = mon_info->owner();
444 markOop mark = owner->mark();
445 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
446 revoke_bias(owner, false, true, requesting_thread);
447 }
448 }
449 }
450
451 // Must force the bias of the passed object to be forcibly revoked
452 // as well to ensure guarantees to callers
453 revoke_bias(o, false, true, requesting_thread);
454 }
455 } // ThreadsListHandle is destroyed here.
456
457 log_info(biasedlocking)("* Ending bulk revocation");
458
459 BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;
461 if (attempt_rebias_of_object &&
462 o->mark()->has_bias_pattern() &&
463 klass->prototype_header()->has_bias_pattern()) {
464 markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
465 klass->prototype_header()->bias_epoch());
466 o->set_mark(new_mark);
467 status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
468 log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
469 }
470
471 assert(!o->mark()->has_bias_pattern() ||
472 (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
473 "bug in bulk bias revocation");
474
475 return status_code;
476 }
477
478
// Clears each JavaThread's cached monitor-info list. These caches are filled
// in by get_or_compute_monitor_info() during the bias-revocation stack walks
// (see the bulk revocation code above); NULLing the pointer here forces the
// next revocation to recompute the monitor info from the thread's stack.
// The JavaThreadIteratorWithHandle bundles the ThreadsListHandle and the
// iterator of the older variant into one object; it presumably provides the
// same Thread-SMR protection of the thread list -- confirm against threadSMR.
// The GrowableArray storage being dropped is presumably resource-area
// allocated, so no explicit free is needed -- confirm.
479 static void clean_up_cached_monitor_info() {
480 // Walk the thread list clearing out the cached monitors
481 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
482 thr->set_cached_monitor_info(NULL);
483 }
484 }
485
486
487 class VM_RevokeBias : public VM_Operation {
488 protected:
489 Handle* _obj;
490 GrowableArray<Handle>* _objs;
491 JavaThread* _requesting_thread;
492 BiasedLocking::Condition _status_code;
493
494 public:
495 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
496 : _obj(obj)
497 , _objs(NULL)
498 , _requesting_thread(requesting_thread)
499 , _status_code(BiasedLocking::NOT_BIASED) {}
500
501 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
716
717 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
718
719 assert(_preserved_oop_stack == NULL, "double initialization");
720 assert(_preserved_mark_stack == NULL, "double initialization");
721
722 // In order to reduce the number of mark words preserved during GC
723 // due to the presence of biased locking, we reinitialize most mark
724 // words to the class's prototype during GC -- even those which have
725 // a currently valid bias owner. One important situation where we
726 // must not clobber a bias is when a biased object is currently
727 // locked. To handle this case we iterate over the currently-locked
728 // monitors in a prepass and, if they are biased, preserve their
729 // mark words here. This should be a relatively small set of objects
730 // especially compared to the number of objects in the heap.
731 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
732 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
733
734 ResourceMark rm;
735 Thread* cur = Thread::current();
736 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
737 if (thread->has_last_Java_frame()) {
738 RegisterMap rm(thread);
739 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
740 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
741 if (monitors != NULL) {
742 int len = monitors->length();
743 // Walk monitors youngest to oldest
744 for (int i = len - 1; i >= 0; i--) {
745 MonitorInfo* mon_info = monitors->at(i);
746 if (mon_info->owner_is_scalar_replaced()) continue;
747 oop owner = mon_info->owner();
748 if (owner != NULL) {
749 markOop mark = owner->mark();
750 if (mark->has_bias_pattern()) {
751 _preserved_oop_stack->push(Handle(cur, owner));
752 _preserved_mark_stack->push(mark);
753 }
754 }
755 }
756 }
|