129 int len = monitors->length();
130 // Walk monitors youngest to oldest
131 for (int i = len - 1; i >= 0; i--) {
132 MonitorInfo* mon_info = monitors->at(i);
133 if (mon_info->eliminated()) continue;
134 oop owner = mon_info->owner();
135 if (owner != NULL) {
136 info->append(mon_info);
137 }
138 }
139 }
140 }
141 }
142
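// Cache the assembled monitor list on the thread. It is reused while the
// revocation runs and released afterwards by clean_up_cached_monitor_info().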
143 thread->set_cached_monitor_info(info);
144 return info;
145 }
146
147
148 static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
149 markOop mark = obj->mark();
150 if (!mark->has_bias_pattern()) {
151 if (TraceBiasedLocking) {
152 ResourceMark rm;
153 tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)",
154 obj->klass()->external_name());
155 }
156 return BiasedLocking::NOT_BIASED;
157 }
158
159 uint age = mark->age();
160 markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
161 markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
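// Two candidate replacement headers, both preserving the object's GC age:
// the anonymously-biased prototype (used when the bias may be re-acquired)
// and the plain unlocked prototype (used when the bias is revoked for good).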
162
163 if (TraceBiasedLocking && (Verbose || !is_bulk)) {
164 ResourceMark rm;
165 tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
166 p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
167 }
168
308 revocation_count = k->atomic_incr_biased_lock_revocation_count();
309 }
310
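// The equality tests below fire exactly once per type: a bulk rebias is
// requested when the count first reaches BiasedLockingBulkRebiasThreshold
// (default 20), a bulk revoke at BiasedLockingBulkRevokeThreshold (default 40).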
311 if (revocation_count == BiasedLockingBulkRevokeThreshold) {
312 return HR_BULK_REVOKE;
313 }
314
315 if (revocation_count == BiasedLockingBulkRebiasThreshold) {
316 return HR_BULK_REBIAS;
317 }
318
319 return HR_SINGLE_REVOKE;
320 }
321
322
323 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
324 bool bulk_rebias,
325 bool attempt_rebias_of_object,
326 JavaThread* requesting_thread) {
327 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
328
329 if (TraceBiasedLocking) {
330 tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
331 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
332 (bulk_rebias ? "rebias" : "revoke"),
333 p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name());
334 }
335
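// Stamp the klass with the time of this bulk operation; update_heuristics()
// compares it against BiasedLockingDecayTime to decay the revocation count
// of types that have since gone quiet.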
336 jlong cur_time = os::javaTimeMillis();
337 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
338
339
340 Klass* k_o = o->klass();
341 Klass* klass = k_o;
342
343 if (bulk_rebias) {
344 // Use the epoch in the klass of the object to implicitly revoke
345 // all biases of objects of this data type and force them to be
346 // reacquired. However, we also need to walk the stacks of all
347 // threads and update the headers of lightweight locked objects
348 // with biases to have the current epoch.
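// (An object whose mark-word epoch no longer matches its klass's current
// epoch is treated as having an expired, invalid bias the next time it is
// locked, without needing to touch it here.)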
350 // If the prototype header doesn't have the bias pattern, don't
351 // try to update the epoch -- assume another VM operation came in
352 // and reset the header to the unbiased state, which will
353 // implicitly cause all existing biases to be revoked
354 if (klass->prototype_header()->has_bias_pattern()) {
355 int prev_epoch = klass->prototype_header()->bias_epoch();
356 klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
357 int cur_epoch = klass->prototype_header()->bias_epoch();
358
359 // Now walk all threads' stacks and adjust epochs of any biased
360 // and locked objects of this data type we encounter
361 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
362 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
363 for (int i = 0; i < cached_monitor_info->length(); i++) {
364 MonitorInfo* mon_info = cached_monitor_info->at(i);
365 oop owner = mon_info->owner();
366 markOop mark = owner->mark();
367 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
368 // We might have encountered this object already in the case of recursive locking
369 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
370 owner->set_mark(mark->set_bias_epoch(cur_epoch));
371 }
372 }
373 }
374 }
375
376 // At this point we're done. All we have to do is potentially
377 // adjust the header of the given object to revoke its bias.
378 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
379 } else {
380 if (TraceBiasedLocking) {
381 ResourceMark rm;
382 tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
383 }
384
385 // Disable biased locking for this data type. Not only will this
386 // cause future instances to not be biased, but existing biased
387 // instances will notice that this implicitly caused their biases
388 // to be revoked.
389 klass->set_prototype_header(markOopDesc::prototype());
514 VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
515 bool bulk_rebias,
516 bool attempt_rebias_of_object)
517 : VM_RevokeBias(obj, requesting_thread)
518 , _bulk_rebias(bulk_rebias)
519 , _attempt_rebias_of_object(attempt_rebias_of_object) {}
520
521 virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
522 virtual bool doit_prologue() { return true; }
523
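// doit() runs in the VMThread with all Java threads stopped at the safepoint;
// the per-thread monitor caches built during the walk are dropped afterwards.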
524 virtual void doit() {
525 _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
526 clean_up_cached_monitor_info();
527 }
528 };
529
530
531 BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
532 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
533
534 // We can revoke the biases of anonymously-biased objects
535 // efficiently enough that we should not cause these revocations to
536 // update the heuristics because doing so may cause unwanted bulk
537 // revocations (which are expensive) to occur.
538 markOop mark = obj->mark();
539 if (mark->is_biased_anonymously() && !attempt_rebias) {
540 // We are probably trying to revoke the bias of this object due to
541 // an identity hash code computation. Try to revoke the bias
542 // without a safepoint. This is possible if we can successfully
543 // compare-and-exchange an unbiased header into the mark word of
544 // the object, meaning that no other thread has raced to acquire
545 // the bias of the object.
546 markOop biased_value = mark;
547 markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
548 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
549 if (res_mark == biased_value) {
550 return BIAS_REVOKED;
551 }
552 } else if (mark->has_bias_pattern()) {
553 Klass* k = obj->klass();
628 (heuristics == HR_BULK_REBIAS),
629 attempt_rebias);
630 VMThread::execute(&bulk_revoke);
631 return bulk_revoke.status_code();
632 }
633
634
635 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
636 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
637 if (objs->length() == 0) {
638 return;
639 }
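// Biases held by other threads can only be revoked safely while those threads
// are stopped, so the work is handed to the VMThread as a safepoint operation.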
640 VM_RevokeBias revoke(objs, JavaThread::current());
641 VMThread::execute(&revoke);
642 }
643
644
645 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
646 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
647 oop obj = h_obj();
648 HeuristicsResult heuristics = update_heuristics(obj, false);
649 if (heuristics == HR_SINGLE_REVOKE) {
650 revoke_bias(obj, false, false, NULL);
651 } else if ((heuristics == HR_BULK_REBIAS) ||
652 (heuristics == HR_BULK_REVOKE)) {
653 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
654 }
655 clean_up_cached_monitor_info();
656 }
657
658
659 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
660 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
661 int len = objs->length();
662 for (int i = 0; i < len; i++) {
663 oop obj = (objs->at(i))();
664 HeuristicsResult heuristics = update_heuristics(obj, false);
665 if (heuristics == HR_SINGLE_REVOKE) {
666 revoke_bias(obj, false, false, NULL);
667 } else if ((heuristics == HR_BULK_REBIAS) ||
691 // monitors in a prepass and, if they are biased, preserve their
692 // mark words here. This should be a relatively small set of objects
693 // especially compared to the number of objects in the heap.
694 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
695 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
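// The two stacks stay index-aligned: restore_marks() later pops matching
// (oop, mark) pairs and reinstalls each preserved biased header after GC.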
696
697 ResourceMark rm;
698 Thread* cur = Thread::current();
699 for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
700 if (thread->has_last_Java_frame()) {
701 RegisterMap rm(thread);
702 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
703 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
704 if (monitors != NULL) {
705 int len = monitors->length();
706 // Walk monitors youngest to oldest
707 for (int i = len - 1; i >= 0; i--) {
708 MonitorInfo* mon_info = monitors->at(i);
709 if (mon_info->owner_is_scalar_replaced()) continue;
710 oop owner = mon_info->owner();
711 if (owner != NULL) {
712 markOop mark = owner->mark();
713 if (mark->has_bias_pattern()) {
714 _preserved_oop_stack->push(Handle(cur, owner));
715 _preserved_mark_stack->push(mark);
716 }
717 }
718 }
719 }
720 }
721 }
722 }
723 }
724
725
726 void BiasedLocking::restore_marks() {
727 if (!UseBiasedLocking)
728 return;
729
730 assert(_preserved_oop_stack != NULL, "double free");
|
129 int len = monitors->length();
130 // Walk monitors youngest to oldest
131 for (int i = len - 1; i >= 0; i--) {
132 MonitorInfo* mon_info = monitors->at(i);
133 if (mon_info->eliminated()) continue;
134 oop owner = mon_info->owner();
135 if (owner != NULL) {
136 info->append(mon_info);
137 }
138 }
139 }
140 }
141 }
142
143 thread->set_cached_monitor_info(info);
144 return info;
145 }
146
147
148 static BiasedLocking::Condition revoke_bias(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread) {
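// Callers must pass the to-space copy of the object: resolve_oop() reads
// through the GC barrier's forwarding pointer, and a mark-word update applied
// to a stale from-space copy would be lost when that copy is discarded.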
149 assert(obj == oopDesc::bs()->resolve_oop(obj), "expect to-space copy");
150 markOop mark = obj->mark();
151 if (!mark->has_bias_pattern()) {
152 if (TraceBiasedLocking) {
153 ResourceMark rm;
154 tty->print_cr(" (Skipping revocation of object of type %s because it's no longer biased)",
155 obj->klass()->external_name());
156 }
157 return BiasedLocking::NOT_BIASED;
158 }
159
160 uint age = mark->age();
161 markOop biased_prototype = markOopDesc::biased_locking_prototype()->set_age(age);
162 markOop unbiased_prototype = markOopDesc::prototype()->set_age(age);
163
164 if (TraceBiasedLocking && (Verbose || !is_bulk)) {
165 ResourceMark rm;
166 tty->print_cr("Revoking bias of object " INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT " , allow rebias %d , requesting thread " INTPTR_FORMAT,
167 p2i((void *)obj), (intptr_t) mark, obj->klass()->external_name(), (intptr_t) obj->klass()->prototype_header(), (allow_rebias ? 1 : 0), (intptr_t) requesting_thread);
168 }
169
309 revocation_count = k->atomic_incr_biased_lock_revocation_count();
310 }
311
312 if (revocation_count == BiasedLockingBulkRevokeThreshold) {
313 return HR_BULK_REVOKE;
314 }
315
316 if (revocation_count == BiasedLockingBulkRebiasThreshold) {
317 return HR_BULK_REBIAS;
318 }
319
320 return HR_SINGLE_REVOKE;
321 }
322
323
324 static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
325 bool bulk_rebias,
326 bool attempt_rebias_of_object,
327 JavaThread* requesting_thread) {
328 assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
329 assert(o == oopDesc::bs()->resolve_oop(o), "expect to-space copy");
330 if (TraceBiasedLocking) {
331 tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
332 INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
333 (bulk_rebias ? "rebias" : "revoke"),
334 p2i((void *) o), (intptr_t) o->mark(), o->klass()->external_name());
335 }
336
337 jlong cur_time = os::javaTimeMillis();
338 o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);
339
340
341 Klass* k_o = o->klass();
342 Klass* klass = k_o;
343
344 if (bulk_rebias) {
345 // Use the epoch in the klass of the object to implicitly revoke
346 // all biases of objects of this data type and force them to be
347 // reacquired. However, we also need to walk the stacks of all
348 // threads and update the headers of lightweight locked objects
349 // with biases to have the current epoch.
351 // If the prototype header doesn't have the bias pattern, don't
352 // try to update the epoch -- assume another VM operation came in
353 // and reset the header to the unbiased state, which will
354 // implicitly cause all existing biases to be revoked
355 if (klass->prototype_header()->has_bias_pattern()) {
356 int prev_epoch = klass->prototype_header()->bias_epoch();
357 klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
358 int cur_epoch = klass->prototype_header()->bias_epoch();
359
360 // Now walk all threads' stacks and adjust epochs of any biased
361 // and locked objects of this data type we encounter
362 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
363 GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
364 for (int i = 0; i < cached_monitor_info->length(); i++) {
365 MonitorInfo* mon_info = cached_monitor_info->at(i);
366 oop owner = mon_info->owner();
367 markOop mark = owner->mark();
368 if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
369 // We might have encountered this object already in the case of recursive locking
370 assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
371 assert(owner == oopDesc::bs()->resolve_oop(owner), "expect to-space copy");
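// The epoch write just below updates the mark word in place, so it too must
// land on the to-space copy.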
372 owner->set_mark(mark->set_bias_epoch(cur_epoch));
373 }
374 }
375 }
376 }
377
378 // At this point we're done. All we have to do is potentially
379 // adjust the header of the given object to revoke its bias.
380 revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
381 } else {
382 if (TraceBiasedLocking) {
383 ResourceMark rm;
384 tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
385 }
386
387 // Disable biased locking for this data type. Not only will this
388 // cause future instances to not be biased, but existing biased
389 // instances will notice that this implicitly caused their biases
390 // to be revoked.
391 klass->set_prototype_header(markOopDesc::prototype());
516 VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
517 bool bulk_rebias,
518 bool attempt_rebias_of_object)
519 : VM_RevokeBias(obj, requesting_thread)
520 , _bulk_rebias(bulk_rebias)
521 , _attempt_rebias_of_object(attempt_rebias_of_object) {}
522
523 virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }
524 virtual bool doit_prologue() { return true; }
525
526 virtual void doit() {
527 _status_code = bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
528 clean_up_cached_monitor_info();
529 }
530 };
531
532
533 BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
534 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
535
536 assert(obj() == oopDesc::bs()->resolve_oop(obj()), "must be to-space copy");
537
538 // We can revoke the biases of anonymously-biased objects
539 // efficiently enough that we should not cause these revocations to
540 // update the heuristics because doing so may cause unwanted bulk
541 // revocations (which are expensive) to occur.
542 markOop mark = obj->mark();
543 if (mark->is_biased_anonymously() && !attempt_rebias) {
544 // We are probably trying to revoke the bias of this object due to
545 // an identity hash code computation. Try to revoke the bias
546 // without a safepoint. This is possible if we can successfully
547 // compare-and-exchange an unbiased header into the mark word of
548 // the object, meaning that no other thread has raced to acquire
549 // the bias of the object.
550 markOop biased_value = mark;
551 markOop unbiased_prototype = markOopDesc::prototype()->set_age(mark->age());
552 markOop res_mark = (markOop) Atomic::cmpxchg_ptr(unbiased_prototype, obj->mark_addr(), mark);
553 if (res_mark == biased_value) {
554 return BIAS_REVOKED;
555 }
556 } else if (mark->has_bias_pattern()) {
557 Klass* k = obj->klass();
632 (heuristics == HR_BULK_REBIAS),
633 attempt_rebias);
634 VMThread::execute(&bulk_revoke);
635 return bulk_revoke.status_code();
636 }
637
638
639 void BiasedLocking::revoke(GrowableArray<Handle>* objs) {
640 assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");
641 if (objs->length() == 0) {
642 return;
643 }
644 VM_RevokeBias revoke(objs, JavaThread::current());
645 VMThread::execute(&revoke);
646 }
647
648
649 void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
650 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
651 oop obj = h_obj();
652 assert(obj == oopDesc::bs()->resolve_oop(obj), "expect to-space copy");
653 HeuristicsResult heuristics = update_heuristics(obj, false);
654 if (heuristics == HR_SINGLE_REVOKE) {
655 revoke_bias(obj, false, false, NULL);
656 } else if ((heuristics == HR_BULK_REBIAS) ||
657 (heuristics == HR_BULK_REVOKE)) {
658 bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
659 }
660 clean_up_cached_monitor_info();
661 }
662
663
664 void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
665 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
666 int len = objs->length();
667 for (int i = 0; i < len; i++) {
668 oop obj = (objs->at(i))();
669 HeuristicsResult heuristics = update_heuristics(obj, false);
670 if (heuristics == HR_SINGLE_REVOKE) {
671 revoke_bias(obj, false, false, NULL);
672 } else if ((heuristics == HR_BULK_REBIAS) ||
696 // monitors in a prepass and, if they are biased, preserve their
697 // mark words here. This should be a relatively small set of objects
698 // especially compared to the number of objects in the heap.
699 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
700 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
701
702 ResourceMark rm;
703 Thread* cur = Thread::current();
704 for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
705 if (thread->has_last_Java_frame()) {
706 RegisterMap rm(thread);
707 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
708 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
709 if (monitors != NULL) {
710 int len = monitors->length();
711 // Walk monitors youngest to oldest
712 for (int i = len - 1; i >= 0; i--) {
713 MonitorInfo* mon_info = monitors->at(i);
714 if (mon_info->owner_is_scalar_replaced()) continue;
715 oop owner = mon_info->owner();
716 assert(owner == oopDesc::bs()->resolve_oop(owner), "expect to-space copy");
717 if (owner != NULL) {
718 markOop mark = owner->mark();
719 if (mark->has_bias_pattern()) {
720 _preserved_oop_stack->push(Handle(cur, owner));
721 _preserved_mark_stack->push(mark);
722 }
723 }
724 }
725 }
726 }
727 }
728 }
729 }
730
731
732 void BiasedLocking::restore_marks() {
733 if (!UseBiasedLocking)
734 return;
735
736 assert(_preserved_oop_stack != NULL, "double free");