15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/vframe.hpp"
36 #include "runtime/vmThread.hpp"
37 #include "runtime/vm_operations.hpp"
38
39 static bool _biased_locking_enabled = false;
40 BiasedLockingCounters BiasedLocking::_counters;
41
42 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
43 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
44
45 static void enable_biased_locking(InstanceKlass* k) {
46 k->set_prototype_header(markOopDesc::biased_locking_prototype());
47 }
48
49 class VM_EnableBiasedLocking: public VM_Operation {
50 private:
51 bool _is_cheap_allocated;
52 public:
53 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
54 VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
194 // Object is anonymously biased. We can get here if, for
195 // example, we revoke the bias due to an identity hash code
196 // being computed for an object.
197 if (!allow_rebias) {
198 obj->set_mark(unbiased_prototype);
199 }
200 // Log at "info" level if not bulk, else "trace" level
201 if (!is_bulk) {
202 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
203 } else {
204 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
205 }
206 return BiasedLocking::BIAS_REVOKED;
207 }
208
209 // Handle case where the thread toward which the object was biased has exited
210 bool thread_is_alive = false;
211 if (requesting_thread == biased_thread) {
212 thread_is_alive = true;
213 } else {
214 for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
215 if (cur_thread == biased_thread) {
216 thread_is_alive = true;
217 break;
218 }
219 }
220 }
221 if (!thread_is_alive) {
222 if (allow_rebias) {
223 obj->set_mark(biased_prototype);
224 } else {
225 obj->set_mark(unbiased_prototype);
226 }
227 // Log at "info" level if not bulk, else "trace" level
228 if (!is_bulk) {
229 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
230 PTR_FORMAT ")", p2i(biased_thread));
231 } else {
232 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
233 PTR_FORMAT ")", p2i(biased_thread));
234 }
// Bulk bias revocation/rebias covering all objects of o's klass. Must run
// at a safepoint. Two modes:
//  - bulk_rebias == true: bump the bias epoch in the klass' prototype
//    header (implicitly invalidating every existing bias of this type),
//    then walk all thread stacks and move currently-locked biased objects
//    of the type up to the new epoch so their owners keep their biases.
//  - bulk_rebias == false: reset the klass' prototype header to the plain
//    unbiased prototype (disabling biasing for the type) and forcibly
//    revoke the bias of every locked-and-biased instance found on any
//    thread stack, plus o itself.
// If attempt_rebias_of_object is true and the type still permits biasing
// afterwards, o is rebiased toward requesting_thread.
// Returns BIAS_REVOKED, or BIAS_REVOKED_AND_REBIASED when o was rebiased.
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  // Record when the last bulk revocation happened for this type
  // (presumably consulted by revocation heuristics elsewhere -- not
  // visible in this function).
  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
  } else {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread);
        }
      }
    }

    // Must force the bias of the passed object to be forcibly revoked
    // as well to ensure guarantees to callers
    revoke_bias(o, false, true, requesting_thread);
  }

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  // Rebias o toward the requester if asked to and still possible. This can
  // only hold after the bulk_rebias branch: the revoke branch reset the
  // klass' prototype header above, so has_bias_pattern() is false there.
  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  // Postcondition: o is either fully unbiased, or biased exactly toward
  // requesting_thread because we just rebiased it above.
  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
472
473
474 static void clean_up_cached_monitor_info() {
475 // Walk the thread list clearing out the cached monitors
476 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
477 thr->set_cached_monitor_info(NULL);
478 }
479 }
480
481
482 class VM_RevokeBias : public VM_Operation {
483 protected:
484 Handle* _obj;
485 GrowableArray<Handle>* _objs;
486 JavaThread* _requesting_thread;
487 BiasedLocking::Condition _status_code;
488
489 public:
490 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
491 : _obj(obj)
492 , _objs(NULL)
493 , _requesting_thread(requesting_thread)
494 , _status_code(BiasedLocking::NOT_BIASED) {}
495
496 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
711
712 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
713
714 assert(_preserved_oop_stack == NULL, "double initialization");
715 assert(_preserved_mark_stack == NULL, "double initialization");
716
717 // In order to reduce the number of mark words preserved during GC
718 // due to the presence of biased locking, we reinitialize most mark
719 // words to the class's prototype during GC -- even those which have
720 // a currently valid bias owner. One important situation where we
721 // must not clobber a bias is when a biased object is currently
722 // locked. To handle this case we iterate over the currently-locked
723 // monitors in a prepass and, if they are biased, preserve their
724 // mark words here. This should be a relatively small set of objects
725 // especially compared to the number of objects in the heap.
726 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
727 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
728
729 ResourceMark rm;
730 Thread* cur = Thread::current();
731 for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
732 if (thread->has_last_Java_frame()) {
733 RegisterMap rm(thread);
734 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
735 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
736 if (monitors != NULL) {
737 int len = monitors->length();
738 // Walk monitors youngest to oldest
739 for (int i = len - 1; i >= 0; i--) {
740 MonitorInfo* mon_info = monitors->at(i);
741 if (mon_info->owner_is_scalar_replaced()) continue;
742 oop owner = mon_info->owner();
743 if (owner != NULL) {
744 markOop mark = owner->mark();
745 if (mark->has_bias_pattern()) {
746 _preserved_oop_stack->push(Handle(cur, owner));
747 _preserved_mark_stack->push(mark);
748 }
749 }
750 }
751 }
|
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/threadSMR.hpp"
36 #include "runtime/vframe.hpp"
37 #include "runtime/vmThread.hpp"
38 #include "runtime/vm_operations.hpp"
39
40 static bool _biased_locking_enabled = false;
41 BiasedLockingCounters BiasedLocking::_counters;
42
43 static GrowableArray<Handle>* _preserved_oop_stack = NULL;
44 static GrowableArray<markOop>* _preserved_mark_stack = NULL;
45
46 static void enable_biased_locking(InstanceKlass* k) {
47 k->set_prototype_header(markOopDesc::biased_locking_prototype());
48 }
49
50 class VM_EnableBiasedLocking: public VM_Operation {
51 private:
52 bool _is_cheap_allocated;
53 public:
54 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
55 VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
195 // Object is anonymously biased. We can get here if, for
196 // example, we revoke the bias due to an identity hash code
197 // being computed for an object.
198 if (!allow_rebias) {
199 obj->set_mark(unbiased_prototype);
200 }
201 // Log at "info" level if not bulk, else "trace" level
202 if (!is_bulk) {
203 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
204 } else {
205 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
206 }
207 return BiasedLocking::BIAS_REVOKED;
208 }
209
210 // Handle case where the thread toward which the object was biased has exited
211 bool thread_is_alive = false;
212 if (requesting_thread == biased_thread) {
213 thread_is_alive = true;
214 } else {
215 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) {
216 if (cur_thread == biased_thread) {
217 thread_is_alive = true;
218 break;
219 }
220 }
221 }
222 if (!thread_is_alive) {
223 if (allow_rebias) {
224 obj->set_mark(biased_prototype);
225 } else {
226 obj->set_mark(unbiased_prototype);
227 }
228 // Log at "info" level if not bulk, else "trace" level
229 if (!is_bulk) {
230 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
231 PTR_FORMAT ")", p2i(biased_thread));
232 } else {
233 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
234 PTR_FORMAT ")", p2i(biased_thread));
235 }
// Bulk bias revocation/rebias covering all objects of o's klass. Must run
// at a safepoint. Two modes:
//  - bulk_rebias == true: bump the bias epoch in the klass' prototype
//    header (implicitly invalidating every existing bias of this type),
//    then walk all thread stacks and move currently-locked biased objects
//    of the type up to the new epoch so their owners keep their biases.
//  - bulk_rebias == false: reset the klass' prototype header to the plain
//    unbiased prototype (disabling biasing for the type) and forcibly
//    revoke the bias of every locked-and-biased instance found on any
//    thread stack, plus o itself.
// If attempt_rebias_of_object is true and the type still permits biasing
// afterwards, o is rebiased toward requesting_thread.
// Returns BIAS_REVOKED, or BIAS_REVOKED_AND_REBIASED when o was rebiased.
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  // Record when the last bulk revocation happened for this type
  // (presumably consulted by revocation heuristics elsewhere -- not
  // visible in this function).
  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    // One thread-list iterator (backed by a ThreadsListHandle) serves
    // whichever branch below walks the thread stacks; its scope is limited
    // to this block so the handle is released before we return.
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header()->has_bias_pattern()) {
        int prev_epoch = klass->prototype_header()->bias_epoch();
        klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
        int cur_epoch = klass->prototype_header()->bias_epoch();

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markOop mark = owner->mark();
            if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark->set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markOopDesc::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            revoke_bias(owner, false, true, requesting_thread);
          }
        }
      }

      // Must force the bias of the passed object to be forcibly revoked
      // as well to ensure guarantees to callers
      revoke_bias(o, false, true, requesting_thread);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  // Rebias o toward the requester if asked to and still possible. This can
  // only hold after the bulk_rebias branch: the revoke branch reset the
  // klass' prototype header above, so has_bias_pattern() is false there.
  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  // Postcondition: o is either fully unbiased, or biased exactly toward
  // requesting_thread because we just rebiased it above.
  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
477
478
479 static void clean_up_cached_monitor_info() {
480 // Walk the thread list clearing out the cached monitors
481 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
482 thr->set_cached_monitor_info(NULL);
483 }
484 }
485
486
487 class VM_RevokeBias : public VM_Operation {
488 protected:
489 Handle* _obj;
490 GrowableArray<Handle>* _objs;
491 JavaThread* _requesting_thread;
492 BiasedLocking::Condition _status_code;
493
494 public:
495 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
496 : _obj(obj)
497 , _objs(NULL)
498 , _requesting_thread(requesting_thread)
499 , _status_code(BiasedLocking::NOT_BIASED) {}
500
501 VM_RevokeBias(GrowableArray<Handle>* objs, JavaThread* requesting_thread)
716
717 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
718
719 assert(_preserved_oop_stack == NULL, "double initialization");
720 assert(_preserved_mark_stack == NULL, "double initialization");
721
722 // In order to reduce the number of mark words preserved during GC
723 // due to the presence of biased locking, we reinitialize most mark
724 // words to the class's prototype during GC -- even those which have
725 // a currently valid bias owner. One important situation where we
726 // must not clobber a bias is when a biased object is currently
727 // locked. To handle this case we iterate over the currently-locked
728 // monitors in a prepass and, if they are biased, preserve their
729 // mark words here. This should be a relatively small set of objects
730 // especially compared to the number of objects in the heap.
731 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
732 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
733
734 ResourceMark rm;
735 Thread* cur = Thread::current();
736 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
737 if (thread->has_last_Java_frame()) {
738 RegisterMap rm(thread);
739 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
740 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
741 if (monitors != NULL) {
742 int len = monitors->length();
743 // Walk monitors youngest to oldest
744 for (int i = len - 1; i >= 0; i--) {
745 MonitorInfo* mon_info = monitors->at(i);
746 if (mon_info->owner_is_scalar_replaced()) continue;
747 oop owner = mon_info->owner();
748 if (owner != NULL) {
749 markOop mark = owner->mark();
750 if (mark->has_bias_pattern()) {
751 _preserved_oop_stack->push(Handle(cur, owner));
752 _preserved_mark_stack->push(mark);
753 }
754 }
755 }
756 }
|