15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/vframe.hpp"
36 #include "runtime/vmThread.hpp"
37 #include "runtime/vm_operations.hpp"
38 #include "trace/tracing.hpp"
39
// True once biased locking has been turned on VM-wide; flipped by the
// VM_EnableBiasedLocking operation defined below -- TODO confirm the
// exact setter, it is outside this chunk.
static bool _biased_locking_enabled = false;
// Global revocation statistics for BiasedLocking (declared in biasedLocking.hpp).
BiasedLockingCounters BiasedLocking::_counters;

// Parallel stacks of (object handle, mark word) pairs used to preserve
// the mark words of currently-locked biased objects across a GC; filled
// by the preserve-marks pass later in this file and expected to be NULL
// between GCs (see the "double initialization" asserts below).
static GrowableArray<Handle>* _preserved_oop_stack = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;
45
46 static void enable_biased_locking(InstanceKlass* k) {
47 k->set_prototype_header(markOopDesc::biased_locking_prototype());
48 }
49
50 class VM_EnableBiasedLocking: public VM_Operation {
51 private:
52 bool _is_cheap_allocated;
53 public:
54 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
197 // Object is anonymously biased. We can get here if, for
198 // example, we revoke the bias due to an identity hash code
199 // being computed for an object.
200 if (!allow_rebias) {
201 obj->set_mark(unbiased_prototype);
202 }
203 // Log at "info" level if not bulk, else "trace" level
204 if (!is_bulk) {
205 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
206 } else {
207 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
208 }
209 return BiasedLocking::BIAS_REVOKED;
210 }
211
212 // Handle case where the thread toward which the object was biased has exited
213 bool thread_is_alive = false;
214 if (requesting_thread == biased_thread) {
215 thread_is_alive = true;
216 } else {
217 for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
218 if (cur_thread == biased_thread) {
219 thread_is_alive = true;
220 break;
221 }
222 }
223 }
224 if (!thread_is_alive) {
225 if (allow_rebias) {
226 obj->set_mark(biased_prototype);
227 } else {
228 obj->set_mark(unbiased_prototype);
229 }
230 // Log at "info" level if not bulk, else "trace" level
231 if (!is_bulk) {
232 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
233 PTR_FORMAT ")", p2i(biased_thread));
234 } else {
235 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
236 PTR_FORMAT ")", p2i(biased_thread));
237 }
// Bulk-revoke (or bulk-rebias) the biases of all objects of the type of 'o'.
// Must run at a safepoint so no Java thread mutates object headers or its
// own stack concurrently.
//
//  o                        - object whose lock triggered this bulk operation
//  bulk_rebias              - true:  bump the klass's bias epoch so existing
//                                    biases become stale and may be rebiased
//                                    lazily on next acquisition
//                             false: disable biased locking for the whole
//                                    type and eagerly revoke every live bias
//  attempt_rebias_of_object - if true, try to rebias 'o' itself toward
//                             requesting_thread once the bulk pass is done
//  requesting_thread        - thread on whose behalf the operation runs
//
// Returns BiasedLocking::BIAS_REVOKED, or BIAS_REVOKED_AND_REBIASED when
// 'o' was successfully rebiased toward requesting_thread.
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  // Record when this type last saw a bulk revocation; presumably consulted
  // by the revocation heuristics elsewhere in this file -- TODO confirm.
  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
  } else {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread, NULL);
        }
      }
    }

    // Must force the bias of the passed object to be forcibly revoked
    // as well to ensure guarantees to callers
    revoke_bias(o, false, true, requesting_thread, NULL);
  }

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  // Rebias 'o' toward the requester only if it is still biasable after the
  // bulk pass: both the object's mark and the klass prototype must retain
  // the bias pattern (the revoke path above may have cleared either).
  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  // Postcondition: either 'o' is no longer biased at all, or it was
  // deliberately rebiased toward the requesting thread above.
  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
480
481
482 static void clean_up_cached_monitor_info() {
483 // Walk the thread list clearing out the cached monitors
484 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
485 thr->set_cached_monitor_info(NULL);
486 }
487 }
488
489
490 class VM_RevokeBias : public VM_Operation {
491 protected:
492 Handle* _obj;
493 GrowableArray<Handle>* _objs;
494 JavaThread* _requesting_thread;
495 BiasedLocking::Condition _status_code;
496 traceid _biased_locker_id;
497
498 public:
499 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
500 : _obj(obj)
501 , _objs(NULL)
502 , _requesting_thread(requesting_thread)
503 , _status_code(BiasedLocking::NOT_BIASED)
504 , _biased_locker_id(0) {}
751
752 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
753
754 assert(_preserved_oop_stack == NULL, "double initialization");
755 assert(_preserved_mark_stack == NULL, "double initialization");
756
757 // In order to reduce the number of mark words preserved during GC
758 // due to the presence of biased locking, we reinitialize most mark
759 // words to the class's prototype during GC -- even those which have
760 // a currently valid bias owner. One important situation where we
761 // must not clobber a bias is when a biased object is currently
762 // locked. To handle this case we iterate over the currently-locked
763 // monitors in a prepass and, if they are biased, preserve their
764 // mark words here. This should be a relatively small set of objects
765 // especially compared to the number of objects in the heap.
766 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
767 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
768
769 ResourceMark rm;
770 Thread* cur = Thread::current();
771 for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
772 if (thread->has_last_Java_frame()) {
773 RegisterMap rm(thread);
774 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
775 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
776 if (monitors != NULL) {
777 int len = monitors->length();
778 // Walk monitors youngest to oldest
779 for (int i = len - 1; i >= 0; i--) {
780 MonitorInfo* mon_info = monitors->at(i);
781 if (mon_info->owner_is_scalar_replaced()) continue;
782 oop owner = mon_info->owner();
783 if (owner != NULL) {
784 markOop mark = owner->mark();
785 if (mark->has_bias_pattern()) {
786 _preserved_oop_stack->push(Handle(cur, owner));
787 _preserved_mark_stack->push(mark);
788 }
789 }
790 }
791 }
|
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/threadSMR.hpp"
36 #include "runtime/vframe.hpp"
37 #include "runtime/vmThread.hpp"
38 #include "runtime/vm_operations.hpp"
39 #include "trace/tracing.hpp"
40
// True once biased locking has been turned on VM-wide; flipped by the
// VM_EnableBiasedLocking operation defined below -- TODO confirm the
// exact setter, it is outside this chunk.
static bool _biased_locking_enabled = false;
// Global revocation statistics for BiasedLocking (declared in biasedLocking.hpp).
BiasedLockingCounters BiasedLocking::_counters;

// Parallel stacks of (object handle, mark word) pairs used to preserve
// the mark words of currently-locked biased objects across a GC; filled
// by the preserve-marks pass later in this file and expected to be NULL
// between GCs (see the "double initialization" asserts below).
static GrowableArray<Handle>* _preserved_oop_stack = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;
46
47 static void enable_biased_locking(InstanceKlass* k) {
48 k->set_prototype_header(markOopDesc::biased_locking_prototype());
49 }
50
51 class VM_EnableBiasedLocking: public VM_Operation {
52 private:
53 bool _is_cheap_allocated;
54 public:
55 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
198 // Object is anonymously biased. We can get here if, for
199 // example, we revoke the bias due to an identity hash code
200 // being computed for an object.
201 if (!allow_rebias) {
202 obj->set_mark(unbiased_prototype);
203 }
204 // Log at "info" level if not bulk, else "trace" level
205 if (!is_bulk) {
206 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
207 } else {
208 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
209 }
210 return BiasedLocking::BIAS_REVOKED;
211 }
212
213 // Handle case where the thread toward which the object was biased has exited
214 bool thread_is_alive = false;
215 if (requesting_thread == biased_thread) {
216 thread_is_alive = true;
217 } else {
218 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *cur_thread = jtiwh.next(); ) {
219 if (cur_thread == biased_thread) {
220 thread_is_alive = true;
221 break;
222 }
223 }
224 }
225 if (!thread_is_alive) {
226 if (allow_rebias) {
227 obj->set_mark(biased_prototype);
228 } else {
229 obj->set_mark(unbiased_prototype);
230 }
231 // Log at "info" level if not bulk, else "trace" level
232 if (!is_bulk) {
233 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
234 PTR_FORMAT ")", p2i(biased_thread));
235 } else {
236 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
237 PTR_FORMAT ")", p2i(biased_thread));
238 }
// Bulk-revoke (or bulk-rebias) the biases of all objects of the type of 'o'.
// Must run at a safepoint so no Java thread mutates object headers or its
// own stack concurrently.
//
//  o                        - object whose lock triggered this bulk operation
//  bulk_rebias              - true:  bump the klass's bias epoch so existing
//                                    biases become stale and may be rebiased
//                                    lazily on next acquisition
//                             false: disable biased locking for the whole
//                                    type and eagerly revoke every live bias
//  attempt_rebias_of_object - if true, try to rebias 'o' itself toward
//                             requesting_thread once the bulk pass is done
//  requesting_thread        - thread on whose behalf the operation runs
//
// Returns BiasedLocking::BIAS_REVOKED, or BIAS_REVOKED_AND_REBIASED when
// 'o' was successfully rebiased toward requesting_thread.
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  // Record when this type last saw a bulk revocation; presumably consulted
  // by the revocation heuristics elsewhere in this file -- TODO confirm.
  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    // Thread-SMR: the iterator holds a ThreadsListHandle keeping every
    // JavaThread it returns safe from deletion for the whole scope.
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header()->has_bias_pattern()) {
        int prev_epoch = klass->prototype_header()->bias_epoch();
        klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
        int cur_epoch = klass->prototype_header()->bias_epoch();

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markOop mark = owner->mark();
            if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark->set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markOopDesc::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            revoke_bias(owner, false, true, requesting_thread, NULL);
          }
        }
      }

      // Must force the bias of the passed object to be forcibly revoked
      // as well to ensure guarantees to callers
      revoke_bias(o, false, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  // Rebias 'o' toward the requester only if it is still biasable after the
  // bulk pass: both the object's mark and the klass prototype must retain
  // the bias pattern (the revoke path above may have cleared either).
  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  // Postcondition: either 'o' is no longer biased at all, or it was
  // deliberately rebiased toward the requesting thread above.
  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
485
486
487 static void clean_up_cached_monitor_info() {
488 // Walk the thread list clearing out the cached monitors
489 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
490 thr->set_cached_monitor_info(NULL);
491 }
492 }
493
494
495 class VM_RevokeBias : public VM_Operation {
496 protected:
497 Handle* _obj;
498 GrowableArray<Handle>* _objs;
499 JavaThread* _requesting_thread;
500 BiasedLocking::Condition _status_code;
501 traceid _biased_locker_id;
502
503 public:
504 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
505 : _obj(obj)
506 , _objs(NULL)
507 , _requesting_thread(requesting_thread)
508 , _status_code(BiasedLocking::NOT_BIASED)
509 , _biased_locker_id(0) {}
756
757 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
758
759 assert(_preserved_oop_stack == NULL, "double initialization");
760 assert(_preserved_mark_stack == NULL, "double initialization");
761
762 // In order to reduce the number of mark words preserved during GC
763 // due to the presence of biased locking, we reinitialize most mark
764 // words to the class's prototype during GC -- even those which have
765 // a currently valid bias owner. One important situation where we
766 // must not clobber a bias is when a biased object is currently
767 // locked. To handle this case we iterate over the currently-locked
768 // monitors in a prepass and, if they are biased, preserve their
769 // mark words here. This should be a relatively small set of objects
770 // especially compared to the number of objects in the heap.
771 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
772 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
773
774 ResourceMark rm;
775 Thread* cur = Thread::current();
776 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
777 if (thread->has_last_Java_frame()) {
778 RegisterMap rm(thread);
779 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
780 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
781 if (monitors != NULL) {
782 int len = monitors->length();
783 // Walk monitors youngest to oldest
784 for (int i = len - 1; i >= 0; i--) {
785 MonitorInfo* mon_info = monitors->at(i);
786 if (mon_info->owner_is_scalar_replaced()) continue;
787 oop owner = mon_info->owner();
788 if (owner != NULL) {
789 markOop mark = owner->mark();
790 if (mark->has_bias_pattern()) {
791 _preserved_oop_stack->push(Handle(cur, owner));
792 _preserved_mark_stack->push(mark);
793 }
794 }
795 }
796 }
|