15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/vframe.hpp"
36 #include "runtime/vmThread.hpp"
37 #include "runtime/vm_operations.hpp"
38 #include "trace/tracing.hpp"
39
// Tracks whether biased locking is currently enabled for the VM (starts
// disabled; presumably flipped by the VM_EnableBiasedLocking operation
// declared below -- the flip itself is not visible in this chunk).
static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

// Stacks used to save and restore, across a GC, the mark words of objects
// that are biased AND currently locked (see the mark-preservation code later
// in this file). Allocated lazily on the C heap when preservation begins.
static GrowableArray<Handle>* _preserved_oop_stack = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;
45
// Switch klass k's prototype mark word to the biased-locking prototype,
// enabling biasing for instances of k.
static void enable_biased_locking(InstanceKlass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}
49
50 class VM_EnableBiasedLocking: public VM_Operation {
51 private:
52 bool _is_cheap_allocated;
53 public:
54 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
197 // Object is anonymously biased. We can get here if, for
198 // example, we revoke the bias due to an identity hash code
199 // being computed for an object.
200 if (!allow_rebias) {
201 obj->set_mark(unbiased_prototype);
202 }
203 // Log at "info" level if not bulk, else "trace" level
204 if (!is_bulk) {
205 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
206 } else {
207 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
208 }
209 return BiasedLocking::BIAS_REVOKED;
210 }
211
212 // Handle case where the thread toward which the object was biased has exited
213 bool thread_is_alive = false;
214 if (requesting_thread == biased_thread) {
215 thread_is_alive = true;
216 } else {
217 for (JavaThread* cur_thread = Threads::first(); cur_thread != NULL; cur_thread = cur_thread->next()) {
218 if (cur_thread == biased_thread) {
219 thread_is_alive = true;
220 break;
221 }
222 }
223 }
224 if (!thread_is_alive) {
225 if (allow_rebias) {
226 obj->set_mark(biased_prototype);
227 } else {
228 obj->set_mark(unbiased_prototype);
229 }
230 // Log at "info" level if not bulk, else "trace" level
231 if (!is_bulk) {
232 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
233 PTR_FORMAT ")", p2i(biased_thread));
234 } else {
235 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
236 PTR_FORMAT ")", p2i(biased_thread));
237 }
238 return BiasedLocking::BIAS_REVOKED;
239 }
240
241 // Log at "info" level if not bulk, else "trace" level
242 if (!is_bulk) {
243 log_info(biasedlocking)(" Revoked bias of object biased toward live thread ("
// Bulk operation on all instances of o's klass, executed at a safepoint.
// Two modes:
//  - bulk_rebias == true: bump the bias epoch stored in the klass's
//    prototype header, implicitly invalidating every outstanding bias for
//    this type; stack-locked biased instances are walked and moved to the
//    new epoch so their biases remain valid.
//  - bulk_rebias == false: disable biased locking for this type entirely
//    and forcibly revoke the bias of every locked-and-biased instance found
//    on any thread's stack.
// Returns BIAS_REVOKED, or BIAS_REVOKED_AND_REBIASED when
// attempt_rebias_of_object is true and 'o' could afterwards be rebiased
// toward requesting_thread.
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  // Remember when the last bulk revocation happened for this type.
  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
  } else {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread, NULL);
        }
      }
    }

    // Must force the bias of the passed object to be forcibly revoked
    // as well to ensure guarantees to callers
    revoke_bias(o, false, true, requesting_thread, NULL);
  }

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  // Optionally rebias 'o' toward the requesting thread -- but only if its
  // mark still carries the bias pattern and biased locking is still enabled
  // for the type (the revoke path above may have cleared either).
  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
480
481
482 static void clean_up_cached_monitor_info() {
483 // Walk the thread list clearing out the cached monitors
484 for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
485 thr->set_cached_monitor_info(NULL);
486 }
487 }
488
489
490 class VM_RevokeBias : public VM_Operation {
491 protected:
492 Handle* _obj;
493 GrowableArray<Handle>* _objs;
494 JavaThread* _requesting_thread;
495 BiasedLocking::Condition _status_code;
496 traceid _biased_locker_id;
497
498 public:
499 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
500 : _obj(obj)
501 , _objs(NULL)
502 , _requesting_thread(requesting_thread)
503 , _status_code(BiasedLocking::NOT_BIASED)
504 , _biased_locker_id(0) {}
751
752 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
753
754 assert(_preserved_oop_stack == NULL, "double initialization");
755 assert(_preserved_mark_stack == NULL, "double initialization");
756
757 // In order to reduce the number of mark words preserved during GC
758 // due to the presence of biased locking, we reinitialize most mark
759 // words to the class's prototype during GC -- even those which have
760 // a currently valid bias owner. One important situation where we
761 // must not clobber a bias is when a biased object is currently
762 // locked. To handle this case we iterate over the currently-locked
763 // monitors in a prepass and, if they are biased, preserve their
764 // mark words here. This should be a relatively small set of objects
765 // especially compared to the number of objects in the heap.
766 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
767 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
768
769 ResourceMark rm;
770 Thread* cur = Thread::current();
771 for (JavaThread* thread = Threads::first(); thread != NULL; thread = thread->next()) {
772 if (thread->has_last_Java_frame()) {
773 RegisterMap rm(thread);
774 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
775 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
776 if (monitors != NULL) {
777 int len = monitors->length();
778 // Walk monitors youngest to oldest
779 for (int i = len - 1; i >= 0; i--) {
780 MonitorInfo* mon_info = monitors->at(i);
781 if (mon_info->owner_is_scalar_replaced()) continue;
782 oop owner = mon_info->owner();
783 if (owner != NULL) {
784 markOop mark = owner->mark();
785 if (mark->has_bias_pattern()) {
786 _preserved_oop_stack->push(Handle(cur, owner));
787 _preserved_mark_stack->push(mark);
788 }
789 }
790 }
791 }
|
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "logging/log.hpp"
27 #include "memory/resourceArea.hpp"
28 #include "oops/klass.inline.hpp"
29 #include "oops/markOop.hpp"
30 #include "oops/oop.inline.hpp"
31 #include "runtime/atomic.hpp"
32 #include "runtime/basicLock.hpp"
33 #include "runtime/biasedLocking.hpp"
34 #include "runtime/task.hpp"
35 #include "runtime/threadSMR.hpp"
36 #include "runtime/vframe.hpp"
37 #include "runtime/vmThread.hpp"
38 #include "runtime/vm_operations.hpp"
39 #include "trace/tracing.hpp"
40
// Tracks whether biased locking is currently enabled for the VM (starts
// disabled; presumably flipped by the VM_EnableBiasedLocking operation
// declared below -- the flip itself is not visible in this chunk).
static bool _biased_locking_enabled = false;
BiasedLockingCounters BiasedLocking::_counters;

// Stacks used to save and restore, across a GC, the mark words of objects
// that are biased AND currently locked (see the mark-preservation code later
// in this file). Allocated lazily on the C heap when preservation begins.
static GrowableArray<Handle>* _preserved_oop_stack = NULL;
static GrowableArray<markOop>* _preserved_mark_stack = NULL;
46
// Switch klass k's prototype mark word to the biased-locking prototype,
// enabling biasing for instances of k.
static void enable_biased_locking(InstanceKlass* k) {
  k->set_prototype_header(markOopDesc::biased_locking_prototype());
}
50
51 class VM_EnableBiasedLocking: public VM_Operation {
52 private:
53 bool _is_cheap_allocated;
54 public:
55 VM_EnableBiasedLocking(bool is_cheap_allocated) { _is_cheap_allocated = is_cheap_allocated; }
198 // Object is anonymously biased. We can get here if, for
199 // example, we revoke the bias due to an identity hash code
200 // being computed for an object.
201 if (!allow_rebias) {
202 obj->set_mark(unbiased_prototype);
203 }
204 // Log at "info" level if not bulk, else "trace" level
205 if (!is_bulk) {
206 log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
207 } else {
208 log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
209 }
210 return BiasedLocking::BIAS_REVOKED;
211 }
212
213 // Handle case where the thread toward which the object was biased has exited
214 bool thread_is_alive = false;
215 if (requesting_thread == biased_thread) {
216 thread_is_alive = true;
217 } else {
218 ThreadsListHandle tlh;
219 thread_is_alive = tlh.includes(biased_thread);
220 }
221 if (!thread_is_alive) {
222 if (allow_rebias) {
223 obj->set_mark(biased_prototype);
224 } else {
225 obj->set_mark(unbiased_prototype);
226 }
227 // Log at "info" level if not bulk, else "trace" level
228 if (!is_bulk) {
229 log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
230 PTR_FORMAT ")", p2i(biased_thread));
231 } else {
232 log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
233 PTR_FORMAT ")", p2i(biased_thread));
234 }
235 return BiasedLocking::BIAS_REVOKED;
236 }
237
238 // Log at "info" level if not bulk, else "trace" level
239 if (!is_bulk) {
240 log_info(biasedlocking)(" Revoked bias of object biased toward live thread ("
// Bulk operation on all instances of o's klass, executed at a safepoint.
// Two modes:
//  - bulk_rebias == true: bump the bias epoch stored in the klass's
//    prototype header, implicitly invalidating every outstanding bias for
//    this type; stack-locked biased instances are walked and moved to the
//    new epoch so their biases remain valid.
//  - bulk_rebias == false: disable biased locking for this type entirely
//    and forcibly revoke the bias of every locked-and-biased instance found
//    on any thread's stack.
// The thread-list walks are performed under a JavaThreadIteratorWithHandle,
// whose embedded ThreadsListHandle keeps the list stable for the duration
// of the scoped block below.
// Returns BIAS_REVOKED, or BIAS_REVOKED_AND_REBIASED when
// attempt_rebias_of_object is true and 'o' could afterwards be rebiased
// toward requesting_thread.
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          (intptr_t) o->mark(),
                          o->klass()->external_name());

  // Remember when the last bulk revocation happened for this type.
  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);


  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // Use the epoch in the klass of the object to implicitly revoke
      // all biases of objects of this data type and force them to be
      // reacquired. However, we also need to walk the stacks of all
      // threads and update the headers of lightweight locked objects
      // with biases to have the current epoch.

      // If the prototype header doesn't have the bias pattern, don't
      // try to update the epoch -- assume another VM operation came in
      // and reset the header to the unbiased state, which will
      // implicitly cause all existing biases to be revoked
      if (klass->prototype_header()->has_bias_pattern()) {
        int prev_epoch = klass->prototype_header()->bias_epoch();
        klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
        int cur_epoch = klass->prototype_header()->bias_epoch();

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markOop mark = owner->mark();
            if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark->set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(), true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markOopDesc::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            revoke_bias(owner, false, true, requesting_thread, NULL);
          }
        }
      }

      // Must force the bias of the passed object to be forcibly revoked
      // as well to ensure guarantees to callers
      revoke_bias(o, false, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  // Optionally rebias 'o' toward the requesting thread -- but only if its
  // mark still carries the bias pattern and biased locking is still enabled
  // for the type (the revoke path above may have cleared either).
  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)("  Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
481
482
483 static void clean_up_cached_monitor_info() {
484 // Walk the thread list clearing out the cached monitors
485 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
486 thr->set_cached_monitor_info(NULL);
487 }
488 }
489
490
491 class VM_RevokeBias : public VM_Operation {
492 protected:
493 Handle* _obj;
494 GrowableArray<Handle>* _objs;
495 JavaThread* _requesting_thread;
496 BiasedLocking::Condition _status_code;
497 traceid _biased_locker_id;
498
499 public:
500 VM_RevokeBias(Handle* obj, JavaThread* requesting_thread)
501 : _obj(obj)
502 , _objs(NULL)
503 , _requesting_thread(requesting_thread)
504 , _status_code(BiasedLocking::NOT_BIASED)
505 , _biased_locker_id(0) {}
752
753 assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
754
755 assert(_preserved_oop_stack == NULL, "double initialization");
756 assert(_preserved_mark_stack == NULL, "double initialization");
757
758 // In order to reduce the number of mark words preserved during GC
759 // due to the presence of biased locking, we reinitialize most mark
760 // words to the class's prototype during GC -- even those which have
761 // a currently valid bias owner. One important situation where we
762 // must not clobber a bias is when a biased object is currently
763 // locked. To handle this case we iterate over the currently-locked
764 // monitors in a prepass and, if they are biased, preserve their
765 // mark words here. This should be a relatively small set of objects
766 // especially compared to the number of objects in the heap.
767 _preserved_mark_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<markOop>(10, true);
768 _preserved_oop_stack = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<Handle>(10, true);
769
770 ResourceMark rm;
771 Thread* cur = Thread::current();
772 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thread = jtiwh.next(); ) {
773 if (thread->has_last_Java_frame()) {
774 RegisterMap rm(thread);
775 for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) {
776 GrowableArray<MonitorInfo*> *monitors = vf->monitors();
777 if (monitors != NULL) {
778 int len = monitors->length();
779 // Walk monitors youngest to oldest
780 for (int i = len - 1; i >= 0; i--) {
781 MonitorInfo* mon_info = monitors->at(i);
782 if (mon_info->owner_is_scalar_replaced()) continue;
783 oop owner = mon_info->owner();
784 if (owner != NULL) {
785 markOop mark = owner->mark();
786 if (mark->has_bias_pattern()) {
787 _preserved_oop_stack->push(Handle(cur, owner));
788 _preserved_mark_stack->push(mark);
789 }
790 }
791 }
792 }
|