        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}
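// Note: the per-thread cache filled in above is invalidated again via
// clean_up_cached_monitor_info() once the revocation that needed it is done.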


// After the call, *biased_locker will be set to obj->mark().biased_locker() if biased_locker != NULL
// and the biased locker is a living thread. Otherwise it will not be updated (i.e., the caller is
// responsible for initializing it).
BiasedLocking::Condition BiasedLocking::single_revoke_at_safepoint(oop obj, bool allow_rebias, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  markWord mark = obj->mark();
  if (!mark.has_bias_pattern()) {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)(" (Skipping revocation of object " INTPTR_FORMAT
                              ", mark " INTPTR_FORMAT ", type %s"
                              ", requesting thread " INTPTR_FORMAT
                              " because it's no longer biased)",
                              p2i((void *)obj), mark.value(),
                              obj->klass()->external_name(),
                              (intptr_t) requesting_thread);
    }
    return NOT_BIASED;
  }

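  // Build the two possible replacement headers, preserving the object's GC
  // age bits: biased_prototype still carries the bias pattern (used when
  // rebiasing is allowed), unbiased_prototype is a plain unlocked header.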
  uint age = mark.age();
  markWord biased_prototype = markWord::biased_locking_prototype().set_age(age);
  markWord unbiased_prototype = markWord::prototype().set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                            ", allow rebias %d, requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            mark.value(),
                            obj->klass()->external_name(),
                            obj->klass()->prototype_header().value(),
                            (allow_rebias ? 1 : 0),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                             INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                             " , allow rebias %d , requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             mark.value(),
                             obj->klass()->external_name(),
                             obj->klass()->prototype_header().value(),
                             (allow_rebias ? 1 : 0),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark.biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    if (!allow_rebias) {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
    }
    return BIAS_REVOKED;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    ThreadsListHandle tlh;
    thread_is_alive = tlh.includes(biased_thread);
  }
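  // ThreadsListHandle gives a stable Thread-SMR snapshot of the thread list,
  // so the liveness check above cannot race with biased_thread exiting and
  // being freed while we examine it.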
  if (!thread_is_alive) {
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      obj->set_mark(unbiased_prototype);
    }
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
                              PTR_FORMAT ")", p2i(biased_thread));
    } else {
      log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
                               PTR_FORMAT ")", p2i(biased_thread));
    }
    return BIAS_REVOKED;
  }

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    log_info(biasedlocking)(" Revoked bias of object biased toward live thread ("
                            PTR_FORMAT ")", p2i(biased_thread));
  } else {
    log_trace(biasedlocking)(" Revoked bias of object biased toward live thread ("
                             PTR_FORMAT ")", p2i(biased_thread));
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    // ... (rest of loop body elided)
  }

  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)(" Revoked bias of currently-locked object");
    }
  } else {
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)(" Revoked bias of currently-unlocked object");
    }
    if (allow_rebias) {
      obj->set_mark(biased_prototype);
    } else {
      // Store the unlocked value into the object's header.
      obj->set_mark(unbiased_prototype);
    }
  }

  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }

  return BIAS_REVOKED;
}

enum HeuristicsResult {
  HR_NOT_BIASED = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS = 3,
  HR_BULK_REVOKE = 4
};

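// HR_SINGLE_REVOKE revokes the bias of a single object; HR_BULK_REBIAS bumps
// the klass's bias epoch, implicitly invalidating existing biases while
// leaving the type biasable; HR_BULK_REVOKE disables biased locking for the
// type entirely (see bulk_revoke_or_rebias_at_safepoint below).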
static HeuristicsResult update_heuristics(oop o) {
  markWord mark = o->mark();
  if (!mark.has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  // ... (elided)
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

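  // Each equality test below fires at most once per threshold crossing: the
  // atomic increment guarantees that no two threads observe the same count.
  // (Defaults at the time of writing: BiasedLockingBulkRebiasThreshold = 20,
  // BiasedLockingBulkRevokeThreshold = 40.)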
  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}


BiasedLocking::Condition BiasedLocking::bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                           bool bulk_rebias,
                                                                           bool attempt_rebias_of_object,
                                                                           JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          o->mark().value(),
                          o->klass()->external_name());

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);

  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // ... (elided)

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markWord mark = owner->mark();
            if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark.set_bias_epoch(cur_epoch));
            }
          }
        }
      }
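
      // Stack-locked instances updated above remain validly biased under the
      // new epoch; every other instance of this type now carries a stale
      // epoch and is treated as rebiasable rather than biased.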

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      single_revoke_at_safepoint(o, attempt_rebias_of_object && klass->prototype_header().has_bias_pattern(), true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markWord::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markWord mark = owner->mark();
          if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
            single_revoke_at_safepoint(owner, false, true, requesting_thread, NULL);
          }
        }
      }

      // The bias of the passed object must be revoked as well to
      // ensure guarantees to callers
      single_revoke_at_safepoint(o, false, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  BiasedLocking::Condition status_code = BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark().has_bias_pattern() &&
      klass->prototype_header().has_bias_pattern()) {
    markWord new_mark = markWord::encode(requesting_thread, o->mark().age(),
                                         klass->prototype_header().bias_epoch());
    o->set_mark(new_mark);
    status_code = BIAS_REVOKED_AND_REBIASED;
    log_info(biasedlocking)(" Rebiased object toward thread " INTPTR_FORMAT, (intptr_t) requesting_thread);
  }

  assert(!o->mark().has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark().biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}


static void clean_up_cached_monitor_info(JavaThread* thread = NULL) {
  if (thread != NULL) {
    thread->set_cached_monitor_info(NULL);
  } else {
    // Walk the thread list clearing out the cached monitors
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
      thr->set_cached_monitor_info(NULL);
    }
  }
}
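
// The cached stack walks are only valid for the revocation operation that
// computed them, so they are discarded as soon as that operation completes.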


class VM_BulkRevokeBias : public VM_Operation {
 private:
  Handle* _obj;
  JavaThread* _requesting_thread;
  bool _bulk_rebias;
  bool _attempt_rebias_of_object;
  BiasedLocking::Condition _status_code;
  uint64_t _safepoint_id;

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias,
                    bool attempt_rebias_of_object)
    : _obj(obj)
    , _requesting_thread(requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _attempt_rebias_of_object(attempt_rebias_of_object)
    , _status_code(BiasedLocking::NOT_BIASED)
    , _safepoint_id(0) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }

  virtual void doit() {
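    // Runs in the VMThread while all Java threads are stopped at the
    // safepoint. The safepoint id is recorded so the requesting thread can
    // attach it to the JFR revocation event it posts afterwards.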
    _status_code = BiasedLocking::bulk_revoke_or_rebias_at_safepoint((*_obj)(), _bulk_rebias, _attempt_rebias_of_object, _requesting_thread);
    _safepoint_id = SafepointSynchronize::safepoint_id();
    clean_up_cached_monitor_info();
  }

  bool is_bulk_rebias() const {
    return _bulk_rebias;
  }

  BiasedLocking::Condition status_code() const {
    return _status_code;
  }

  uint64_t safepoint_id() const {
    return _safepoint_id;
  }
};


class RevokeOneBias : public ThreadClosure {
 protected:
  Handle _obj;
  JavaThread* _requesting_thread;
  JavaThread* _biased_locker;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;

 public:
  RevokeOneBias(Handle obj, JavaThread* requesting_thread, JavaThread* biased_locker)
    : _obj(obj)
    , _requesting_thread(requesting_thread)
    , _biased_locker(biased_locker)
    , _status_code(BiasedLocking::NOT_BIASED)
  // ... (large elision; the fragment below resumes inside walk_stack_and_revoke)
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    log_info(biasedlocking)(" Revoked bias of currently-locked object");
  } else {
    log_info(biasedlocking)(" Revoked bias of currently-unlocked object");
    // Store the unlocked value into the object's header.
    obj->set_mark(unbiased_prototype);
  }

  assert(!obj->mark().has_bias_pattern(), "must not be biased");
}


BiasedLocking::Condition BiasedLocking::revoke_and_rebias(Handle obj, bool attempt_rebias, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

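  // Retry loop: each arm below either returns with a definitive status or,
  // after losing a CAS race (or a handshake revocation reporting
  // NOT_REVOKED), refreshes the mark and tries again.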
  while (true) {
    // We can revoke the biases of anonymously-biased objects
    // efficiently enough that we should not cause these revocations to
    // update the heuristics because doing so may cause unwanted bulk
    // revocations (which are expensive) to occur.
    markWord mark = obj->mark();
    if (mark.is_biased_anonymously() && !attempt_rebias) {
      // We are probably trying to revoke the bias of this object due to
      // an identity hash code computation. Try to revoke the bias
      // without a safepoint. This is possible if we can successfully
      // compare-and-exchange an unbiased header into the mark word of
      // the object, meaning that no other thread has raced to acquire
      // the bias of the object.
      markWord biased_value = mark;
      markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
      markWord res_mark = obj->cas_set_mark(unbiased_prototype, mark);
      if (res_mark == biased_value) {
        return BIAS_REVOKED;
      }
      mark = res_mark; // Refresh mark with the latest value.
    } else if (mark.has_bias_pattern()) {
      Klass* k = obj->klass();
      markWord prototype_header = k->prototype_header();
      if (!prototype_header.has_bias_pattern()) {
        // This object has a stale bias from before the bulk revocation
        // for this data type occurred. It's pointless to update the
        // heuristics at this point so simply update the header with a
        // CAS. If we fail this race, the object's bias has been revoked
        // by another thread so we simply return and let the caller deal
        // with it.
        obj->cas_set_mark(prototype_header.set_age(mark.age()), mark);
        assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked");
        return BIAS_REVOKED;
      } else if (prototype_header.bias_epoch() != mark.bias_epoch()) {
        // The epoch of this biasing has expired, indicating that the
        // object is effectively unbiased. Depending on whether we need
        // to rebias or revoke the bias of this object we can do it
        // efficiently enough with a CAS that we shouldn't update the
        // heuristics. This is normally done in the assembly code but we
        // can reach this point due to various points in the runtime
        // needing to revoke biases.
        markWord res_mark;
        if (attempt_rebias) {
          assert(THREAD->is_Java_thread(), "");
          markWord biased_value = mark;
          markWord rebiased_prototype = markWord::encode((JavaThread*) THREAD, mark.age(), prototype_header.bias_epoch());
          res_mark = obj->cas_set_mark(rebiased_prototype, mark);
          if (res_mark == biased_value) {
            return BIAS_REVOKED_AND_REBIASED;
          }
        } else {
          markWord biased_value = mark;
          markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
          res_mark = obj->cas_set_mark(unbiased_prototype, mark);
          if (res_mark == biased_value) {
            return BIAS_REVOKED;
          }
        }
        mark = res_mark; // Refresh mark with the latest value.
      }
    }

    HeuristicsResult heuristics = update_heuristics(obj());
    if (heuristics == HR_NOT_BIASED) {
      return NOT_BIASED;
    } else if (heuristics == HR_SINGLE_REVOKE) {
      JavaThread *blt = mark.biased_locker();
      assert(blt != NULL, "invariant");
      if (blt == THREAD) {
        // A thread is trying to revoke the bias of an object biased
        // toward it, again likely due to an identity hash code
        // computation. We can again avoid a safepoint/handshake in this case
        // since we are only going to walk our own stack. There are no
        // races with revocations occurring in other threads because we
        // reach no safepoints in the revocation path.
        EventBiasedLockSelfRevocation event;
        ResourceMark rm;
        walk_stack_and_revoke(obj(), blt);
        blt->set_cached_monitor_info(NULL);
        assert(!obj->mark().has_bias_pattern(), "invariant");
        if (event.should_commit()) {
          post_self_revocation_event(&event, obj->klass());
        }
        return BIAS_REVOKED;
      } else {
        BiasedLocking::Condition cond = single_revoke_with_handshake(obj, (JavaThread*)THREAD, blt);
        if (cond != NOT_REVOKED) {
          return cond;
        }
      }
    } else {
      assert((heuristics == HR_BULK_REVOKE) ||
             (heuristics == HR_BULK_REBIAS), "?");
      EventBiasedLockClassRevocation event;
      VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*)THREAD,
                                    (heuristics == HR_BULK_REBIAS),
                                    attempt_rebias);
      VMThread::execute(&bulk_revoke);
      if (event.should_commit()) {
        post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
      }
      return bulk_revoke.status_code();
    }
  }
}

// All objects in objs should be locked by biaser
void BiasedLocking::revoke(GrowableArray<Handle>* objs, JavaThread *biaser) {
  bool clean_my_cache = false;
  for (int i = 0; i < objs->length(); i++) {
    oop obj = (objs->at(i))();
    markWord mark = obj->mark();
    if (mark.has_bias_pattern()) {
      walk_stack_and_revoke(obj, biaser);
      clean_my_cache = true;
    }
  }
  if (clean_my_cache) {
    clean_up_cached_monitor_info(biaser);
  }
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj);
  if (heuristics == HR_SINGLE_REVOKE) {
    JavaThread* biased_locker = NULL;
    single_revoke_at_safepoint(obj, false, false, NULL, &biased_locker);
    if (biased_locker) {
      clean_up_cached_monitor_info(biased_locker);
    }
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    clean_up_cached_monitor_info();
  }
}


void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj);
    if (heuristics == HR_SINGLE_REVOKE) {
      single_revoke_at_safepoint(obj, false, false, NULL, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_or_rebias_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), false, NULL);
    }
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // ... (truncated)
        // Walk monitors youngest to oldest
        for (int i = len - 1; i >= 0; i--) {
          MonitorInfo* mon_info = monitors->at(i);
          if (mon_info->eliminated()) continue;
          oop owner = mon_info->owner();
          if (owner != NULL) {
            info->append(mon_info);
          }
        }
      }
    }
  }

  thread->set_cached_monitor_info(info);
  return info;
}


// After the call, *biased_locker will be set to obj->mark().biased_locker() if biased_locker != NULL
// and the biased locker is a living thread. Otherwise it will not be updated (i.e., the caller is
// responsible for initializing it).
void BiasedLocking::single_revoke_at_safepoint(oop obj, bool is_bulk, JavaThread* requesting_thread, JavaThread** biased_locker) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  markWord mark = obj->mark();
  if (!mark.has_bias_pattern()) {
    if (log_is_enabled(Info, biasedlocking)) {
      ResourceMark rm;
      log_info(biasedlocking)(" (Skipping revocation of object " INTPTR_FORMAT
                              ", mark " INTPTR_FORMAT ", type %s"
                              ", requesting thread " INTPTR_FORMAT
                              " because it's no longer biased)",
                              p2i((void *)obj), mark.value(),
                              obj->klass()->external_name(),
                              (intptr_t) requesting_thread);
    }
    return;
  }

  uint age = mark.age();
  markWord unbiased_prototype = markWord::prototype().set_age(age);

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    ResourceMark rm;
    log_info(biasedlocking)("Revoking bias of object " INTPTR_FORMAT ", mark "
                            INTPTR_FORMAT ", type %s, prototype header " INTPTR_FORMAT
                            ", requesting thread " INTPTR_FORMAT,
                            p2i((void *)obj),
                            mark.value(),
                            obj->klass()->external_name(),
                            obj->klass()->prototype_header().value(),
                            (intptr_t) requesting_thread);
  } else {
    ResourceMark rm;
    log_trace(biasedlocking)("Revoking bias of object " INTPTR_FORMAT " , mark "
                             INTPTR_FORMAT " , type %s , prototype header " INTPTR_FORMAT
                             " , requesting thread " INTPTR_FORMAT,
                             p2i((void *)obj),
                             mark.value(),
                             obj->klass()->external_name(),
                             obj->klass()->prototype_header().value(),
                             (intptr_t) requesting_thread);
  }

  JavaThread* biased_thread = mark.biased_locker();
  if (biased_thread == NULL) {
    // Object is anonymously biased. We can get here if, for
    // example, we revoke the bias due to an identity hash code
    // being computed for an object.
    obj->set_mark(unbiased_prototype);

    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of anonymously-biased object");
    } else {
      log_trace(biasedlocking)(" Revoked bias of anonymously-biased object");
    }
    return;
  }

  // Handle case where the thread toward which the object was biased has exited
  bool thread_is_alive = false;
  if (requesting_thread == biased_thread) {
    thread_is_alive = true;
  } else {
    ThreadsListHandle tlh;
    thread_is_alive = tlh.includes(biased_thread);
  }
  if (!thread_is_alive) {
    obj->set_mark(unbiased_prototype);
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of object biased toward dead thread ("
                              PTR_FORMAT ")", p2i(biased_thread));
    } else {
      log_trace(biasedlocking)(" Revoked bias of object biased toward dead thread ("
                               PTR_FORMAT ")", p2i(biased_thread));
    }
    return;
  }

  // Log at "info" level if not bulk, else "trace" level
  if (!is_bulk) {
    log_info(biasedlocking)(" Revoked bias of object biased toward live thread ("
                            PTR_FORMAT ")", p2i(biased_thread));
  } else {
    log_trace(biasedlocking)(" Revoked bias of object biased toward live thread ("
                             PTR_FORMAT ")", p2i(biased_thread));
  }

  // Thread owning bias is alive.
  // Check to see whether it currently owns the lock and, if so,
  // write down the needed displaced headers to the thread's stack.
  // Otherwise, restore the object's header either to the unlocked
  // or unbiased state.
  GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(biased_thread);
  BasicLock* highest_lock = NULL;
  for (int i = 0; i < cached_monitor_info->length(); i++) {
    MonitorInfo* mon_info = cached_monitor_info->at(i);
    // ... (rest of loop body elided)
  }

  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of currently-locked object");
    } else {
      log_trace(biasedlocking)(" Revoked bias of currently-locked object");
    }
  } else {
    // Log at "info" level if not bulk, else "trace" level
    if (!is_bulk) {
      log_info(biasedlocking)(" Revoked bias of currently-unlocked object");
    } else {
      log_trace(biasedlocking)(" Revoked bias of currently-unlocked object");
    }
    // Store the unlocked value into the object's header.
    obj->set_mark(unbiased_prototype);
  }

  // If requested, return information on which thread held the bias
  if (biased_locker != NULL) {
    *biased_locker = biased_thread;
  }
}

enum HeuristicsResult {
  HR_NOT_BIASED = 1,
  HR_SINGLE_REVOKE = 2,
  HR_BULK_REBIAS = 3,
  HR_BULK_REVOKE = 4
};

static HeuristicsResult update_heuristics(oop o) {
  markWord mark = o->mark();
  if (!mark.has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  // ... (elided)
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}


void BiasedLocking::bulk_revoke_at_safepoint(oop o, bool bulk_rebias, JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
  assert(Thread::current()->is_VM_thread(), "must be VMThread");

  log_info(biasedlocking)("* Beginning bulk revocation (kind == %s) because of object "
                          INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                          (bulk_rebias ? "rebias" : "revoke"),
                          p2i((void *) o),
                          o->mark().value(),
                          o->klass()->external_name());

  jlong cur_time = os::javaTimeMillis();
  o->klass()->set_last_biased_lock_bulk_revocation_time(cur_time);

  Klass* k_o = o->klass();
  Klass* klass = k_o;

  {
    JavaThreadIteratorWithHandle jtiwh;

    if (bulk_rebias) {
      // ... (elided)

        // Now walk all threads' stacks and adjust epochs of any biased
        // and locked objects of this data type we encounter
        for (; JavaThread *thr = jtiwh.next(); ) {
          GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
          for (int i = 0; i < cached_monitor_info->length(); i++) {
            MonitorInfo* mon_info = cached_monitor_info->at(i);
            oop owner = mon_info->owner();
            markWord mark = owner->mark();
            if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
              // We might have encountered this object already in the case of recursive locking
              assert(mark.bias_epoch() == prev_epoch || mark.bias_epoch() == cur_epoch, "error in bias epoch adjustment");
              owner->set_mark(mark.set_bias_epoch(cur_epoch));
            }
          }
        }
      }

      // At this point we're done. All we have to do is potentially
      // adjust the header of the given object to revoke its bias.
      single_revoke_at_safepoint(o, true, requesting_thread, NULL);
    } else {
      if (log_is_enabled(Info, biasedlocking)) {
        ResourceMark rm;
        log_info(biasedlocking)("* Disabling biased locking for type %s", klass->external_name());
      }

      // Disable biased locking for this data type. Not only will this
      // cause future instances to not be biased, but existing biased
      // instances will notice that this implicitly caused their biases
      // to be revoked.
      klass->set_prototype_header(markWord::prototype());

      // Now walk all threads' stacks and forcibly revoke the biases of
      // any locked and biased objects of this data type we encounter.
      for (; JavaThread *thr = jtiwh.next(); ) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markWord mark = owner->mark();
          if ((owner->klass() == k_o) && mark.has_bias_pattern()) {
            single_revoke_at_safepoint(owner, true, requesting_thread, NULL);
          }
        }
      }

      // The bias of the passed object must be revoked as well to
      // ensure guarantees to callers
      single_revoke_at_safepoint(o, true, requesting_thread, NULL);
    }
  } // ThreadsListHandle is destroyed here.

  log_info(biasedlocking)("* Ending bulk revocation");

  assert(!o->mark().has_bias_pattern(), "bug in bulk bias revocation");
}


static void clean_up_cached_monitor_info(JavaThread* thread = NULL) {
  if (thread != NULL) {
    thread->set_cached_monitor_info(NULL);
  } else {
    // Walk the thread list clearing out the cached monitors
    for (JavaThreadIteratorWithHandle jtiwh; JavaThread *thr = jtiwh.next(); ) {
      thr->set_cached_monitor_info(NULL);
    }
  }
}


class VM_BulkRevokeBias : public VM_Operation {
 private:
  Handle* _obj;
  JavaThread* _requesting_thread;
  bool _bulk_rebias;
  uint64_t _safepoint_id;

 public:
  VM_BulkRevokeBias(Handle* obj, JavaThread* requesting_thread,
                    bool bulk_rebias)
    : _obj(obj)
    , _requesting_thread(requesting_thread)
    , _bulk_rebias(bulk_rebias)
    , _safepoint_id(0) {}

  virtual VMOp_Type type() const { return VMOp_BulkRevokeBias; }

  virtual void doit() {
    BiasedLocking::bulk_revoke_at_safepoint((*_obj)(), _bulk_rebias, _requesting_thread);
    _safepoint_id = SafepointSynchronize::safepoint_id();
    clean_up_cached_monitor_info();
  }

  bool is_bulk_rebias() const {
    return _bulk_rebias;
  }

  uint64_t safepoint_id() const {
    return _safepoint_id;
  }
};
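
// A VM_BulkRevokeBias is executed synchronously by the VMThread via
// VMThread::execute(&bulk_revoke), as in BiasedLocking::revoke() below.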


class RevokeOneBias : public ThreadClosure {
 protected:
  Handle _obj;
  JavaThread* _requesting_thread;
  JavaThread* _biased_locker;
  BiasedLocking::Condition _status_code;
  traceid _biased_locker_id;

 public:
  RevokeOneBias(Handle obj, JavaThread* requesting_thread, JavaThread* biased_locker)
    : _obj(obj)
    , _requesting_thread(requesting_thread)
    , _biased_locker(biased_locker)
    , _status_code(BiasedLocking::NOT_BIASED)
  // ... (large elision; the fragment below resumes inside walk_stack_and_revoke)
  if (highest_lock != NULL) {
    // Fix up highest lock to contain displaced header and point
    // object at it
    highest_lock->set_displaced_header(unbiased_prototype);
    // Reset object header to point to displaced mark.
    // Must release store the lock address for platforms without TSO
    // ordering (e.g. ppc).
    obj->release_set_mark(markWord::encode(highest_lock));
    assert(!obj->mark().has_bias_pattern(), "illegal mark state: stack lock used bias bit");
    log_info(biasedlocking)(" Revoked bias of currently-locked object");
  } else {
    log_info(biasedlocking)(" Revoked bias of currently-unlocked object");
    // Store the unlocked value into the object's header.
    obj->set_mark(unbiased_prototype);
  }

  assert(!obj->mark().has_bias_pattern(), "must not be biased");
}


void BiasedLocking::revoke(Handle obj, TRAPS) {
  assert(!SafepointSynchronize::is_at_safepoint(), "must not be called while at safepoint");

  while (true) {
    // We can revoke the biases of anonymously-biased objects
    // efficiently enough that we should not cause these revocations to
    // update the heuristics because doing so may cause unwanted bulk
    // revocations (which are expensive) to occur.
    markWord mark = obj->mark();

    if (!mark.has_bias_pattern()) {
      return;
    }

    if (mark.is_biased_anonymously()) {
      // We are probably trying to revoke the bias of this object due to
      // an identity hash code computation. Try to revoke the bias
      // without a safepoint. This is possible if we can successfully
      // compare-and-exchange an unbiased header into the mark word of
      // the object, meaning that no other thread has raced to acquire
      // the bias of the object.
      markWord biased_value = mark;
      markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
      markWord res_mark = obj->cas_set_mark(unbiased_prototype, mark);
      if (res_mark == biased_value) {
        return;
      }
      mark = res_mark; // Refresh mark with the latest value.
    } else {
      Klass* k = obj->klass();
      markWord prototype_header = k->prototype_header();
      if (!prototype_header.has_bias_pattern()) {
        // This object has a stale bias from before the bulk revocation
        // for this data type occurred. It's pointless to update the
        // heuristics at this point so simply update the header with a
        // CAS. If we fail this race, the object's bias has been revoked
        // by another thread so we simply return and let the caller deal
        // with it.
        obj->cas_set_mark(prototype_header.set_age(mark.age()), mark);
        assert(!obj->mark().has_bias_pattern(), "even if we raced, should still be revoked");
        return;
      } else if (prototype_header.bias_epoch() != mark.bias_epoch()) {
        // The epoch of this biasing has expired, indicating that the
        // object is effectively unbiased. We can revoke the bias of this
        // object efficiently enough with a CAS that we shouldn't update the
        // heuristics. This is normally done in the assembly code but we
        // can reach this point due to various points in the runtime
        // needing to revoke biases.
        markWord res_mark;
        markWord biased_value = mark;
        markWord unbiased_prototype = markWord::prototype().set_age(mark.age());
        res_mark = obj->cas_set_mark(unbiased_prototype, mark);
        if (res_mark == biased_value) {
          return;
        }
        mark = res_mark; // Refresh mark with the latest value.
      }
    }

    HeuristicsResult heuristics = update_heuristics(obj());
    if (heuristics == HR_NOT_BIASED) {
      return;
    } else if (heuristics == HR_SINGLE_REVOKE) {
      JavaThread *blt = mark.biased_locker();
      assert(blt != NULL, "invariant");
      if (blt == THREAD) {
        // A thread is trying to revoke the bias of an object biased
        // toward it, again likely due to an identity hash code
        // computation. We can again avoid a safepoint/handshake in this case
        // since we are only going to walk our own stack. There are no
        // races with revocations occurring in other threads because we
        // reach no safepoints in the revocation path.
        EventBiasedLockSelfRevocation event;
        ResourceMark rm;
        walk_stack_and_revoke(obj(), blt);
        blt->set_cached_monitor_info(NULL);
        assert(!obj->mark().has_bias_pattern(), "invariant");
        if (event.should_commit()) {
          post_self_revocation_event(&event, obj->klass());
        }
        return;
      } else {
        BiasedLocking::Condition cond = single_revoke_with_handshake(obj, (JavaThread*)THREAD, blt);
        if (cond != NOT_REVOKED) {
          return;
        }
      }
    } else {
      assert((heuristics == HR_BULK_REVOKE) ||
             (heuristics == HR_BULK_REBIAS), "?");
      EventBiasedLockClassRevocation event;
      VM_BulkRevokeBias bulk_revoke(&obj, (JavaThread*)THREAD,
                                    (heuristics == HR_BULK_REBIAS));
      VMThread::execute(&bulk_revoke);
      if (event.should_commit()) {
        post_class_revocation_event(&event, obj->klass(), &bulk_revoke);
      }
      return;
    }
  }
}

// All objects in objs should be locked by biaser
void BiasedLocking::revoke(GrowableArray<Handle>* objs, JavaThread *biaser) {
  bool clean_my_cache = false;
  for (int i = 0; i < objs->length(); i++) {
    oop obj = (objs->at(i))();
    markWord mark = obj->mark();
    if (mark.has_bias_pattern()) {
      walk_stack_and_revoke(obj, biaser);
      clean_my_cache = true;
    }
  }
  if (clean_my_cache) {
    clean_up_cached_monitor_info(biaser);
  }
}


void BiasedLocking::revoke_at_safepoint(Handle h_obj) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  oop obj = h_obj();
  HeuristicsResult heuristics = update_heuristics(obj);
  if (heuristics == HR_SINGLE_REVOKE) {
    JavaThread* biased_locker = NULL;
    single_revoke_at_safepoint(obj, false, NULL, &biased_locker);
    if (biased_locker) {
      clean_up_cached_monitor_info(biased_locker);
    }
  } else if ((heuristics == HR_BULK_REBIAS) ||
             (heuristics == HR_BULK_REVOKE)) {
    bulk_revoke_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), NULL);
    clean_up_cached_monitor_info();
  }
}


void BiasedLocking::revoke_at_safepoint(GrowableArray<Handle>* objs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");
  int len = objs->length();
  for (int i = 0; i < len; i++) {
    oop obj = (objs->at(i))();
    HeuristicsResult heuristics = update_heuristics(obj);
    if (heuristics == HR_SINGLE_REVOKE) {
      single_revoke_at_safepoint(obj, false, NULL, NULL);
    } else if ((heuristics == HR_BULK_REBIAS) ||
               (heuristics == HR_BULK_REVOKE)) {
      bulk_revoke_at_safepoint(obj, (heuristics == HR_BULK_REBIAS), NULL);
    }
  }
  clean_up_cached_monitor_info();
}


void BiasedLocking::preserve_marks() {
  if (!UseBiasedLocking)
    return;

  assert(SafepointSynchronize::is_at_safepoint(), "must only be called while at safepoint");

  assert(_preserved_oop_stack == NULL, "double initialization");
  assert(_preserved_mark_stack == NULL, "double initialization");

  // In order to reduce the number of mark words preserved during GC
  // due to the presence of biased locking, we reinitialize most mark
  // words to the class's prototype during GC -- even those which have
  // a currently valid bias owner. One important situation where we
  // must not clobber a bias is when a biased object is currently
  // ... (truncated)