  if (mark->has_bias_pattern()) {
    uintptr_t thread_ident;
    uintptr_t anticipated_bias_locking_value;
    thread_ident = (uintptr_t)istate->thread();
    anticipated_bias_locking_value =
      (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
      ~((uintptr_t) markOopDesc::age_mask_in_place);
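    // The value is zero exactly when the mark word already carries this
    // thread's bias for the current epoch: OR-ing the prototype header with
    // the thread pointer builds the expected biased mark, XOR-ing with the
    // actual mark leaves only the differing bits, and the age bits are
    // masked out because they may legitimately differ. Any nonzero bits
    // then select which slow path to take below.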

    if (anticipated_bias_locking_value == 0) {
      // Already biased towards this thread, nothing to do.
      if (PrintBiasedLockingStatistics) {
        (* BiasedLocking::biased_lock_entry_count_addr())++;
      }
      success = true;
    } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
      // Try to revoke bias.
      markOop header = rcvr->klass()->prototype_header();
      if (hash != markOopDesc::no_hash) {
        header = header->copy_set_hash(hash);
      }
      if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), mark) == mark) {
        if (PrintBiasedLockingStatistics)
          (*BiasedLocking::revoked_lock_entry_count_addr())++;
      }
    } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
      // Try to rebias.
      markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
      if (hash != markOopDesc::no_hash) {
        new_header = new_header->copy_set_hash(hash);
      }
      if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), mark) == mark) {
        if (PrintBiasedLockingStatistics) {
          (* BiasedLocking::rebiased_lock_entry_count_addr())++;
        }
      } else {
        CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
      }
      success = true;
    } else {
      // Try to bias towards thread in case object is anonymously biased.
      markOop header = (markOop) ((uintptr_t) mark &
                                  ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                   (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
      if (hash != markOopDesc::no_hash) {
        header = header->copy_set_hash(hash);
      }
      markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
      // Debugging hint.
      DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
      if (Atomic::cmpxchg_ptr((void*)new_header, rcvr->mark_addr(), header) == header) {
        if (PrintBiasedLockingStatistics) {
          (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
        }
      } else {
        CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
      }
      success = true;
    }
  }

  // Traditional lightweight locking.
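  // Each BasicObjectLock embeds a BasicLock whose displaced header caches the
  // object's original (unlocked) mark word; the CAS below swings the mark to
  // point at that stack slot, which is what marks the object as stack-locked.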
  if (!success) {
    markOop displaced = rcvr->mark()->set_unlocked();
    mon->lock()->set_displaced_header(displaced);
    bool call_vm = UseHeavyMonitors;
    if (call_vm || Atomic::cmpxchg_ptr(mon, rcvr->mark_addr(), displaced) != displaced) {
      // Is it the simple recursive case?
      if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
        mon->lock()->set_displaced_header(NULL);
      } else {
        CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
      }
    }
  }
}
THREAD->clr_do_not_unlock();

// Notify jvmti
#ifdef VM_JVMTI
if (_jvmti_interp_events) {
  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack depth.
  if (THREAD->is_interp_only_mode()) {
    CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
            handle_exception);
  }

// ... [source lines 774-885 elided] ...
    if (mark->has_bias_pattern()) {
      uintptr_t thread_ident;
      uintptr_t anticipated_bias_locking_value;
      thread_ident = (uintptr_t)istate->thread();
      anticipated_bias_locking_value =
        (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
        ~((uintptr_t) markOopDesc::age_mask_in_place);

      if (anticipated_bias_locking_value == 0) {
        // already biased towards this thread, nothing to do
        if (PrintBiasedLockingStatistics) {
          (* BiasedLocking::biased_lock_entry_count_addr())++;
        }
        success = true;
      } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
        // try revoke bias
        markOop header = lockee->klass()->prototype_header();
        if (hash != markOopDesc::no_hash) {
          header = header->copy_set_hash(hash);
        }
        if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
          if (PrintBiasedLockingStatistics) {
            (*BiasedLocking::revoked_lock_entry_count_addr())++;
          }
        }
      } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
        // try rebias
        markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
        if (hash != markOopDesc::no_hash) {
          new_header = new_header->copy_set_hash(hash);
        }
        if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::rebiased_lock_entry_count_addr())++;
          }
        } else {
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
        }
        success = true;
      } else {
        // try to bias towards thread in case object is anonymously biased
        markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                        (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
        if (hash != markOopDesc::no_hash) {
          header = header->copy_set_hash(hash);
        }
        markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
        // debugging hint
        DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
        if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
          }
        } else {
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
        }
        success = true;
      }
    }

    // traditional lightweight locking
    if (!success) {
      markOop displaced = lockee->mark()->set_unlocked();
      entry->lock()->set_displaced_header(displaced);
      bool call_vm = UseHeavyMonitors;
      if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
        // Is it the simple recursive case?
        if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
          entry->lock()->set_displaced_header(NULL);
        } else {
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
        }
      }
    }
    UPDATE_PC_AND_TOS(1, -1);
    goto run;
  }
  default: {
    fatal("Unexpected message from frame manager");
  }
}

run:
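// run: is the head of the main bytecode dispatch loop; every path that
// updates the pc eventually jumps back here, and the instruction-counting
// and debugger single-step hooks below fire before each dispatch.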

DO_UPDATE_INSTRUCTION_COUNT(*pc)
DEBUGGER_SINGLE_STEP_NOTIFY();

// ... [source lines 972-1826 elided] ...
        uintptr_t thread_ident;
        uintptr_t anticipated_bias_locking_value;
        thread_ident = (uintptr_t)istate->thread();
        anticipated_bias_locking_value =
          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
          ~((uintptr_t) markOopDesc::age_mask_in_place);

        if (anticipated_bias_locking_value == 0) {
          // already biased towards this thread, nothing to do
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::biased_lock_entry_count_addr())++;
          }
          success = true;
        }
        else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
          // try revoke bias
          markOop header = lockee->klass()->prototype_header();
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics)
              (*BiasedLocking::revoked_lock_entry_count_addr())++;
          }
        }
        else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
          // try rebias
          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
          if (hash != markOopDesc::no_hash) {
            new_header = new_header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics)
              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
          }
          else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
        else {
          // try to bias towards thread in case object is anonymously biased
          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                          (uintptr_t)markOopDesc::age_mask_in_place |
                                                          epoch_mask_in_place));
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
          // debugging hint
          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
          if (Atomic::cmpxchg_ptr((void*)new_header, lockee->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics)
              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
          }
          else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
      }

      // traditional lightweight locking
      if (!success) {
        markOop displaced = lockee->mark()->set_unlocked();
        entry->lock()->set_displaced_header(displaced);
        bool call_vm = UseHeavyMonitors;
        if (call_vm || Atomic::cmpxchg_ptr(entry, lockee->mark_addr(), displaced) != displaced) {
          // Is it the simple recursive case?
          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
            entry->lock()->set_displaced_header(NULL);
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
        }
      }
      UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
    } else {
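      // No free monitor slot was found: ask the frame manager for more
      // monitors, then re-execute this bytecode.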
      istate->set_msg(more_monitors);
      UPDATE_PC_AND_RETURN(0); // Re-execute
    }
  }

  CASE(_monitorexit): {
    oop lockee = STACK_OBJECT(-1);
    CHECK_NULL(lockee);
    // dereferencing lockee ought to provoke an implicit null check
    // find our monitor slot
    BasicObjectLock* limit = istate->monitor_base();
    BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
    while (most_recent != limit ) {
      if ((most_recent)->obj() == lockee) {
        BasicLock* lock = most_recent->lock();
        markOop header = lock->displaced_header();
        most_recent->set_obj(NULL);
        if (!lockee->mark()->has_bias_pattern()) {
          bool call_vm = UseHeavyMonitors;
          // If it isn't recursive, we must either swap the old header back or call the runtime.
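          // A NULL displaced header marks a recursive stack-lock entry, in
          // which case there is nothing to restore.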
          if (header != NULL || call_vm) {
            if (call_vm || Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
              // restore object for the slow case
              most_recent->set_obj(lockee);
              CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
            }
          }
        }
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }
      most_recent++;
    }
    // Need to throw illegal monitor state exception
    CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
    ShouldNotReachHere();
  }

  /* All of the non-quick opcodes. */

  /* -Set clobbersCpIndex true if the quickened opcode clobbers the
   * constant pool index in the instruction.
   */

// ... [source lines 1947-2171 elided] ...
  InstanceKlass* ik = InstanceKlass::cast(entry);
  if (ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
    size_t obj_size = ik->size_helper();
    oop result = NULL;
    // If the TLAB isn't pre-zeroed then we'll have to do it
    bool need_zero = !ZeroTLAB;
    if (UseTLAB) {
      result = (oop) THREAD->tlab().allocate(obj_size);
    }
    // Disable non-TLAB-based fast-path, because profiling requires that all
    // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate
    // returns NULL.
#ifndef CC_INTERP_PROFILE
    if (result == NULL) {
      need_zero = true;
      // Try to allocate in the shared eden.
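      // Classic bump-the-pointer allocation: read the current heap top,
      // compute the new top, and CAS it in; if another thread raced us,
      // loop and try again.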
    retry:
      HeapWord* compare_to = *Universe::heap()->top_addr();
      HeapWord* new_top = compare_to + obj_size;
      if (new_top <= *Universe::heap()->end_addr()) {
        if (Atomic::cmpxchg_ptr(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
          goto retry;
        }
        result = (oop) compare_to;
      }
    }
#endif
    if (result != NULL) {
      // Initialize the object (if it needs zeroing and has a nonzero body) and then its header.
      if (need_zero ) {
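        // sizeof(oopDesc) / oopSize is the header size in HeapWords; skip
        // it here and exclude it from the zeroed length below.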
        HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
        obj_size -= sizeof(oopDesc) / oopSize;
        if (obj_size > 0 ) {
          memset(to_zero, 0, obj_size * HeapWordSize);
        }
      }
      if (UseBiasedLocking) {
        result->set_mark(ik->prototype_header());
      } else {
        result->set_mark(markOopDesc::prototype());
      }

// ... [source lines 2213-2957 elided] ...
// correctly

BasicObjectLock* base = istate->monitor_base();
BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
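// Monitors live between stack_base and monitor_base and grow toward the
// stack, so the half-open range [end, base) covers every monitor slot of
// this frame.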
bool method_unlock_needed = METHOD->is_synchronized();
// We know the initial monitor was used for the method; don't check that
// slot in the loop.
if (method_unlock_needed) base--;

// Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
while (end < base) {
  oop lockee = end->obj();
  if (lockee != NULL) {
    BasicLock* lock = end->lock();
    markOop header = lock->displaced_header();
    end->set_obj(NULL);

    if (!lockee->mark()->has_bias_pattern()) {
      // If it isn't recursive, we must either swap the old header back or call the runtime.
      if (header != NULL) {
        if (Atomic::cmpxchg_ptr(header, lockee->mark_addr(), lock) != lock) {
          // restore object for the slow case
          end->set_obj(lockee);
          {
            // Prevent any HandleMarkCleaner from freeing our live handles
            HandleMark __hm(THREAD);
            CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
          }
        }
      }
    }
    // One error is plenty
    if (illegal_state_oop() == NULL && !suppress_error) {
      {
        // Prevent any HandleMarkCleaner from freeing our live handles
        HandleMark __hm(THREAD);
        CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
      }
      assert(THREAD->has_pending_exception(), "Lost our exception!");
      illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
      THREAD->clear_pending_exception();

// ... [source lines 2999-3032 elided] ...
    }
  } else if (UseHeavyMonitors) {
    {
      // Prevent any HandleMarkCleaner from freeing our live handles.
      HandleMark __hm(THREAD);
      CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
    }
    if (THREAD->has_pending_exception()) {
      if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
      THREAD->clear_pending_exception();
    }
  } else {
    BasicLock* lock = base->lock();
    markOop header = lock->displaced_header();
    base->set_obj(NULL);

    if (!rcvr->mark()->has_bias_pattern()) {
      base->set_obj(NULL);
      // If it isn't recursive, we must either swap the old header back or call the runtime.
      if (header != NULL) {
        if (Atomic::cmpxchg_ptr(header, rcvr->mark_addr(), lock) != lock) {
          // restore object for the slow case
          base->set_obj(rcvr);
          {
            // Prevent any HandleMarkCleaner from freeing our live handles
            HandleMark __hm(THREAD);
            CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
          }
          if (THREAD->has_pending_exception()) {
            if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
            THREAD->clear_pending_exception();
          }
        }
      }
    }
  }
}
}
}
// Clear the do_not_unlock flag now.
THREAD->clr_do_not_unlock();
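
// The listing below shows the same hunks after the change: every
// Atomic::cmpxchg_ptr call above is replaced with the templated,
// type-checked Atomic::cmpxchg, with explicit markOop conversions added
// where the operand types no longer match.
//
// For orientation, a minimal sketch of the two shapes involved (assumed
// declarations for illustration, not the exact atomic.hpp signatures):
//
//   // old: untyped, pointer-width compare-and-swap
//   void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest,
//                             void* compare_value);
//
//   // new: the operand type is deduced and mismatches fail to compile
//   template<typename T>
//   T Atomic::cmpxchg(T exchange_value, T volatile* dest, T compare_value);
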
  if (mark->has_bias_pattern()) {
    uintptr_t thread_ident;
    uintptr_t anticipated_bias_locking_value;
    thread_ident = (uintptr_t)istate->thread();
    anticipated_bias_locking_value =
      (((uintptr_t)rcvr->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
      ~((uintptr_t) markOopDesc::age_mask_in_place);

    if (anticipated_bias_locking_value == 0) {
      // Already biased towards this thread, nothing to do.
      if (PrintBiasedLockingStatistics) {
        (* BiasedLocking::biased_lock_entry_count_addr())++;
      }
      success = true;
    } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
      // Try to revoke bias.
      markOop header = rcvr->klass()->prototype_header();
      if (hash != markOopDesc::no_hash) {
        header = header->copy_set_hash(hash);
      }
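      // Atomic::cmpxchg is a template that deduces the operand type
      // (markOop here), so the (void*) casts the old cmpxchg_ptr calls
      // carried are simply dropped in this version.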
      if (Atomic::cmpxchg(header, rcvr->mark_addr(), mark) == mark) {
        if (PrintBiasedLockingStatistics)
          (*BiasedLocking::revoked_lock_entry_count_addr())++;
      }
    } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
      // Try to rebias.
      markOop new_header = (markOop) ( (intptr_t) rcvr->klass()->prototype_header() | thread_ident);
      if (hash != markOopDesc::no_hash) {
        new_header = new_header->copy_set_hash(hash);
      }
      if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), mark) == mark) {
        if (PrintBiasedLockingStatistics) {
          (* BiasedLocking::rebiased_lock_entry_count_addr())++;
        }
      } else {
        CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
      }
      success = true;
    } else {
      // Try to bias towards thread in case object is anonymously biased.
      markOop header = (markOop) ((uintptr_t) mark &
                                  ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                   (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
      if (hash != markOopDesc::no_hash) {
        header = header->copy_set_hash(hash);
      }
      markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
      // Debugging hint.
      DEBUG_ONLY(mon->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
      if (Atomic::cmpxchg(new_header, rcvr->mark_addr(), header) == header) {
        if (PrintBiasedLockingStatistics) {
          (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
        }
      } else {
        CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
      }
      success = true;
    }
  }

  // Traditional lightweight locking.
  if (!success) {
    markOop displaced = rcvr->mark()->set_unlocked();
    mon->lock()->set_displaced_header(displaced);
    bool call_vm = UseHeavyMonitors;
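    // mon is a BasicObjectLock*, but the mark word slot holds a markOop,
    // so the exchange value is cast explicitly to satisfy the template's
    // type checking.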
    if (call_vm || Atomic::cmpxchg((markOop)mon, rcvr->mark_addr(), displaced) != displaced) {
      // Is it the simple recursive case?
      if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
        mon->lock()->set_displaced_header(NULL);
      } else {
        CALL_VM(InterpreterRuntime::monitorenter(THREAD, mon), handle_exception);
      }
    }
  }
}
THREAD->clr_do_not_unlock();

// Notify jvmti
#ifdef VM_JVMTI
if (_jvmti_interp_events) {
  // Whenever JVMTI puts a thread in interp_only_mode, method
  // entry/exit events are sent for that thread to track stack depth.
  if (THREAD->is_interp_only_mode()) {
    CALL_VM(InterpreterRuntime::post_method_entry(THREAD),
            handle_exception);
  }

// ... [source lines 774-885 elided] ...
    if (mark->has_bias_pattern()) {
      uintptr_t thread_ident;
      uintptr_t anticipated_bias_locking_value;
      thread_ident = (uintptr_t)istate->thread();
      anticipated_bias_locking_value =
        (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
        ~((uintptr_t) markOopDesc::age_mask_in_place);

      if (anticipated_bias_locking_value == 0) {
        // already biased towards this thread, nothing to do
        if (PrintBiasedLockingStatistics) {
          (* BiasedLocking::biased_lock_entry_count_addr())++;
        }
        success = true;
      } else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
        // try revoke bias
        markOop header = lockee->klass()->prototype_header();
        if (hash != markOopDesc::no_hash) {
          header = header->copy_set_hash(hash);
        }
        if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
          if (PrintBiasedLockingStatistics) {
            (*BiasedLocking::revoked_lock_entry_count_addr())++;
          }
        }
      } else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
        // try rebias
        markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
        if (hash != markOopDesc::no_hash) {
          new_header = new_header->copy_set_hash(hash);
        }
        if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::rebiased_lock_entry_count_addr())++;
          }
        } else {
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
        }
        success = true;
      } else {
        // try to bias towards thread in case object is anonymously biased
        markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                        (uintptr_t)markOopDesc::age_mask_in_place | epoch_mask_in_place));
        if (hash != markOopDesc::no_hash) {
          header = header->copy_set_hash(hash);
        }
        markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
        // debugging hint
        DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
        if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
          }
        } else {
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
        }
        success = true;
      }
    }

    // traditional lightweight locking
    if (!success) {
      markOop displaced = lockee->mark()->set_unlocked();
      entry->lock()->set_displaced_header(displaced);
      bool call_vm = UseHeavyMonitors;
      if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
        // Is it the simple recursive case?
        if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
          entry->lock()->set_displaced_header(NULL);
        } else {
          CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
        }
      }
    }
    UPDATE_PC_AND_TOS(1, -1);
    goto run;
  }
  default: {
    fatal("Unexpected message from frame manager");
  }
}

run:

DO_UPDATE_INSTRUCTION_COUNT(*pc)
DEBUGGER_SINGLE_STEP_NOTIFY();

// ... [source lines 972-1826 elided] ...
        uintptr_t thread_ident;
        uintptr_t anticipated_bias_locking_value;
        thread_ident = (uintptr_t)istate->thread();
        anticipated_bias_locking_value =
          (((uintptr_t)lockee->klass()->prototype_header() | thread_ident) ^ (uintptr_t)mark) &
          ~((uintptr_t) markOopDesc::age_mask_in_place);

        if (anticipated_bias_locking_value == 0) {
          // already biased towards this thread, nothing to do
          if (PrintBiasedLockingStatistics) {
            (* BiasedLocking::biased_lock_entry_count_addr())++;
          }
          success = true;
        }
        else if ((anticipated_bias_locking_value & markOopDesc::biased_lock_mask_in_place) != 0) {
          // try revoke bias
          markOop header = lockee->klass()->prototype_header();
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg(header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics)
              (*BiasedLocking::revoked_lock_entry_count_addr())++;
          }
        }
        else if ((anticipated_bias_locking_value & epoch_mask_in_place) != 0) {
          // try rebias
          markOop new_header = (markOop) ( (intptr_t) lockee->klass()->prototype_header() | thread_ident);
          if (hash != markOopDesc::no_hash) {
            new_header = new_header->copy_set_hash(hash);
          }
          if (Atomic::cmpxchg(new_header, lockee->mark_addr(), mark) == mark) {
            if (PrintBiasedLockingStatistics)
              (* BiasedLocking::rebiased_lock_entry_count_addr())++;
          }
          else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
        else {
          // try to bias towards thread in case object is anonymously biased
          markOop header = (markOop) ((uintptr_t) mark & ((uintptr_t)markOopDesc::biased_lock_mask_in_place |
                                                          (uintptr_t)markOopDesc::age_mask_in_place |
                                                          epoch_mask_in_place));
          if (hash != markOopDesc::no_hash) {
            header = header->copy_set_hash(hash);
          }
          markOop new_header = (markOop) ((uintptr_t) header | thread_ident);
          // debugging hint
          DEBUG_ONLY(entry->lock()->set_displaced_header((markOop) (uintptr_t) 0xdeaddead);)
          if (Atomic::cmpxchg(new_header, lockee->mark_addr(), header) == header) {
            if (PrintBiasedLockingStatistics)
              (* BiasedLocking::anonymously_biased_lock_entry_count_addr())++;
          }
          else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
          success = true;
        }
      }

      // traditional lightweight locking
      if (!success) {
        markOop displaced = lockee->mark()->set_unlocked();
        entry->lock()->set_displaced_header(displaced);
        bool call_vm = UseHeavyMonitors;
        if (call_vm || Atomic::cmpxchg((markOop)entry, lockee->mark_addr(), displaced) != displaced) {
          // Is it the simple recursive case?
          if (!call_vm && THREAD->is_lock_owned((address) displaced->clear_lock_bits())) {
            entry->lock()->set_displaced_header(NULL);
          } else {
            CALL_VM(InterpreterRuntime::monitorenter(THREAD, entry), handle_exception);
          }
        }
      }
      UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
    } else {
      istate->set_msg(more_monitors);
      UPDATE_PC_AND_RETURN(0); // Re-execute
    }
  }

  CASE(_monitorexit): {
    oop lockee = STACK_OBJECT(-1);
    CHECK_NULL(lockee);
    // dereferencing lockee ought to provoke an implicit null check
    // find our monitor slot
    BasicObjectLock* limit = istate->monitor_base();
    BasicObjectLock* most_recent = (BasicObjectLock*) istate->stack_base();
    while (most_recent != limit ) {
      if ((most_recent)->obj() == lockee) {
        BasicLock* lock = most_recent->lock();
        markOop header = lock->displaced_header();
        most_recent->set_obj(NULL);
        if (!lockee->mark()->has_bias_pattern()) {
          bool call_vm = UseHeavyMonitors;
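          // Unlike the old cmpxchg_ptr, the templated cmpxchg type-checks
          // its compare value, so the BasicLock* must be encoded as a
          // markOop before the exchange.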
          // If it isn't recursive, we must either swap the old header back or call the runtime.
          if (header != NULL || call_vm) {
            markOop old_header = markOopDesc::encode(lock);
            if (call_vm || Atomic::cmpxchg(header, lockee->mark_addr(), old_header) != old_header) {
              // restore object for the slow case
              most_recent->set_obj(lockee);
              CALL_VM(InterpreterRuntime::monitorexit(THREAD, most_recent), handle_exception);
            }
          }
        }
        UPDATE_PC_AND_TOS_AND_CONTINUE(1, -1);
      }
      most_recent++;
    }
    // Need to throw illegal monitor state exception
    CALL_VM(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD), handle_exception);
    ShouldNotReachHere();
  }

  /* All of the non-quick opcodes. */

  /* -Set clobbersCpIndex true if the quickened opcode clobbers the
   * constant pool index in the instruction.
   */

// ... [source lines 1947-2171 elided] ...
  InstanceKlass* ik = InstanceKlass::cast(entry);
  if (ik->is_initialized() && ik->can_be_fastpath_allocated() ) {
    size_t obj_size = ik->size_helper();
    oop result = NULL;
    // If the TLAB isn't pre-zeroed then we'll have to do it
    bool need_zero = !ZeroTLAB;
    if (UseTLAB) {
      result = (oop) THREAD->tlab().allocate(obj_size);
    }
    // Disable non-TLAB-based fast-path, because profiling requires that all
    // allocations go through InterpreterRuntime::_new() if THREAD->tlab().allocate
    // returns NULL.
#ifndef CC_INTERP_PROFILE
    if (result == NULL) {
      need_zero = true;
      // Try to allocate in the shared eden.
    retry:
      HeapWord* compare_to = *Universe::heap()->top_addr();
      HeapWord* new_top = compare_to + obj_size;
      if (new_top <= *Universe::heap()->end_addr()) {
        if (Atomic::cmpxchg(new_top, Universe::heap()->top_addr(), compare_to) != compare_to) {
          goto retry;
        }
        result = (oop) compare_to;
      }
    }
#endif
    if (result != NULL) {
      // Initialize the object (if it needs zeroing and has a nonzero body) and then its header.
      if (need_zero ) {
        HeapWord* to_zero = (HeapWord*) result + sizeof(oopDesc) / oopSize;
        obj_size -= sizeof(oopDesc) / oopSize;
        if (obj_size > 0 ) {
          memset(to_zero, 0, obj_size * HeapWordSize);
        }
      }
      if (UseBiasedLocking) {
        result->set_mark(ik->prototype_header());
      } else {
        result->set_mark(markOopDesc::prototype());
      }

// ... [source lines 2213-2957 elided] ...
// correctly

BasicObjectLock* base = istate->monitor_base();
BasicObjectLock* end = (BasicObjectLock*) istate->stack_base();
bool method_unlock_needed = METHOD->is_synchronized();
// We know the initial monitor was used for the method; don't check that
// slot in the loop.
if (method_unlock_needed) base--;

// Check all the monitors to see that they are unlocked. Install an exception if one is found to be locked.
while (end < base) {
  oop lockee = end->obj();
  if (lockee != NULL) {
    BasicLock* lock = end->lock();
    markOop header = lock->displaced_header();
    end->set_obj(NULL);

    if (!lockee->mark()->has_bias_pattern()) {
      // If it isn't recursive, we must either swap the old header back or call the runtime.
      if (header != NULL) {
        markOop old_header = markOopDesc::encode(lock);
        if (Atomic::cmpxchg(header, lockee->mark_addr(), old_header) != old_header) {
          // restore object for the slow case
          end->set_obj(lockee);
          {
            // Prevent any HandleMarkCleaner from freeing our live handles
            HandleMark __hm(THREAD);
            CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, end));
          }
        }
      }
    }
    // One error is plenty
    if (illegal_state_oop() == NULL && !suppress_error) {
      {
        // Prevent any HandleMarkCleaner from freeing our live handles
        HandleMark __hm(THREAD);
        CALL_VM_NOCHECK(InterpreterRuntime::throw_illegal_monitor_state_exception(THREAD));
      }
      assert(THREAD->has_pending_exception(), "Lost our exception!");
      illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
      THREAD->clear_pending_exception();

// ... [source lines 2999-3032 elided] ...
    }
  } else if (UseHeavyMonitors) {
    {
      // Prevent any HandleMarkCleaner from freeing our live handles.
      HandleMark __hm(THREAD);
      CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
    }
    if (THREAD->has_pending_exception()) {
      if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
      THREAD->clear_pending_exception();
    }
  } else {
    BasicLock* lock = base->lock();
    markOop header = lock->displaced_header();
    base->set_obj(NULL);

    if (!rcvr->mark()->has_bias_pattern()) {
      base->set_obj(NULL);
      // If it isn't recursive, we must either swap the old header back or call the runtime.
      if (header != NULL) {
        markOop old_header = markOopDesc::encode(lock);
        if (Atomic::cmpxchg(header, rcvr->mark_addr(), old_header) != old_header) {
          // restore object for the slow case
          base->set_obj(rcvr);
          {
            // Prevent any HandleMarkCleaner from freeing our live handles
            HandleMark __hm(THREAD);
            CALL_VM_NOCHECK(InterpreterRuntime::monitorexit(THREAD, base));
          }
          if (THREAD->has_pending_exception()) {
            if (!suppress_error) illegal_state_oop = Handle(THREAD, THREAD->pending_exception());
            THREAD->clear_pending_exception();
          }
        }
      }
    }
  }
}
}
}
// Clear the do_not_unlock flag now.
THREAD->clr_do_not_unlock();