// NOTE(review): fragment of InterpreterMacroAssembler::remove_activation —
// the enclosing function's header and tail are outside this excerpt, and the
// leading numbers on each line are the original file's line numbers from the
// paste, not code. Comments below annotate only what is visible here.
//
// This section unlocks the synchronized method's monitor on method exit:
// it first checks the _do_not_unlock_if_synchronized flag (tested via rbx),
// then verifies the first BasicObjectLock still holds a live object (i.e.
// was not already released by an explicit monitorexit bytecode).
944 jcc(Assembler::zero, unlocked);
945
946 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
947 // is set.
948 testbool(rbx);
949 jcc(Assembler::notZero, no_unlock);
950
951 // unlock monitor
952 push(state); // save result
953
954 // BasicObjectLock will be first in list, since this is a
955 // synchronized method. However, need to check that the object has
956 // not been unlocked by an explicit monitorexit bytecode.
957 const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
958 wordSize - (int) sizeof(BasicObjectLock));
959 // We use c_rarg1/rdx so that if we go slow path it will be the correct
960 // register for unlock_object to pass to VM directly
961 lea(robj, monitor); // address of first monitor
962
963 movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
// Non-zero obj slot => monitor still held, go unlock it; zero => already
// released, so either throw or (during stack unroll) optionally install
// an IllegalMonitorStateException and continue unwinding.
964 testptr(rax, rax);
965 jcc(Assembler::notZero, unlock);
966
967 pop(state);
968 if (throw_monitor_exception) {
969 // Entry already unlocked, need to throw exception
970 NOT_LP64(empty_FPU_stack();) // remove possible return value from FPU-stack, otherwise stack could overflow
971 call_VM(noreg, CAST_FROM_FN_PTR(address,
972 InterpreterRuntime::throw_illegal_monitor_state_exception));
973 should_not_reach_here();
974 } else {
975 // Monitor already unlocked during a stack unroll. If requested,
976 // install an illegal_monitor_state_exception. Continue with
977 // stack unrolling.
978 if (install_monitor_exception) {
979 NOT_LP64(empty_FPU_stack();)
980 call_VM(noreg, CAST_FROM_FN_PTR(address,
981 InterpreterRuntime::new_illegal_monitor_state_exception));
982 }
983 jmp(unlocked);
// NOTE(review): original lines 984-1025 are missing from this excerpt; the
// code below (slow-path unlock of a stale entry, then the monitor-scan loop)
// belongs to a later part of the same function.
1026 // Unlock does not block, so don't have to worry about the frame.
1027 // We don't have to preserve c_rarg1 since we are going to throw an exception.
1028
1029 push(state);
1030 mov(robj, rmon); // nop if robj and rmon are the same
1031 unlock_object(robj);
1032 pop(state);
1033
1034 if (install_monitor_exception) {
1035 NOT_LP64(empty_FPU_stack();)
1036 call_VM(noreg, CAST_FROM_FN_PTR(address,
1037 InterpreterRuntime::
1038 new_illegal_monitor_state_exception));
1039 }
1040
1041 jmp(restart);
1042 }
1043
// Scan remaining monitor entries: any entry whose obj slot is non-null at
// this point is an error (branch to 'exception'); loop ends when rmon
// reaches the bottom of the monitor block (held in rbx here).
1044 bind(loop);
1045 // check if current entry is used
// NOTE(review): the matching store at original line 1239 uses
// (int32_t)NULL_WORD; consider NULL_WORD here too for consistency — confirm
// against the project's convention before changing.
1046 cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
1047 jcc(Assembler::notEqual, exception);
1048
1049 addptr(rmon, entry_size); // otherwise advance to next entry
1050 bind(entry);
1051 cmpptr(rmon, rbx); // check if bottom reached
1052 jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
1053 }
1054
1055 bind(no_unlock);
1056
1057 // jvmti support
1058 if (notify_jvmdi) {
1059 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
1060 } else {
1061 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1062 }
1063
1064 // remove activation
1065 // get sender sp
// NOTE(review): fragment of InterpreterMacroAssembler::lock_object — the
// function header (and the UseHeavyMonitors 'if' that the first two lines
// belong to) is outside this excerpt. Leading numbers are the original
// file's line numbers from the paste, not code.
//
// Fast path: stack-locking via a displaced mark word. The object's mark
// (with the unlocked bit set) is saved into the BasicLock's displaced
// header, then CAS'd against the object header; on failure, a check
// follows for the recursive case (mark already points into our stack).
1123 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1124 lock_reg);
1125 } else {
1126 Label done;
1127
1128 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1129 const Register tmp_reg = rbx; // Will be passed to biased_locking_enter to avoid a
1130 // problematic case where tmp_reg = no_reg.
1131 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1132
1133 const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1134 const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
1135 const int mark_offset = lock_offset +
1136 BasicLock::displaced_header_offset_in_bytes();
1137
1138 Label slow_case;
1139
1140 // Load object pointer into obj_reg
1141 movptr(obj_reg, Address(lock_reg, obj_offset));
1142
1143 if (UseBiasedLocking) {
// biased_locking_enter may branch straight to 'done' (bias acquired) or
// to 'slow_case'; otherwise we fall through to the displaced-header CAS.
1144 biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
1145 }
1146
1147 // Load immediate 1 into swap_reg %rax
1148 movl(swap_reg, (int32_t)1);
1149
1150 // Load (object->mark() | 1) into swap_reg %rax
1151 orptr(swap_reg, Address(obj_reg, 0));
1152
1153 // Save (object->mark() | 1) into BasicLock's displaced header
1154 movptr(Address(lock_reg, mark_offset), swap_reg);
1155
1156 assert(lock_offset == 0,
1157 "displaced header must be first word in BasicObjectLock");
1158
// CAS: if the object header still equals the expected mark in rax, install
// lock_reg (the BasicObjectLock address) as the new header; ZF set on success.
1159 if (os::is_MP()) lock();
1160 cmpxchgptr(lock_reg, Address(obj_reg, 0));
1161 if (PrintBiasedLockingStatistics) {
1162 cond_inc32(Assembler::zero,
1163 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1164 }
1165 jcc(Assembler::zero, done);
1166
// Alignment mask for a valid stack address: 3 low bits on 32-bit,
// 7 on 64-bit (per the LP64_ONLY/NOT_LP64 selection below).
1167 const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1168
1169 // Test if the oopMark is an obvious stack pointer, i.e.,
1170 // 1) (mark & zero_bits) == 0, and
1171 // 2) rsp <= mark < mark + os::pagesize()
1172 //
1173 // These 3 tests can be done by evaluating the following
1174 // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1175 // assuming both stack pointer and pagesize have their
1176 // least significant bits clear.
1177 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
// NOTE(review): the rest of this recursion test (the andptr/store/jcc that
// consume the subtraction) falls outside this excerpt.
1178 subptr(swap_reg, rsp);
// NOTE(review): fragment of InterpreterMacroAssembler::unlock_object — the
// function header and tail are outside this excerpt. Leading numbers are
// the original file's line numbers from the paste, not code.
//
// Fast path: release a stack lock. Clears the BasicObjectLock's obj slot,
// handles biased and recursive cases, then CAS-restores the displaced
// header into the object (the atomic swap begins on the last visible line).
1217
1218 if (UseHeavyMonitors) {
1219 call_VM(noreg,
1220 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
1221 lock_reg);
1222 } else {
1223 Label done;
1224
1225 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1226 const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx); // Will contain the old oopMark
1227 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1228
1229 save_bcp(); // Save in case of exception
1230
1231 // Convert from BasicObjectLock structure to object and BasicLock
1232 // structure Store the BasicLock address into %rax
1233 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
1234
1235 // Load oop into obj_reg(%c_rarg3)
1236 movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
1237
1238 // Free entry
1239 movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
1240
1241 if (UseBiasedLocking) {
// May branch to 'done' when the lock was biased (no displaced header to restore).
1242 biased_locking_exit(obj_reg, header_reg, done);
1243 }
1244
1245 // Load the old header from BasicLock structure
1246 movptr(header_reg, Address(swap_reg,
1247 BasicLock::displaced_header_offset_in_bytes()));
1248
1249 // Test for recursion
1250 testptr(header_reg, header_reg);
1251
1252 // zero for recursive case
1253 jcc(Assembler::zero, done);
1254
1255 // Atomic swap back the old header
// NOTE(review): the cmpxchgptr consuming this LOCK prefix is outside this excerpt.
1256 if (os::is_MP()) lock();
|
// NOTE(review): second copy of the remove_activation fragment above, from
// the patched side of this paste. Differences from the first copy:
// shenandoah_store_addr_check(rax) after the obj load, and
// shenandoah_lock_check(rmon) in the monitor-scan loop — presumably
// Shenandoah GC address/oop verification; confirm against the
// macroAssembler definitions. Leading numbers are the original file's
// line numbers, not code.
944 jcc(Assembler::zero, unlocked);
945
946 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
947 // is set.
948 testbool(rbx);
949 jcc(Assembler::notZero, no_unlock);
950
951 // unlock monitor
952 push(state); // save result
953
954 // BasicObjectLock will be first in list, since this is a
955 // synchronized method. However, need to check that the object has
956 // not been unlocked by an explicit monitorexit bytecode.
957 const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset *
958 wordSize - (int) sizeof(BasicObjectLock));
959 // We use c_rarg1/rdx so that if we go slow path it will be the correct
960 // register for unlock_object to pass to VM directly
961 lea(robj, monitor); // address of first monitor
962
963 movptr(rax, Address(robj, BasicObjectLock::obj_offset_in_bytes()));
964 shenandoah_store_addr_check(rax); // Invariant
965 testptr(rax, rax);
966 jcc(Assembler::notZero, unlock);
967
968 pop(state);
969 if (throw_monitor_exception) {
970 // Entry already unlocked, need to throw exception
971 NOT_LP64(empty_FPU_stack();) // remove possible return value from FPU-stack, otherwise stack could overflow
972 call_VM(noreg, CAST_FROM_FN_PTR(address,
973 InterpreterRuntime::throw_illegal_monitor_state_exception));
974 should_not_reach_here();
975 } else {
976 // Monitor already unlocked during a stack unroll. If requested,
977 // install an illegal_monitor_state_exception. Continue with
978 // stack unrolling.
979 if (install_monitor_exception) {
980 NOT_LP64(empty_FPU_stack();)
981 call_VM(noreg, CAST_FROM_FN_PTR(address,
982 InterpreterRuntime::new_illegal_monitor_state_exception));
983 }
984 jmp(unlocked);
// NOTE(review): original lines 985-1026 are missing from this excerpt.
1027 // Unlock does not block, so don't have to worry about the frame.
1028 // We don't have to preserve c_rarg1 since we are going to throw an exception.
1029
1030 push(state);
1031 mov(robj, rmon); // nop if robj and rmon are the same
1032 unlock_object(robj);
1033 pop(state);
1034
1035 if (install_monitor_exception) {
1036 NOT_LP64(empty_FPU_stack();)
1037 call_VM(noreg, CAST_FROM_FN_PTR(address,
1038 InterpreterRuntime::
1039 new_illegal_monitor_state_exception));
1040 }
1041
1042 jmp(restart);
1043 }
1044
// Scan remaining monitor entries; any live (non-null obj) entry is an error.
1045 bind(loop);
1046 // check if current entry is used
1047 shenandoah_lock_check(rmon);
// NOTE(review): the matching store at original line 1245 uses
// (int32_t)NULL_WORD; consider NULL_WORD here too for consistency — confirm
// against the project's convention before changing.
1048 cmpptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
1049 jcc(Assembler::notEqual, exception);
1050
1051 addptr(rmon, entry_size); // otherwise advance to next entry
1052 bind(entry);
1053 cmpptr(rmon, rbx); // check if bottom reached
1054 jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
1055 }
1056
1057 bind(no_unlock);
1058
1059 // jvmti support
1060 if (notify_jvmdi) {
1061 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
1062 } else {
1063 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
1064 }
1065
1066 // remove activation
1067 // get sender sp
// NOTE(review): second copy of the lock_object fragment, from the patched
// side of this paste. Difference from the first copy: a
// shenandoah_store_addr_check(obj_reg) after the obj load (presumably
// Shenandoah GC oop-address verification — confirm against the
// macroAssembler definition), plus a comment noting the check covers the
// later cmpxchg. Leading numbers are the original file's line numbers.
1125 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
1126 lock_reg);
1127 } else {
1128 Label done;
1129
1130 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1131 const Register tmp_reg = rbx; // Will be passed to biased_locking_enter to avoid a
1132 // problematic case where tmp_reg = no_reg.
1133 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1134
1135 const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
1136 const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
1137 const int mark_offset = lock_offset +
1138 BasicLock::displaced_header_offset_in_bytes();
1139
1140 Label slow_case;
1141
1142 // Load object pointer into obj_reg
1143 movptr(obj_reg, Address(lock_reg, obj_offset));
1144
1145 shenandoah_store_addr_check(obj_reg);
1146
1147 if (UseBiasedLocking) {
// May branch to 'done' (bias acquired) or 'slow_case'; else fall through to CAS.
1148 biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
1149 }
1150
1151 // Load immediate 1 into swap_reg %rax
1152 movl(swap_reg, (int32_t)1);
1153
1154 // Load (object->mark() | 1) into swap_reg %rax
1155 orptr(swap_reg, Address(obj_reg, 0));
1156
1157 // Save (object->mark() | 1) into BasicLock's displaced header
1158 movptr(Address(lock_reg, mark_offset), swap_reg);
1159
1160 assert(lock_offset == 0,
1161 "displaced header must be first word in BasicObjectLock");
1162
1163 // obj_reg has been checked a few lines up.
1164 if (os::is_MP()) lock();
1165 cmpxchgptr(lock_reg, Address(obj_reg, 0));
1166 if (PrintBiasedLockingStatistics) {
1167 cond_inc32(Assembler::zero,
1168 ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
1169 }
1170 jcc(Assembler::zero, done);
1171
// Alignment mask for a valid stack address: 3 low bits on 32-bit, 7 on 64-bit.
1172 const int zero_bits = LP64_ONLY(7) NOT_LP64(3);
1173
1174 // Test if the oopMark is an obvious stack pointer, i.e.,
1175 // 1) (mark & zero_bits) == 0, and
1176 // 2) rsp <= mark < mark + os::pagesize()
1177 //
1178 // These 3 tests can be done by evaluating the following
1179 // expression: ((mark - rsp) & (zero_bits - os::vm_page_size())),
1180 // assuming both stack pointer and pagesize have their
1181 // least significant bits clear.
1182 // NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
// NOTE(review): the remainder of the recursion test is outside this excerpt.
1183 subptr(swap_reg, rsp);
// NOTE(review): second copy of the unlock_object fragment, from the patched
// side of this paste. Difference from the first copy: a
// shenandoah_store_addr_check(obj_reg) after the oop load (presumably
// Shenandoah GC oop-address verification — confirm against the
// macroAssembler definition). Leading numbers are the original file's
// line numbers, not code.
1222
1223 if (UseHeavyMonitors) {
1224 call_VM(noreg,
1225 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
1226 lock_reg);
1227 } else {
1228 Label done;
1229
1230 const Register swap_reg = rax; // Must use rax for cmpxchg instruction
1231 const Register header_reg = LP64_ONLY(c_rarg2) NOT_LP64(rbx); // Will contain the old oopMark
1232 const Register obj_reg = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // Will contain the oop
1233
1234 save_bcp(); // Save in case of exception
1235
1236 // Convert from BasicObjectLock structure to object and BasicLock
1237 // structure Store the BasicLock address into %rax
1238 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
1239
1240 // Load oop into obj_reg(%c_rarg3)
1241 movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
1242 shenandoah_store_addr_check(obj_reg); // Invariant
1243
1244 // Free entry
1245 movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
1246
1247 if (UseBiasedLocking) {
// May branch to 'done' when the lock was biased (no displaced header to restore).
1248 biased_locking_exit(obj_reg, header_reg, done);
1249 }
1250
1251 // Load the old header from BasicLock structure
1252 movptr(header_reg, Address(swap_reg,
1253 BasicLock::displaced_header_offset_in_bytes()));
1254
1255 // Test for recursion
1256 testptr(header_reg, header_reg);
1257
1258 // zero for recursive case
1259 jcc(Assembler::zero, done);
1260
1261 // Atomic swap back the old header
// NOTE(review): the cmpxchgptr consuming this LOCK prefix is outside this excerpt.
1262 if (os::is_MP()) lock();
|