// ----------------------------------------------------------------------
// Original version
// ----------------------------------------------------------------------

  if (deoptimize_for_volatile) {
    // At compile time we assumed the field wasn't volatile, but after
    // loading it turns out it was volatile, so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      tty->print_cr("Deoptimizing for patching volatile field reference");
    }
    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }
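    // make_not_entrant() only stops future entries into this compiled code;
    // it does not affect activations already on the stack, so the caller's
    // frame still has to be deoptimized explicitly below.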

    Deoptimization::deoptimize_frame(thread, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // If we are patching in a non-perm oop, make sure the nmethod
  // is on the right list.
  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
    if (!nm->on_scavenge_root_list()) {
      CodeCache::add_scavenge_root_nmethod(nm);
    }
  }
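  // The scavenge-root list tracks nmethods that embed oops the young
  // collector may move; such nmethods are walked during a scavenge so the
  // embedded oops get updated.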

  // Now copy code back

  {
    MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
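    // Patching_lock is a special-rank lock and is therefore acquired
    // without a safepoint check; it serializes all threads that are
    // trying to patch code.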
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching; we just return
    // and let the deopt happen.
    if (!caller_is_deopted()) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump()) {
        // The jump has not been patched yet.
        // The jump destination is the slow case and therefore not part of
        // the stubs (stubs are only for StaticCalls).

        // format of buffer
        //    ....
        //    instr byte 0 <-- copy_buff
        //    ....

        // [... patching logic elided: the instruction bytes are copied from
        //      copy_buff over the patch site, and the patch site's reloc
        //      info is switched from relocInfo::none to rtype ...]

#ifdef SPARC
            // SPARC takes two relocations for a metadata/oop constant, so
            // update the second word as well.
            address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
#endif
#ifdef PPC
          { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
          }
#endif
          }

        } else {
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }
JRT_END

//
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to
// know, after the patching is complete and any safepoint(s) are taken,
// whether the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition; when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
int Runtime1::move_klass_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, load_klass_patching_id);
  }
  // Back in Java; use no oops, DON'T safepoint.

  // Return true if the calling code was deoptimized.
  return caller_is_deopted();
}

// ----------------------------------------------------------------------
// Updated version of the same region: the scavenge-root bookkeeping moves
// to after the patching block, and the nmethod is (re)registered with the
// heap once its oops have been patched.
// ----------------------------------------------------------------------

  if (deoptimize_for_volatile) {
    // At compile time we assumed the field wasn't volatile, but after
    // loading it turns out it was volatile, so we have to throw the
    // compiled code out and let it be regenerated.
    if (TracePatching) {
      tty->print_cr("Deoptimizing for patching volatile field reference");
    }
    // It's possible the nmethod was invalidated in the last
    // safepoint, but if it's still alive then make it not_entrant.
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    if (nm != NULL) {
      nm->make_not_entrant();
    }

    Deoptimization::deoptimize_frame(thread, caller_frame.id());

    // Return to the now deoptimized frame.
  }

  // Now copy code back

  {
    MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
    //
    // Deoptimization may have happened while we waited for the lock.
    // In that case we don't bother to do any patching; we just return
    // and let the deopt happen.
    if (!caller_is_deopted()) {
      NativeGeneralJump* jump = nativeGeneralJump_at(caller_frame.pc());
      address instr_pc = jump->jump_destination();
      NativeInstruction* ni = nativeInstruction_at(instr_pc);
      if (ni->is_jump()) {
        // The jump has not been patched yet.
        // The jump destination is the slow case and therefore not part of
        // the stubs (stubs are only for StaticCalls).

        // format of buffer
        //    ....
        //    instr byte 0 <-- copy_buff
        //    ....

        // [... patching logic elided: the instruction bytes are copied from
        //      copy_buff over the patch site, and the patch site's reloc
        //      info is switched from relocInfo::none to rtype ...]

#ifdef SPARC
            // SPARC takes two relocations for a metadata/oop constant, so
            // update the second word as well.
            address instr_pc2 = instr_pc + NativeMovConstReg::add_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
#endif
#ifdef PPC
          { address instr_pc2 = instr_pc + NativeMovConstReg::lo_offset;
            RelocIterator iter2(nm, instr_pc2, instr_pc2 + 1);
            relocInfo::change_reloc_info_for_address(&iter2, (address) instr_pc2,
                                                     relocInfo::none, rtype);
          }
#endif
          }

        } else {
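          // do_patch (computed in the elided section above) was false: the
          // referenced klass isn't fully initialized yet, so the final code
          // can't be installed. Patch in a jump to the being_initialized
          // entry instead, forcing threads other than the initializing
          // thread back into the VM.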
          ICache::invalidate_range(copy_buff, *byte_count);
          NativeGeneralJump::insert_unconditional(instr_pc, being_initialized_entry);
        }
      }
    }
  }

  // If we are patching in a non-perm oop, make sure the nmethod
  // is on the right list.
  if (ScavengeRootsInCode && mirror.not_null() && mirror()->is_scavengable()) {
    MutexLockerEx ml_code (CodeCache_lock, Mutex::_no_safepoint_check_flag);
    nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
    guarantee(nm != NULL, "only nmethods can contain non-perm oops");
    if (!nm->on_scavenge_root_list()) {
      CodeCache::add_scavenge_root_nmethod(nm);
    }

    // Since we've patched some oops in the nmethod,
    // (re)register it with the heap.
    Universe::heap()->register_nmethod(nm);
  }
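  // register_nmethod() lets the GC know that this nmethod's embedded oops
  // have changed, so collectors that track code roots (e.g. G1's
  // per-region code-root sets) can account for the newly patched
  // references.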
JRT_END

//
// Entry point for compiled code. We want to patch an nmethod.
// We don't do a normal VM transition here because we want to
// know, after the patching is complete and any safepoint(s) are taken,
// whether the calling nmethod was deoptimized. We do this by calling a
// helper method which does the normal VM transition; when it
// completes we can check for deoptimization. This simplifies the
// assembly code in the cpu directories.
//
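// A rough sketch (not actual HotSpot code) of how a platform patching stub
// in a cpu directory is expected to use this helper:
//
//   // transition to VM, patch, return to the (possibly deoptimized) caller
//   int deopted = Runtime1::move_klass_patching(thread);
//   if (deopted != 0) {
//     // the caller nmethod was deoptimized: unwind to the deopt blob
//   } else {
//     // return and re-execute the now-patched instruction
//   }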
int Runtime1::move_klass_patching(JavaThread* thread) {
//
// NOTE: we are still in Java
//
  Thread* THREAD = thread;
  debug_only(NoHandleMark nhm;)
  {
    // Enter VM mode

    ResetNoHandleMark rnhm;
    patch_code(thread, load_klass_patching_id);
  }
  // Back in Java; use no oops, DON'T safepoint.

  // Return true if the calling code was deoptimized.
  return caller_is_deopted();
}