314 FUNCTION_CASE(entry, StubRoutines::dexp());
315 FUNCTION_CASE(entry, StubRoutines::dlog());
316 FUNCTION_CASE(entry, StubRoutines::dlog10());
317 FUNCTION_CASE(entry, StubRoutines::dpow());
318 FUNCTION_CASE(entry, StubRoutines::dsin());
319 FUNCTION_CASE(entry, StubRoutines::dcos());
320 FUNCTION_CASE(entry, StubRoutines::dtan());
321
322 #undef FUNCTION_CASE
323
324 // Soft float adds more runtime names.
325 return pd_name_for_address(entry);
326 }
327
328
329 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
330 NOT_PRODUCT(_new_instance_slowcase_cnt++;)
331
332 assert(klass->is_klass(), "not a class");
333 Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
334 instanceKlassHandle h(thread, klass);
335 h->check_valid_for_instantiation(true, CHECK);
336 // make sure klass is initialized
337 h->initialize(CHECK);
338 // allocate instance and return via TLS
339 oop obj = h->allocate_instance(CHECK);
340 thread->set_vm_result(obj);
341 JRT_END
342
343
344 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length))
345 NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
346 // Note: no handle for klass is needed since it is not used
347 // anymore after new_typeArray() and no GC can happen before.
348 // (This may have to change if this code changes!)
349 assert(klass->is_klass(), "not a class");
350 BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
351 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
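    // Return the new array to the caller through the thread-local vm_result,
    // just as new_instance does above.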
352 thread->set_vm_result(obj);
353 // This is pretty rare, but this runtime patch is stressful to deoptimization
354 // if we deoptimize here, so force a deopt to stress the path.
840 frame runtime_frame = thread->last_frame();
841 frame caller_frame = runtime_frame.sender(&reg_map);
842
843 // last java frame on stack
844 vframeStream vfst(thread, true);
845 assert(!vfst.at_end(), "Java frame must exist");
846
847 methodHandle caller_method(THREAD, vfst.method());
848 // Note that caller_method->code() may not be the same as caller_code because of OSRs.
849 // Note also that in the presence of inlining it is not guaranteed
850 // that caller_method() == caller_code->method()
851
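    // The bytecode executing at the caller's bci determines what has to be
    // resolved and patched below.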
852 int bci = vfst.bci();
853 Bytecodes::Code code = caller_method()->java_code_at(bci);
854
855 // this is used by assertions in the access_field_patching_id
856 BasicType patch_field_type = T_ILLEGAL;
857 bool deoptimize_for_volatile = false;
858 bool deoptimize_for_atomic = false;
859 int patch_field_offset = -1;
860 KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
861 KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
862 Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
863 Handle appendix(THREAD, NULL); // oop needed by appendix_patching code
864 bool load_klass_or_mirror_patch_id =
865 (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
866
867 if (stub_id == Runtime1::access_field_patching_id) {
868
869 Bytecode_field field_access(caller_method, bci);
870 fieldDescriptor result; // initialize class if needed
871 Bytecodes::Code code = field_access.code();
872 constantPoolHandle constants(THREAD, caller_method->constants());
873 LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
874 patch_field_offset = result.offset();
875
876 // If we're patching a field which is volatile, then at compile time it
877 // must not have been known to be volatile, so the generated code
878 // isn't correct for a volatile reference. The nmethod has to be
879 // deoptimized so that the code can be regenerated correctly.
880 // This check is only needed for access_field_patching since this
881 // is the path for patching field offsets. load_klass is only
888 // the generated code is not correct either, force deoptimizing.
889 // We need to only cover T_LONG and T_DOUBLE fields, as we can
890 // break access atomicity only for them.
891
892 // Strictly speaking, the deoptimization on 64-bit platforms
893 // is unnecessary, and T_LONG stores on 32-bit platforms need
894 // to be handled by special patching code when AlwaysAtomicAccesses
895 // becomes a product feature. At this point, we are still going
896 // for the deoptimization, for consistency with volatile
897 // accesses.
898
899 patch_field_type = result.field_type();
900 deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
901
902 } else if (load_klass_or_mirror_patch_id) {
903 Klass* k = NULL;
904 switch (code) {
905 case Bytecodes::_putstatic:
906 case Bytecodes::_getstatic:
907 { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
908 init_klass = KlassHandle(THREAD, klass);
909 mirror = Handle(THREAD, klass->java_mirror());
910 }
911 break;
912 case Bytecodes::_new:
913 { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
914 k = caller_method->constants()->klass_at(bnew.index(), CHECK);
915 }
916 break;
917 case Bytecodes::_multianewarray:
918 { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
919 k = caller_method->constants()->klass_at(mna.index(), CHECK);
920 }
921 break;
922 case Bytecodes::_instanceof:
923 { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
924 k = caller_method->constants()->klass_at(io.index(), CHECK);
925 }
926 break;
927 case Bytecodes::_checkcast:
928 { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
929 k = caller_method->constants()->klass_at(cc.index(), CHECK);
930 }
931 break;
932 case Bytecodes::_anewarray:
933 { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
934 Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
935 k = ek->array_klass(CHECK);
936 }
937 break;
938 case Bytecodes::_ldc:
939 case Bytecodes::_ldc_w:
940 {
941 Bytecode_loadconstant cc(caller_method, bci);
942 oop m = cc.resolve_constant(CHECK);
943 mirror = Handle(THREAD, m);
944 }
945 break;
946 default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
947 }
948 // convert to handle
949 load_klass = KlassHandle(THREAD, k);
950 } else if (stub_id == load_appendix_patching_id) {
951 Bytecode_invoke bytecode(caller_method, bci);
952 Bytecodes::Code bc = bytecode.invoke_code();
953
954 CallInfo info;
955 constantPoolHandle pool(thread, caller_method->constants());
956 int index = bytecode.index();
957 LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
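    // Resolving the call fills in the constant pool cache entry; the appendix
    // oop recorded there (if any) is the value patched into the code below.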
958 switch (bc) {
959 case Bytecodes::_invokehandle: {
960 int cache_index = ConstantPool::decode_cpcache_index(index, true);
961 assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
962 ConstantPoolCacheEntry* cpce = pool->cache()->entry_at(cache_index);
963 cpce->set_method_handle(pool, info);
964 appendix = Handle(THREAD, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry
965 break;
966 }
967 case Bytecodes::_invokedynamic: {
968 ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(index);
969 cpce->set_dynamic_call(pool, info);
1050
1051 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1052 }
1053 // depending on the code below, do_patch says whether to copy the patch body back into the nmethod
1054 bool do_patch = true;
1055 if (stub_id == Runtime1::access_field_patching_id) {
1056 // The offset may not be correct if the class was not loaded at code generation time.
1057 // Set it now.
1058 NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
1059 assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
1060 assert(patch_field_offset >= 0, "illegal offset");
1061 n_move->add_offset_in_bytes(patch_field_offset);
1062 } else if (load_klass_or_mirror_patch_id) {
1063 // If a getstatic or putstatic is referencing a klass which
1064 // isn't fully initialized, the patch body isn't copied into
1065 // place until initialization is complete. In this case the
1066 // patch site is set up so that any threads besides the
1067 // initializing thread are forced to come into the VM and
1068 // block.
1069 do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
1070 InstanceKlass::cast(init_klass())->is_initialized();
1071 NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
1072 if (jump->jump_destination() == being_initialized_entry) {
1073 assert(do_patch == true, "initialization must be complete at this point");
1074 } else {
1075 // patch the instruction <move reg, klass>
1076 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1077
1078 assert(n_copy->data() == 0 ||
1079 n_copy->data() == (intptr_t)Universe::non_oop_word(),
1080 "illegal init value");
1081 if (stub_id == Runtime1::load_klass_patching_id) {
1082 assert(load_klass() != NULL, "klass not set");
1083 n_copy->set_data((intx) (load_klass()));
1084 } else {
1085 assert(mirror() != NULL, "klass not set");
1086 // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
1087 n_copy->set_data(cast_from_oop<intx>(mirror()));
1088 }
1089
1090 if (TracePatching) {
1091 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1092 }
1093 }
1094 } else if (stub_id == Runtime1::load_appendix_patching_id) {
1095 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1096 assert(n_copy->data() == 0 ||
1097 n_copy->data() == (intptr_t)Universe::non_oop_word(),
1098 "illegal init value");
1099 n_copy->set_data(cast_from_oop<intx>(appendix()));
1100
1101 if (TracePatching) {
1102 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1103 }
1114 // have the right value. On Intel the value is kept
1115 // directly in the instruction instead of in the metadata
1116 // table, so set_data above effectively updated the value.
1117 nmethod* nm = CodeCache::find_nmethod(instr_pc);
1118 assert(nm != NULL, "invalid nmethod_pc");
1119 RelocIterator mds(nm, copy_buff, copy_buff + 1);
1120 bool found = false;
1121 while (mds.next() && !found) {
1122 if (mds.type() == relocInfo::oop_type) {
1123 assert(stub_id == Runtime1::load_mirror_patching_id ||
1124 stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
1125 oop_Relocation* r = mds.oop_reloc();
1126 oop* oop_adr = r->oop_addr();
1127 *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
1128 r->fix_oop_relocation();
1129 found = true;
1130 } else if (mds.type() == relocInfo::metadata_type) {
1131 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1132 metadata_Relocation* r = mds.metadata_reloc();
1133 Metadata** metadata_adr = r->metadata_addr();
1134 *metadata_adr = load_klass();
1135 r->fix_metadata_relocation();
1136 found = true;
1137 }
1138 }
1139 assert(found, "the metadata must exist!");
1140 }
1141 #endif
1142 if (do_patch) {
1143 // replace instructions
1144 // first replace the tail, then the call
1145 #ifdef ARM
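    // A pc-relative mov on ARM materializes its constant from a slot in the
    // nmethod rather than from an immediate, so that slot has to be located
    // through the relocation info covering the copied instruction.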
1146 if ((load_klass_or_mirror_patch_id ||
1147 stub_id == Runtime1::load_appendix_patching_id) &&
1148 nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
1149 nmethod* nm = CodeCache::find_nmethod(instr_pc);
1150 address addr = NULL;
1151 assert(nm != NULL, "invalid nmethod_pc");
1152 RelocIterator mds(nm, copy_buff, copy_buff + 1);
1153 while (mds.next()) {
1154 if (mds.type() == relocInfo::oop_type) {
314 FUNCTION_CASE(entry, StubRoutines::dexp());
315 FUNCTION_CASE(entry, StubRoutines::dlog());
316 FUNCTION_CASE(entry, StubRoutines::dlog10());
317 FUNCTION_CASE(entry, StubRoutines::dpow());
318 FUNCTION_CASE(entry, StubRoutines::dsin());
319 FUNCTION_CASE(entry, StubRoutines::dcos());
320 FUNCTION_CASE(entry, StubRoutines::dtan());
321
322 #undef FUNCTION_CASE
323
324 // Soft float adds more runtime names.
325 return pd_name_for_address(entry);
326 }
327
328
329 JRT_ENTRY(void, Runtime1::new_instance(JavaThread* thread, Klass* klass))
330 NOT_PRODUCT(_new_instance_slowcase_cnt++;)
331
332 assert(klass->is_klass(), "not a class");
333 Handle holder(THREAD, klass->klass_holder()); // keep the klass alive
334 InstanceKlass* h = InstanceKlass::cast(klass);
335 h->check_valid_for_instantiation(true, CHECK);
336 // make sure klass is initialized
337 h->initialize(CHECK);
338 // allocate instance and return via TLS
339 oop obj = h->allocate_instance(CHECK);
340 thread->set_vm_result(obj);
341 JRT_END
342
343
344 JRT_ENTRY(void, Runtime1::new_type_array(JavaThread* thread, Klass* klass, jint length))
345 NOT_PRODUCT(_new_type_array_slowcase_cnt++;)
346 // Note: no handle for klass is needed since it is not used
347 // anymore after new_typeArray() and no GC can happen before.
348 // (This may have to change if this code changes!)
349 assert(klass->is_klass(), "not a class");
350 BasicType elt_type = TypeArrayKlass::cast(klass)->element_type();
351 oop obj = oopFactory::new_typeArray(elt_type, length, CHECK);
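    // Return the new array to the caller through the thread-local vm_result,
    // just as new_instance does above.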
352 thread->set_vm_result(obj);
353 // This is pretty rare, but this runtime patch is stressful to deoptimization
354 // if we deoptimize here, so force a deopt to stress the path.
840 frame runtime_frame = thread->last_frame();
841 frame caller_frame = runtime_frame.sender(&reg_map);
842
843 // last java frame on stack
844 vframeStream vfst(thread, true);
845 assert(!vfst.at_end(), "Java frame must exist");
846
847 methodHandle caller_method(THREAD, vfst.method());
848 // Note that caller_method->code() may not be the same as caller_code because of OSRs.
849 // Note also that in the presence of inlining it is not guaranteed
850 // that caller_method() == caller_code->method()
851
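    // The bytecode executing at the caller's bci determines what has to be
    // resolved and patched below.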
852 int bci = vfst.bci();
853 Bytecodes::Code code = caller_method()->java_code_at(bci);
854
855 // this is used by assertions in the access_field_patching_id
856 BasicType patch_field_type = T_ILLEGAL;
857 bool deoptimize_for_volatile = false;
858 bool deoptimize_for_atomic = false;
859 int patch_field_offset = -1;
860 Klass* init_klass = NULL; // klass needed by load_klass_patching code
861 Klass* load_klass = NULL; // klass needed by load_klass_patching code
862 Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
863 Handle appendix(THREAD, NULL); // oop needed by appendix_patching code
864 bool load_klass_or_mirror_patch_id =
865 (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
866
867 if (stub_id == Runtime1::access_field_patching_id) {
868
869 Bytecode_field field_access(caller_method, bci);
870 fieldDescriptor result; // initialize class if needed
871 Bytecodes::Code code = field_access.code();
872 constantPoolHandle constants(THREAD, caller_method->constants());
873 LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
874 patch_field_offset = result.offset();
875
876 // If we're patching a field which is volatile, then at compile time it
877 // must not have been known to be volatile, so the generated code
878 // isn't correct for a volatile reference. The nmethod has to be
879 // deoptimized so that the code can be regenerated correctly.
880 // This check is only needed for access_field_patching since this
881 // is the path for patching field offsets. load_klass is only
888 // the generated code is not correct either, force deoptimizing.
889 // We need to only cover T_LONG and T_DOUBLE fields, as we can
890 // break access atomicity only for them.
891
892 // Strictly speaking, the deoptimization on 64-bit platforms
893 // is unnecessary, and T_LONG stores on 32-bit platforms need
894 // to be handled by special patching code when AlwaysAtomicAccesses
895 // becomes a product feature. At this point, we are still going
896 // for the deoptimization, for consistency with volatile
897 // accesses.
898
899 patch_field_type = result.field_type();
900 deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
901
902 } else if (load_klass_or_mirror_patch_id) {
903 Klass* k = NULL;
904 switch (code) {
905 case Bytecodes::_putstatic:
906 case Bytecodes::_getstatic:
907 { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
908 init_klass = klass;
909 mirror = Handle(THREAD, klass->java_mirror());
910 }
911 break;
912 case Bytecodes::_new:
913 { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
914 k = caller_method->constants()->klass_at(bnew.index(), CHECK);
915 }
916 break;
917 case Bytecodes::_multianewarray:
918 { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
919 k = caller_method->constants()->klass_at(mna.index(), CHECK);
920 }
921 break;
922 case Bytecodes::_instanceof:
923 { Bytecode_instanceof io(caller_method(), caller_method->bcp_from(bci));
924 k = caller_method->constants()->klass_at(io.index(), CHECK);
925 }
926 break;
927 case Bytecodes::_checkcast:
928 { Bytecode_checkcast cc(caller_method(), caller_method->bcp_from(bci));
929 k = caller_method->constants()->klass_at(cc.index(), CHECK);
930 }
931 break;
932 case Bytecodes::_anewarray:
933 { Bytecode_anewarray anew(caller_method(), caller_method->bcp_from(bci));
934 Klass* ek = caller_method->constants()->klass_at(anew.index(), CHECK);
935 k = ek->array_klass(CHECK);
936 }
937 break;
938 case Bytecodes::_ldc:
939 case Bytecodes::_ldc_w:
940 {
941 Bytecode_loadconstant cc(caller_method, bci);
942 oop m = cc.resolve_constant(CHECK);
943 mirror = Handle(THREAD, m);
944 }
945 break;
946 default: fatal("unexpected bytecode for load_klass_or_mirror_patch_id");
947 }
948 load_klass = k;
949 } else if (stub_id == load_appendix_patching_id) {
950 Bytecode_invoke bytecode(caller_method, bci);
951 Bytecodes::Code bc = bytecode.invoke_code();
952
953 CallInfo info;
954 constantPoolHandle pool(thread, caller_method->constants());
955 int index = bytecode.index();
956 LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
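    // Resolving the call fills in the constant pool cache entry; the appendix
    // oop recorded there (if any) is the value patched into the code below.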
957 switch (bc) {
958 case Bytecodes::_invokehandle: {
959 int cache_index = ConstantPool::decode_cpcache_index(index, true);
960 assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
961 ConstantPoolCacheEntry* cpce = pool->cache()->entry_at(cache_index);
962 cpce->set_method_handle(pool, info);
963 appendix = Handle(THREAD, cpce->appendix_if_resolved(pool)); // just in case somebody already resolved the entry
964 break;
965 }
966 case Bytecodes::_invokedynamic: {
967 ConstantPoolCacheEntry* cpce = pool->invokedynamic_cp_cache_entry_at(index);
968 cpce->set_dynamic_call(pool, info);
1049
1050 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1051 }
1052 // depending on the code below, do_patch says whether to copy the patch body back into the nmethod
1053 bool do_patch = true;
1054 if (stub_id == Runtime1::access_field_patching_id) {
1055 // The offset may not be correct if the class was not loaded at code generation time.
1056 // Set it now.
1057 NativeMovRegMem* n_move = nativeMovRegMem_at(copy_buff);
1058 assert(n_move->offset() == 0 || (n_move->offset() == 4 && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG)), "illegal offset for type");
1059 assert(patch_field_offset >= 0, "illegal offset");
1060 n_move->add_offset_in_bytes(patch_field_offset);
1061 } else if (load_klass_or_mirror_patch_id) {
1062 // If a getstatic or putstatic is referencing a klass which
1063 // isn't fully initialized, the patch body isn't copied into
1064 // place until initialization is complete. In this case the
1066 // patch site is set up so that any threads besides the
1066 // initializing thread are forced to come into the VM and
1067 // block.
1068 do_patch = (code != Bytecodes::_getstatic && code != Bytecodes::_putstatic) ||
1069 InstanceKlass::cast(init_klass)->is_initialized();
1070 NativeGeneralJump* jump = nativeGeneralJump_at(instr_pc);
1071 if (jump->jump_destination() == being_initialized_entry) {
1072 assert(do_patch == true, "initialization must be complete at this point");
1073 } else {
1074 // patch the instruction <move reg, klass>
1075 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1076
1077 assert(n_copy->data() == 0 ||
1078 n_copy->data() == (intptr_t)Universe::non_oop_word(),
1079 "illegal init value");
1080 if (stub_id == Runtime1::load_klass_patching_id) {
1081 assert(load_klass != NULL, "klass not set");
1082 n_copy->set_data((intx) (load_klass));
1083 } else {
1084 assert(mirror() != NULL, "klass not set");
1085 // Don't need a G1 pre-barrier here since we assert above that data isn't an oop.
1086 n_copy->set_data(cast_from_oop<intx>(mirror()));
1087 }
1088
1089 if (TracePatching) {
1090 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1091 }
1092 }
1093 } else if (stub_id == Runtime1::load_appendix_patching_id) {
1094 NativeMovConstReg* n_copy = nativeMovConstReg_at(copy_buff);
1095 assert(n_copy->data() == 0 ||
1096 n_copy->data() == (intptr_t)Universe::non_oop_word(),
1097 "illegal init value");
1098 n_copy->set_data(cast_from_oop<intx>(appendix()));
1099
1100 if (TracePatching) {
1101 Disassembler::decode(copy_buff, copy_buff + *byte_count, tty);
1102 }
1113 // have the right value. On Intel the value is kept
1114 // directly in the instruction instead of in the metadata
1115 // table, so set_data above effectively updated the value.
1116 nmethod* nm = CodeCache::find_nmethod(instr_pc);
1117 assert(nm != NULL, "invalid nmethod_pc");
1118 RelocIterator mds(nm, copy_buff, copy_buff + 1);
1119 bool found = false;
1120 while (mds.next() && !found) {
1121 if (mds.type() == relocInfo::oop_type) {
1122 assert(stub_id == Runtime1::load_mirror_patching_id ||
1123 stub_id == Runtime1::load_appendix_patching_id, "wrong stub id");
1124 oop_Relocation* r = mds.oop_reloc();
1125 oop* oop_adr = r->oop_addr();
1126 *oop_adr = stub_id == Runtime1::load_mirror_patching_id ? mirror() : appendix();
1127 r->fix_oop_relocation();
1128 found = true;
1129 } else if (mds.type() == relocInfo::metadata_type) {
1130 assert(stub_id == Runtime1::load_klass_patching_id, "wrong stub id");
1131 metadata_Relocation* r = mds.metadata_reloc();
1132 Metadata** metadata_adr = r->metadata_addr();
1133 *metadata_adr = load_klass;
1134 r->fix_metadata_relocation();
1135 found = true;
1136 }
1137 }
1138 assert(found, "the metadata must exist!");
1139 }
1140 #endif
1141 if (do_patch) {
1142 // replace instructions
1143 // first replace the tail, then the call
1144 #ifdef ARM
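    // A pc-relative mov on ARM materializes its constant from a slot in the
    // nmethod rather than from an immediate, so that slot has to be located
    // through the relocation info covering the copied instruction.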
1145 if ((load_klass_or_mirror_patch_id ||
1146 stub_id == Runtime1::load_appendix_patching_id) &&
1147 nativeMovConstReg_at(copy_buff)->is_pc_relative()) {
1148 nmethod* nm = CodeCache::find_nmethod(instr_pc);
1149 address addr = NULL;
1150 assert(nm != NULL, "invalid nmethod_pc");
1151 RelocIterator mds(nm, copy_buff, copy_buff + 1);
1152 while (mds.next()) {
1153 if (mds.type() == relocInfo::oop_type) {