Unified diff of Runtime1::patch_code (c1_Runtime1.cpp); "-" lines are removed, "+" lines are added, unprefixed lines are shared context.

 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
   NOT_PRODUCT(_patch_code_slowcase_cnt++;)

   ResourceMark rm(thread);
   RegisterMap reg_map(thread, false);
   frame runtime_frame = thread->last_frame();
   frame caller_frame = runtime_frame.sender(&reg_map);

   // last java frame on stack
   vframeStream vfst(thread, true);
   assert(!vfst.at_end(), "Java frame must exist");

   methodHandle caller_method(THREAD, vfst.method());
   // Note that caller_method->code() may not be the same as caller_code because of OSRs
   // Note also that in the presence of inlining it is not guaranteed
   // that caller_method() == caller_code->method()

   int bci = vfst.bci();
   Bytecodes::Code code = caller_method()->java_code_at(bci);

-#ifndef PRODUCT
   // this is used by assertions in the access_field_patching_id
   BasicType patch_field_type = T_ILLEGAL;
-#endif // PRODUCT
   bool deoptimize_for_volatile = false;
+  bool deoptimize_for_atomic = false;
   int patch_field_offset = -1;
   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
   Handle mirror(THREAD, NULL);          // oop needed by load_mirror_patching code
   Handle appendix(THREAD, NULL);        // oop needed by appendix_patching code
   bool load_klass_or_mirror_patch_id =
     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);

   if (stub_id == Runtime1::access_field_patching_id) {

     Bytecode_field field_access(caller_method, bci);
     fieldDescriptor result; // initialize class if needed
     Bytecodes::Code code = field_access.code();
     constantPoolHandle constants(THREAD, caller_method->constants());
     LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
     patch_field_offset = result.offset();

     // If we're patching a field which is volatile then at compile time it
     // must not have been known to be volatile, so the generated code
     // isn't correct for a volatile reference. The nmethod has to be
     // deoptimized so that the code can be regenerated correctly.
     // This check is only needed for access_field_patching since this
     // is the path for patching field offsets. load_klass is only
     // used for patching references to oops which don't need special
     // handling in the volatile case.
+
     deoptimize_for_volatile = result.access_flags().is_volatile();
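Why the volatile case cannot be fixed by patching alone: the memory-ordering obligations of a volatile access are baked into the instructions C1 already emitted, not into the field offset being patched. A minimal C++ sketch of the distinction (illustrative only; the function names are mine, and the JIT emits platform barriers rather than std::atomic calls):

#include <atomic>
#include <cstdint>

// A plain Java field load carries no ordering constraints, roughly the
// relaxed load below; a volatile Java load needs the seq_cst-like form.
// Once the relaxed shape is baked into the nmethod, patching the offset
// still leaves the barriers missing, hence the deoptimization.
int64_t plain_load(const std::atomic<int64_t>& f) {
  return f.load(std::memory_order_relaxed);   // no fences
}

int64_t volatile_like_load(const std::atomic<int64_t>& f) {
  return f.load(std::memory_order_seq_cst);   // ordering enforced
}

The patch extends the same reasoning to access atomicity: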
-#ifndef PRODUCT
+    // If we are patching a field which should be atomic, then
+    // the generated code is not correct either, so force deoptimization.
+    // We only need to cover T_LONG and T_DOUBLE fields, as access
+    // atomicity can be broken only for them.
+
+    // Strictly speaking, the deoptimization on 64-bit platforms
+    // is unnecessary, and T_LONG stores on 32-bit platforms need
+    // to be handled by special patching code when AlwaysAtomicAccesses
+    // becomes a product feature. At this point, we still go for the
+    // deoptimization for consistency with the volatile case.
     patch_field_type = result.field_type();
-#endif
+    deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
+
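What "break access atomicity" means concretely: on a 32-bit VM a 64-bit (T_LONG or T_DOUBLE) store can be emitted as two 32-bit stores, and a concurrent reader may observe half of the old value and half of the new one. The JLS (17.7) tolerates such tearing for non-volatile long and double; AlwaysAtomicAccesses asks the VM to rule it out anyway. A self-contained sketch of the hazard (illustrative, not HotSpot code):

#include <cstdint>

// Models the code shape a 32-bit JIT can produce for a Java long store:
// the two halves land separately, so a reader running between the two
// assignments sees a "torn" value mixing old and new bits.
void torn_store(uint32_t halves[2], uint64_t v) {
  halves[0] = (uint32_t)(v & 0xffffffffu);  // low word written first
  // <-- a concurrent reader here observes new-low/old-high
  halves[1] = (uint32_t)(v >> 32);          // high word written second
}

The resolution paths for the other patching stubs are untouched; the deoptimization itself then keys off both flags: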
   } else if (load_klass_or_mirror_patch_id) {
     Klass* k = NULL;
     switch (code) {
       case Bytecodes::_putstatic:
       case Bytecodes::_getstatic:
         { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
           init_klass = KlassHandle(THREAD, klass);
           mirror = Handle(THREAD, klass->java_mirror());
         }
         break;
       case Bytecodes::_new:
         { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
           k = caller_method->constants()->klass_at(bnew.index(), CHECK);
         }
         break;
       case Bytecodes::_multianewarray:
         { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
           k = caller_method->constants()->klass_at(mna.index(), CHECK);
         }
         break;

[... intervening cases and the start of the load_appendix_patching_id branch are elided in this excerpt ...]

     int index = bytecode.index();
     LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
     appendix = info.resolved_appendix();
     switch (bc) {
       case Bytecodes::_invokehandle: {
         int cache_index = ConstantPool::decode_cpcache_index(index, true);
         assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
         pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
         break;
       }
       case Bytecodes::_invokedynamic: {
         pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
         break;
       }
       default: fatal("unexpected bytecode for load_appendix_patching_id");
     }
   } else {
     ShouldNotReachHere();
   }

-  if (deoptimize_for_volatile) {
-    // At compile time we assumed the field wasn't volatile but after
-    // loading it turns out it was volatile so we have to throw the
+  if (deoptimize_for_volatile || deoptimize_for_atomic) {
+    // At compile time we assumed the field wasn't volatile/atomic but after
+    // loading it turns out it was volatile/atomic, so we have to throw the
     // compiled code out and let it be regenerated.
     if (TracePatching) {
-      tty->print_cr("Deoptimizing for patching volatile field reference");
+      if (deoptimize_for_volatile) {
+        tty->print_cr("Deoptimizing for patching volatile field reference");
+      }
+      if (deoptimize_for_atomic) {
+        tty->print_cr("Deoptimizing for patching atomic field reference");
+      }
     }
+
     // It's possible the nmethod was invalidated in the last
     // safepoint, but if it's still alive then make it not_entrant.
     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
     if (nm != NULL) {
       nm->make_not_entrant();
     }

     Deoptimization::deoptimize_frame(thread, caller_frame.id());

     // Return to the now deoptimized frame.
   }
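On the "strictly speaking, the deoptimization on 64-bit platforms is unnecessary" remark above: aligned 64-bit accesses are natively atomic on 64-bit targets, so plain generated long/double accesses are already tear-free there, and only the 32-bit case truly needs regenerated code. A rough C++ probe of that platform property (is_lock_free() is only a proxy, but it tracks the same hardware capability):

#include <atomic>
#include <cstdint>
#include <cstdio>

int main() {
  std::atomic<uint64_t> v{0};
  // Prints 1 on typical 64-bit targets: a 64-bit access needs no lock,
  // i.e. the hardware performs it atomically.
  std::printf("64-bit accesses lock-free: %d\n", (int)v.is_lock_free());
  return 0;
}

The function then falls through to the actual patching path: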
   // Now copy code back

   {
     MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
     //
     // Deoptimization may have happened while we waited for the lock.
     // In that case we don't bother to do any patching; we just return
     // and let the deopt happen
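A note on exercising this path: the comment in the field-access branch says AlwaysAtomicAccesses is not yet a product feature, so it is presumably gated behind -XX:+UnlockExperimentalVMOptions, i.e. run with -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses. The "Deoptimizing for patching ... field reference" messages are printed under TracePatching, which appears to be a develop-level flag and therefore only available in debug builds of the VM.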