692 frame stub_frame = thread->last_frame(); 693 assert(stub_frame.is_runtime_frame(), "sanity check"); 694 frame caller_frame = stub_frame.sender(&reg_map); 695 696 // We are coming from a compiled method; check this is true. 697 assert(CodeCache::find_nmethod(caller_frame.pc()) != NULL, "sanity"); 698 699 // Deoptimize the caller frame. 700 Deoptimization::deoptimize_frame(thread, caller_frame.id()); 701 702 // Return to the now deoptimized frame. 703 JRT_END 704 705 706 static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { 707 Bytecode_field field_access(caller, bci); 708 // This can be static or non-static field access 709 Bytecodes::Code code = field_access.code(); 710 711 // We must load class, initialize class and resolve the field 712 FieldAccessInfo result; // initialize class if needed 713 constantPoolHandle constants(THREAD, caller->constants()); 714 LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK_NULL); 715 return result.klass()(); 716 } 717 718 719 // 720 // This routine patches sites where a class wasn't loaded or 721 // initialized at the time the code was generated. It handles 722 // references to classes, fields and forcing of initialization. Most 723 // of the cases are straightforward and involve simply forcing 724 // resolution of a class, rewriting the instruction stream with the 725 // needed constant and replacing the call in this function with the 726 // patched code. The case for static field is more complicated since 727 // the thread which is in the process of initializing a class can 728 // access its static fields but other threads can't so the code 729 // either has to deoptimize when this case is detected or execute a 730 // check that the current thread is the initializing thread. 
The 731 // current 732 // 733 // Patches basically look like this: 734 // 735 // 809 810 int bci = vfst.bci(); 811 Bytecodes::Code code = caller_method()->java_code_at(bci); 812 813 #ifndef PRODUCT 814 // this is used by assertions in the access_field_patching_id 815 BasicType patch_field_type = T_ILLEGAL; 816 #endif // PRODUCT 817 bool deoptimize_for_volatile = false; 818 int patch_field_offset = -1; 819 KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code 820 KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code 821 Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code 822 Handle appendix(THREAD, NULL); // oop needed by appendix_patching code 823 bool load_klass_or_mirror_patch_id = 824 (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id); 825 826 if (stub_id == Runtime1::access_field_patching_id) { 827 828 Bytecode_field field_access(caller_method, bci); 829 FieldAccessInfo result; // initialize class if needed 830 Bytecodes::Code code = field_access.code(); 831 constantPoolHandle constants(THREAD, caller_method->constants()); 832 LinkResolver::resolve_field(result, constants, field_access.index(), Bytecodes::java_code(code), false, CHECK); 833 patch_field_offset = result.field_offset(); 834 835 // If we're patching a field which is volatile then at compile time it 836 // must not have been known to be volatile, so the generated code 837 // isn't correct for a volatile reference. The nmethod has to be 838 // deoptimized so that the code can be regenerated correctly. 839 // This check is only needed for access_field_patching since this 840 // is the path for patching field offsets. load_klass is only 841 // used for patching references to oops which don't need special 842 // handling in the volatile case. 
843 deoptimize_for_volatile = result.access_flags().is_volatile(); 844 845 #ifndef PRODUCT 846 patch_field_type = result.field_type(); 847 #endif 848 } else if (load_klass_or_mirror_patch_id) { 849 Klass* k = NULL; 850 switch (code) { 851 case Bytecodes::_putstatic: 852 case Bytecodes::_getstatic: 853 { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK); | 692 frame stub_frame = thread->last_frame(); 693 assert(stub_frame.is_runtime_frame(), "sanity check"); 694 frame caller_frame = stub_frame.sender(&reg_map); 695 696 // We are coming from a compiled method; check this is true. 697 assert(CodeCache::find_nmethod(caller_frame.pc()) != NULL, "sanity"); 698 699 // Deoptimize the caller frame. 700 Deoptimization::deoptimize_frame(thread, caller_frame.id()); 701 702 // Return to the now deoptimized frame. 703 JRT_END 704 705 706 static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) { 707 Bytecode_field field_access(caller, bci); 708 // This can be static or non-static field access 709 Bytecodes::Code code = field_access.code(); 710 711 // We must load class, initialize class and resolve the field 712 fieldDescriptor result; // initialize class if needed 713 constantPoolHandle constants(THREAD, caller->constants()); 714 LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL); 715 return result.field_holder(); 716 } 717 718 719 // 720 // This routine patches sites where a class wasn't loaded or 721 // initialized at the time the code was generated. It handles 722 // references to classes, fields and forcing of initialization. Most 723 // of the cases are straightforward and involve simply forcing 724 // resolution of a class, rewriting the instruction stream with the 725 // needed constant and replacing the call in this function with the 726 // patched code. 
The case for static field is more complicated since 727 // the thread which is in the process of initializing a class can 728 // access its static fields but other threads can't so the code 729 // either has to deoptimize when this case is detected or execute a 730 // check that the current thread is the initializing thread. The 731 // current 732 // 733 // Patches basically look like this: 734 // 735 // 809 810 int bci = vfst.bci(); 811 Bytecodes::Code code = caller_method()->java_code_at(bci); 812 813 #ifndef PRODUCT 814 // this is used by assertions in the access_field_patching_id 815 BasicType patch_field_type = T_ILLEGAL; 816 #endif // PRODUCT 817 bool deoptimize_for_volatile = false; 818 int patch_field_offset = -1; 819 KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code 820 KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code 821 Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code 822 Handle appendix(THREAD, NULL); // oop needed by appendix_patching code 823 bool load_klass_or_mirror_patch_id = 824 (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id); 825 826 if (stub_id == Runtime1::access_field_patching_id) { 827 828 Bytecode_field field_access(caller_method, bci); 829 fieldDescriptor result; // initialize class if needed 830 Bytecodes::Code code = field_access.code(); 831 constantPoolHandle constants(THREAD, caller_method->constants()); 832 LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK); 833 patch_field_offset = result.offset(); 834 835 // If we're patching a field which is volatile then at compile time it 836 // must not have been known to be volatile, so the generated code 837 // isn't correct for a volatile reference. The nmethod has to be 838 // deoptimized so that the code can be regenerated correctly. 
839 // This check is only needed for access_field_patching since this 840 // is the path for patching field offsets. load_klass is only 841 // used for patching references to oops which don't need special 842 // handling in the volatile case. 843 deoptimize_for_volatile = result.access_flags().is_volatile(); 844 845 #ifndef PRODUCT 846 patch_field_type = result.field_type(); 847 #endif 848 } else if (load_klass_or_mirror_patch_id) { 849 Klass* k = NULL; 850 switch (code) { 851 case Bytecodes::_putstatic: 852 case Bytecodes::_getstatic: 853 { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK); |