src/share/vm/c1/c1_Runtime1.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File 8073191-work Sdiff src/share/vm/c1

src/share/vm/c1/c1_Runtime1.cpp

Print this page




 745       }
 746     }
 747   }
 748 
 749   // Deoptimize the caller frame.
 750   Deoptimization::deoptimize_frame(thread, caller_frame.id());
 751   // Return to the now deoptimized frame.
 752 JRT_END
 753 
 754 
 755 #ifndef DEOPTIMIZE_WHEN_PATCHING
 756 
 757 static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
 758   Bytecode_field field_access(caller, bci);
 759   // This can be static or non-static field access
 760   Bytecodes::Code code       = field_access.code();
 761 
 762   // We must load class, initialize class and resolvethe field
 763   fieldDescriptor result; // initialize class if needed
 764   constantPoolHandle constants(THREAD, caller->constants());
 765   LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK_NULL);
 766   return result.field_holder();
 767 }
 768 
 769 
 770 //
 771 // This routine patches sites where a class wasn't loaded or
 772 // initialized at the time the code was generated.  It handles
 773 // references to classes, fields and forcing of initialization.  Most
 774 // of the cases are straightforward and involve simply forcing
 775 // resolution of a class, rewriting the instruction stream with the
 776 // needed constant and replacing the call in this function with the
 777 // patched code.  The case for static field is more complicated since
 778 // the thread which is in the process of initializing a class can
 779 // access its static fields but other threads can't so the code
 780 // either has to deoptimize when this case is detected or execute a
 781 // check that the current thread is the initializing thread.  The
 782 // current
 783 //
 784 // Patches basically look like this:
 785 //


 862   Bytecodes::Code code = caller_method()->java_code_at(bci);
 863 
 864   // this is used by assertions in the access_field_patching_id
 865   BasicType patch_field_type = T_ILLEGAL;
 866   bool deoptimize_for_volatile = false;
 867   bool deoptimize_for_atomic = false;
 868   int patch_field_offset = -1;
 869   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
 870   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
 871   Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
 872   Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
 873   bool load_klass_or_mirror_patch_id =
 874     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 875 
 876   if (stub_id == Runtime1::access_field_patching_id) {
 877 
 878     Bytecode_field field_access(caller_method, bci);
 879     fieldDescriptor result; // initialize class if needed
 880     Bytecodes::Code code = field_access.code();
 881     constantPoolHandle constants(THREAD, caller_method->constants());
 882     LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
 883     patch_field_offset = result.offset();
 884 
 885     // If we're patching a field which is volatile then at compile time it
 886     // must not have been known to be volatile, so the generated code
 887     // isn't correct for a volatile reference.  The nmethod has to be
 888     // deoptimized so that the code can be regenerated correctly.
 889     // This check is only needed for access_field_patching since this
 890     // is the path for patching field offsets.  load_klass is only
 891     // used for patching references to oops which don't need special
 892     // handling in the volatile case.
 893 
 894     deoptimize_for_volatile = result.access_flags().is_volatile();
 895 
 896     // If we are patching a field which should be atomic, then
 897     // the generated code is not correct either, force deoptimizing.
 898     // We need to only cover T_LONG and T_DOUBLE fields, as we can
 899     // break access atomicity only for them.
 900 
 901     // Strictly speaking, the deoptimization on 64-bit platforms
 902     // is unnecessary, and T_LONG stores on 32-bit platforms need




 745       }
 746     }
 747   }
 748 
 749   // Deoptimize the caller frame.
 750   Deoptimization::deoptimize_frame(thread, caller_frame.id());
 751   // Return to the now deoptimized frame.
 752 JRT_END
 753 
 754 
 755 #ifndef DEOPTIMIZE_WHEN_PATCHING
 756 
 757 static Klass* resolve_field_return_klass(methodHandle caller, int bci, TRAPS) {
 758   Bytecode_field field_access(caller, bci);
 759   // This can be static or non-static field access
 760   Bytecodes::Code code       = field_access.code();
 761 
 762   // We must load class, initialize class and resolvethe field
 763   fieldDescriptor result; // initialize class if needed
 764   constantPoolHandle constants(THREAD, caller->constants());
 765   LinkResolver::resolve_field_access(result, constants, field_access.index(), caller, Bytecodes::java_code(code), CHECK_NULL);
 766   return result.field_holder();
 767 }
 768 
 769 
 770 //
 771 // This routine patches sites where a class wasn't loaded or
 772 // initialized at the time the code was generated.  It handles
 773 // references to classes, fields and forcing of initialization.  Most
 774 // of the cases are straightforward and involve simply forcing
 775 // resolution of a class, rewriting the instruction stream with the
 776 // needed constant and replacing the call in this function with the
 777 // patched code.  The case for static field is more complicated since
 778 // the thread which is in the process of initializing a class can
 779 // access its static fields but other threads can't so the code
 780 // either has to deoptimize when this case is detected or execute a
 781 // check that the current thread is the initializing thread.  The
 782 // current
 783 //
 784 // Patches basically look like this:
 785 //


 862   Bytecodes::Code code = caller_method()->java_code_at(bci);
 863 
 864   // this is used by assertions in the access_field_patching_id
 865   BasicType patch_field_type = T_ILLEGAL;
 866   bool deoptimize_for_volatile = false;
 867   bool deoptimize_for_atomic = false;
 868   int patch_field_offset = -1;
 869   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
 870   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
 871   Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
 872   Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
 873   bool load_klass_or_mirror_patch_id =
 874     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 875 
 876   if (stub_id == Runtime1::access_field_patching_id) {
 877 
 878     Bytecode_field field_access(caller_method, bci);
 879     fieldDescriptor result; // initialize class if needed
 880     Bytecodes::Code code = field_access.code();
 881     constantPoolHandle constants(THREAD, caller_method->constants());
 882     LinkResolver::resolve_field_access(result, constants, field_access.index(), caller_method, Bytecodes::java_code(code), CHECK);
 883     patch_field_offset = result.offset();
 884 
 885     // If we're patching a field which is volatile then at compile time it
 886     // must not have been known to be volatile, so the generated code
 887     // isn't correct for a volatile reference.  The nmethod has to be
 888     // deoptimized so that the code can be regenerated correctly.
 889     // This check is only needed for access_field_patching since this
 890     // is the path for patching field offsets.  load_klass is only
 891     // used for patching references to oops which don't need special
 892     // handling in the volatile case.
 893 
 894     deoptimize_for_volatile = result.access_flags().is_volatile();
 895 
 896     // If we are patching a field which should be atomic, then
 897     // the generated code is not correct either, force deoptimizing.
 898     // We need to only cover T_LONG and T_DOUBLE fields, as we can
 899     // break access atomicity only for them.
 900 
 901     // Strictly speaking, the deoptimization on 64-bit platforms
 902     // is unnecessary, and T_LONG stores on 32-bit platforms need


src/share/vm/c1/c1_Runtime1.cpp
Index Unified diffs Context diffs Sdiffs Patch New Old Previous File Next File