src/share/vm/c1/c1_Runtime1.cpp

Old version:
 792 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
 793   NOT_PRODUCT(_patch_code_slowcase_cnt++;)
 794 
 795   ResourceMark rm(thread);
 796   RegisterMap reg_map(thread, false);
 797   frame runtime_frame = thread->last_frame();
 798   frame caller_frame = runtime_frame.sender(&reg_map);
 799 
 800   // last java frame on stack
 801   vframeStream vfst(thread, true);
 802   assert(!vfst.at_end(), "Java frame must exist");
 803 
 804   methodHandle caller_method(THREAD, vfst.method());
 805   // Note that caller_method->code() may not be the same as caller_code because of OSRs
 806   // Note also that in the presence of inlining it is not guaranteed
 807   // that caller_method() == caller_code->method()
 808 
 809   int bci = vfst.bci();
 810   Bytecodes::Code code = caller_method()->java_code_at(bci);
 811 
 812 #ifndef PRODUCT
 813   // this is used by assertions in the access_field_patching_id
 814   BasicType patch_field_type = T_ILLEGAL;
 815 #endif // PRODUCT
 816   bool deoptimize_for_volatile = false;
 817   int patch_field_offset = -1;
 818   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
 819   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
 820   Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
 821   Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
 822   bool load_klass_or_mirror_patch_id =
 823     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 824 
 825   if (stub_id == Runtime1::access_field_patching_id) {
 826 
 827     Bytecode_field field_access(caller_method, bci);
 828     fieldDescriptor result; // initialize class if needed
 829     Bytecodes::Code code = field_access.code();
 830     constantPoolHandle constants(THREAD, caller_method->constants());
 831     LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
 832     patch_field_offset = result.offset();
 833 
 834     // If we're patching a field which is volatile then at compile time it
 835     // must not have been known to be volatile, so the generated code
 836     // isn't correct for a volatile reference.  The nmethod has to be
 837     // deoptimized so that the code can be regenerated correctly.
 838     // This check is only needed for access_field_patching since this
 839     // is the path for patching field offsets.  load_klass is only
 840     // used for patching references to oops which don't need special
 841     // handling in the volatile case.
 842     deoptimize_for_volatile = result.access_flags().is_volatile();
 843 
 844 #ifndef PRODUCT
 845     patch_field_type = result.field_type();
 846 #endif
 847   } else if (load_klass_or_mirror_patch_id) {
 848     Klass* k = NULL;
 849     switch (code) {
 850       case Bytecodes::_putstatic:
 851       case Bytecodes::_getstatic:
 852         { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
 853           init_klass = KlassHandle(THREAD, klass);
 854           mirror = Handle(THREAD, klass->java_mirror());
 855         }
 856         break;
 857       case Bytecodes::_new:
 858         { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
 859           k = caller_method->constants()->klass_at(bnew.index(), CHECK);
 860         }
 861         break;
 862       case Bytecodes::_multianewarray:
 863         { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
 864           k = caller_method->constants()->klass_at(mna.index(), CHECK);
 865         }
 866         break;
         ... (lines 867-900 not shown) ...
 901     int index = bytecode.index();
 902     LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
 903     appendix = info.resolved_appendix();
 904     switch (bc) {
 905       case Bytecodes::_invokehandle: {
 906         int cache_index = ConstantPool::decode_cpcache_index(index, true);
 907         assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
 908         pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
 909         break;
 910       }
 911       case Bytecodes::_invokedynamic: {
 912         pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
 913         break;
 914       }
 915       default: fatal("unexpected bytecode for load_appendix_patching_id");
 916     }
 917   } else {
 918     ShouldNotReachHere();
 919   }
 920 
 921   if (deoptimize_for_volatile) {
 922     // At compile time we assumed the field wasn't volatile but after
 923     // loading it turns out it was volatile so we have to throw the
 924     // compiled code out and let it be regenerated.
 925     if (TracePatching) {
 926       tty->print_cr("Deoptimizing for patching volatile field reference");
 927     }
 928     // It's possible the nmethod was invalidated in the last
 929     // safepoint, but if it's still alive then make it not_entrant.
 930     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
 931     if (nm != NULL) {
 932       nm->make_not_entrant();
 933     }
 934 
 935     Deoptimization::deoptimize_frame(thread, caller_frame.id());
 936 
 937     // Return to the now deoptimized frame.
 938   }
 939 
 940   // Now copy code back
 941 
 942   {
 943     MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
 944     //
 945     // Deoptimization may have happened while we waited for the lock.
 946     // In that case we don't bother to do any patching; we just return
 947     // and let the deopt happen.
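
Why the volatile check exists: C1 can compile a field access before the field's holder class is resolved, emitting a patchable stub that this routine fills in on first execution. Below is a minimal Java sketch of the scenario; the class and method names are hypothetical, and whether the access is actually compiled unresolved depends on compilation timing (e.g. under -Xcomp):

    // Hypothetical sketch: if readFlag() is JIT-compiled before Holder is
    // loaded, C1 emits an unresolved, patchable getstatic. Its first
    // execution enters Runtime1::patch_code(), which resolves the field,
    // discovers it is volatile, and deoptimizes the nmethod so code with
    // the required memory barriers can be regenerated.
    class Holder {
        static volatile int flag;   // volatility unknown until resolution
    }

    public class VolatilePatchDemo {
        static int readFlag() {
            return Holder.flag;     // getstatic against a possibly-unresolved class
        }

        public static void main(String[] args) {
            System.out.println(readFlag());
        }
    }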

New version (adds the deoptimize_for_atomic path for -XX:+AlwaysAtomicAccesses):
 792 JRT_ENTRY(void, Runtime1::patch_code(JavaThread* thread, Runtime1::StubID stub_id ))
 793   NOT_PRODUCT(_patch_code_slowcase_cnt++;)
 794 
 795   ResourceMark rm(thread);
 796   RegisterMap reg_map(thread, false);
 797   frame runtime_frame = thread->last_frame();
 798   frame caller_frame = runtime_frame.sender(&reg_map);
 799 
 800   // last java frame on stack
 801   vframeStream vfst(thread, true);
 802   assert(!vfst.at_end(), "Java frame must exist");
 803 
 804   methodHandle caller_method(THREAD, vfst.method());
 805   // Note that caller_method->code() may not be the same as caller_code because of OSRs
 806   // Note also that in the presence of inlining it is not guaranteed
 807   // that caller_method() == caller_code->method()
 808 
 809   int bci = vfst.bci();
 810   Bytecodes::Code code = caller_method()->java_code_at(bci);
 811 
 812   // used by assertions in the access_field_patching_id case and by the atomicity check below
 813   BasicType patch_field_type = T_ILLEGAL;
 814   bool deoptimize_for_volatile = false;
 815   bool deoptimize_for_atomic = false;
 816   int patch_field_offset = -1;
 817   KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
 818   KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
 819   Handle mirror(THREAD, NULL);                    // oop needed by load_mirror_patching code
 820   Handle appendix(THREAD, NULL);                  // oop needed by appendix_patching code
 821   bool load_klass_or_mirror_patch_id =
 822     (stub_id == Runtime1::load_klass_patching_id || stub_id == Runtime1::load_mirror_patching_id);
 823 
 824   if (stub_id == Runtime1::access_field_patching_id) {
 825 
 826     Bytecode_field field_access(caller_method, bci);
 827     fieldDescriptor result; // initialize class if needed
 828     Bytecodes::Code code = field_access.code();
 829     constantPoolHandle constants(THREAD, caller_method->constants());
 830     LinkResolver::resolve_field_access(result, constants, field_access.index(), Bytecodes::java_code(code), CHECK);
 831     patch_field_offset = result.offset();
 832 
 833     // If we're patching a field which is volatile then at compile time it
 834     // must not have been known to be volatile, so the generated code
 835     // isn't correct for a volatile reference.  The nmethod has to be
 836     // deoptimized so that the code can be regenerated correctly.
 837     // This check is only needed for access_field_patching since this
 838     // is the path for patching field offsets.  load_klass is only
 839     // used for patching references to oops which don't need special
 840     // handling in the volatile case.
 841 
 842     deoptimize_for_volatile = result.access_flags().is_volatile();
 843 
 844     // If we are patching a field which should be atomic, then
 845     // the generated code is not correct either, so force deoptimization.
 846     // We only need to cover T_LONG and T_DOUBLE fields, as we can
 847     // break access atomicity only for them.
 848 
 849     patch_field_type = result.field_type();
 850     deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
 851 
 852   } else if (load_klass_or_mirror_patch_id) {
 853     Klass* k = NULL;
 854     switch (code) {
 855       case Bytecodes::_putstatic:
 856       case Bytecodes::_getstatic:
 857         { Klass* klass = resolve_field_return_klass(caller_method, bci, CHECK);
 858           init_klass = KlassHandle(THREAD, klass);
 859           mirror = Handle(THREAD, klass->java_mirror());
 860         }
 861         break;
 862       case Bytecodes::_new:
 863         { Bytecode_new bnew(caller_method(), caller_method->bcp_from(bci));
 864           k = caller_method->constants()->klass_at(bnew.index(), CHECK);
 865         }
 866         break;
 867       case Bytecodes::_multianewarray:
 868         { Bytecode_multianewarray mna(caller_method(), caller_method->bcp_from(bci));
 869           k = caller_method->constants()->klass_at(mna.index(), CHECK);
 870         }
 871         break;
         ... (lines 872-905 not shown) ...
 906     int index = bytecode.index();
 907     LinkResolver::resolve_invoke(info, Handle(), pool, index, bc, CHECK);
 908     appendix = info.resolved_appendix();
 909     switch (bc) {
 910       case Bytecodes::_invokehandle: {
 911         int cache_index = ConstantPool::decode_cpcache_index(index, true);
 912         assert(cache_index >= 0 && cache_index < pool->cache()->length(), "unexpected cache index");
 913         pool->cache()->entry_at(cache_index)->set_method_handle(pool, info);
 914         break;
 915       }
 916       case Bytecodes::_invokedynamic: {
 917         pool->invokedynamic_cp_cache_entry_at(index)->set_dynamic_call(pool, info);
 918         break;
 919       }
 920       default: fatal("unexpected bytecode for load_appendix_patching_id");
 921     }
 922   } else {
 923     ShouldNotReachHere();
 924   }
 925 
 926   if (deoptimize_for_volatile || deoptimize_for_atomic) {
 927     // At compile time we assumed the field wasn't volatile/atomic but after
 928     // loading it turns out it was volatile/atomic so we have to throw the
 929     // compiled code out and let it be regenerated.
 930     if (TracePatching) {
 931       if (deoptimize_for_volatile) {
 932         tty->print_cr("Deoptimizing for patching volatile field reference");
 933       }
 934       if (deoptimize_for_atomic) {
 935         tty->print_cr("Deoptimizing for patching atomic field reference");
 936       }
 937     }
 938 
 939     // It's possible the nmethod was invalidated in the last
 940     // safepoint, but if it's still alive then make it not_entrant.
 941     nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
 942     if (nm != NULL) {
 943       nm->make_not_entrant();
 944     }
 945 
 946     Deoptimization::deoptimize_frame(thread, caller_frame.id());
 947 
 948     // Return to the now deoptimized frame.
 949   }
 950 
 951   // Now copy code back
 952 
 953   {
 954     MutexLockerEx ml_patch (Patching_lock, Mutex::_no_safepoint_check_flag);
 955     //
 956     // Deoptimization may have happened while we waited for the lock.
 957     // In that case we don't bother to do any patching; we just return
 958     // and let the deopt happen.
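
The new deoptimize_for_atomic path addresses access atomicity of 64-bit fields: JLS §17.7 allows non-volatile long and double accesses to be split into two 32-bit halves, while the experimental AlwaysAtomicAccesses flag forbids such tearing, so an nmethod compiled without that guarantee must be discarded once the patched field resolves to T_LONG or T_DOUBLE. Below is a sketch of the tearing the flag rules out; the class name is hypothetical, and tearing is only observable on platforms where 64-bit accesses are not naturally atomic (e.g. some 32-bit VMs):

    // Run with: java -XX:+UnlockExperimentalVMOptions -XX:+AlwaysAtomicAccesses LongTearDemo
    // Without the flag, the reader may observe a value mixing the halves
    // of 0L and -1L on platforms that split 64-bit accesses (JLS 17.7).
    public class LongTearDemo {
        static long value;   // non-volatile: tearing is normally permitted

        public static void main(String[] args) {
            Thread w1 = new Thread(() -> { while (true) value = 0L; });
            Thread w2 = new Thread(() -> { while (true) value = -1L; });
            w1.setDaemon(true);
            w2.setDaemon(true);
            w1.start();
            w2.start();
            for (long i = 0; i < 1_000_000_000L; i++) {
                long v = value;   // the field access C1 would patch
                if (v != 0L && v != -1L) {
                    System.out.println("torn read: 0x" + Long.toHexString(v));
                    return;
                }
            }
            System.out.println("no torn read observed");
        }
    }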