src/share/vm/c1/c1_Runtime1.cpp

*** 807,821 ****
  // that caller_method() == caller_code->method()
  
  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);
  
- #ifndef PRODUCT
  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
- #endif // PRODUCT
  bool deoptimize_for_volatile = false;
  int patch_field_offset = -1;
  KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
  KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
  Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
  Handle appendix(THREAD, NULL); // oop needed by appendix_patching code
--- 807,820 ----
  // that caller_method() == caller_code->method()
  
  int bci = vfst.bci();
  Bytecodes::Code code = caller_method()->java_code_at(bci);
  
  // this is used by assertions in the access_field_patching_id
  BasicType patch_field_type = T_ILLEGAL;
  bool deoptimize_for_volatile = false;
+ bool deoptimize_for_atomic = false;
  int patch_field_offset = -1;
  KlassHandle init_klass(THREAD, NULL); // klass needed by load_klass_patching code
  KlassHandle load_klass(THREAD, NULL); // klass needed by load_klass_patching code
  Handle mirror(THREAD, NULL); // oop needed by load_mirror_patching code
  Handle appendix(THREAD, NULL); // oop needed by appendix_patching code
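Context for the deleted #ifndef PRODUCT guard: patch_field_type used to feed assertions only, so it could be compiled out of product builds; the new deoptimize_for_atomic decision (second hunk below) reads it unconditionally. A minimal standalone sketch of that dependency, with simplified stand-in declarations rather than the real HotSpot types:

#include <cstdio>

// Stand-ins for the HotSpot declarations (illustrative only).
enum BasicType { T_ILLEGAL, T_INT, T_LONG, T_DOUBLE };
static const bool AlwaysAtomicAccesses = true;  // a VM flag in HotSpot

// patch_field_type can no longer be an assertion-only (#ifndef PRODUCT)
// variable: the atomic-deopt decision below consumes it in all builds.
static bool needs_atomic_deopt(BasicType patch_field_type) {
  // Only T_LONG and T_DOUBLE accesses can be torn, so only they
  // ever force a deoptimization here.
  return AlwaysAtomicAccesses &&
         (patch_field_type == T_DOUBLE || patch_field_type == T_LONG);
}

int main() {
  printf("T_INT:    %d\n", needs_atomic_deopt(T_INT));     // 0
  printf("T_LONG:   %d\n", needs_atomic_deopt(T_LONG));    // 1
  printf("T_DOUBLE: %d\n", needs_atomic_deopt(T_DOUBLE));  // 1
  return 0;
}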
*** 837,851 ****
        // deoptimized so that the code can be regenerated correctly.
        // This check is only needed for access_field_patching since this
        // is the path for patching field offsets.  load_klass is only
        // used for patching references to oops which don't need special
        // handling in the volatile case.
        deoptimize_for_volatile = result.access_flags().is_volatile();
  
! #ifndef PRODUCT
        patch_field_type = result.field_type();
! #endif
      } else if (load_klass_or_mirror_patch_id) {
        Klass* k = NULL;
        switch (code) {
          case Bytecodes::_putstatic:
          case Bytecodes::_getstatic:
--- 836,863 ----
        // deoptimized so that the code can be regenerated correctly.
        // This check is only needed for access_field_patching since this
        // is the path for patching field offsets.  load_klass is only
        // used for patching references to oops which don't need special
        // handling in the volatile case.
+ 
        deoptimize_for_volatile = result.access_flags().is_volatile();
! 
!       // If we are patching a field which should be atomic, then
!       // the generated code is not correct either, force deoptimizing.
!       // We need to only cover T_LONG and T_DOUBLE fields, as we can
!       // break access atomicity only for them.
! 
!       // Strictly speaking, the deoptimization on 64-bit platforms
!       // is unnecessary, and T_LONG stores on 32-bit platforms need
!       // to be handled by special patching code when AlwaysAtomicAccesses
!       // becomes a product feature. At this point, we are still going
!       // for the deoptimization for consistency against volatile
!       // accesses.
! 
!       patch_field_type = result.field_type();
!       deoptimize_for_atomic = (AlwaysAtomicAccesses && (patch_field_type == T_DOUBLE || patch_field_type == T_LONG));
! 
      } else if (load_klass_or_mirror_patch_id) {
        Klass* k = NULL;
        switch (code) {
          case Bytecodes::_putstatic:
          case Bytecodes::_getstatic:
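To make the comment's "break access atomicity" concrete: on a 32-bit platform a plain (non-volatile) T_LONG or T_DOUBLE store may be lowered to two 32-bit stores, and a concurrent reader can observe the low half of one store paired with the high half of another. A deliberately racy standalone C++ sketch of that tearing (the data race is the point; this is an illustration, not HotSpot code, and it may or may not observe torn reads on a given machine):

#include <atomic>
#include <cstdint>
#include <cstdio>
#include <thread>

// A 64-bit slot written as two 32-bit halves, the way a 32-bit JIT
// may lower a plain long/double store. Intentionally not atomic.
static uint32_t halves[2];
static std::atomic<bool> stop{false};

static void writer() {
  const uint64_t patterns[2] = {0x0000000000000000ULL, 0xFFFFFFFFFFFFFFFFULL};
  for (int i = 0; !stop.load(std::memory_order_relaxed); i++) {
    uint64_t v = patterns[i & 1];
    halves[0] = (uint32_t)v;          // low half...
    halves[1] = (uint32_t)(v >> 32);  // ...high half; a reader can land in between
  }
}

int main() {
  std::thread t(writer);
  long torn = 0;
  for (long i = 0; i < 10000000L; i++) {
    uint32_t lo = halves[0];
    uint32_t hi = halves[1];
    if (lo != hi) torn++;  // halves came from two different stores
  }
  stop.store(true, std::memory_order_relaxed);
  t.join();
  printf("torn reads observed: %ld\n", torn);
  return 0;
}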
*** 916,932 ****
        }
      } else {
        ShouldNotReachHere();
      }
  
!     if (deoptimize_for_volatile) {
!       // At compile time we assumed the field wasn't volatile but after
!       // loading it turns out it was volatile so we have to throw the
        // compiled code out and let it be regenerated.
        if (TracePatching) {
          tty->print_cr("Deoptimizing for patching volatile field reference");
        }
        // It's possible the nmethod was invalidated in the last
        // safepoint, but if it's still alive then make it not_entrant.
        nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
        if (nm != NULL) {
          nm->make_not_entrant();
--- 928,950 ----
        }
      } else {
        ShouldNotReachHere();
      }
  
!     if (deoptimize_for_volatile || deoptimize_for_atomic) {
!       // At compile time we assumed the field wasn't volatile/atomic but after
!       // loading it turns out it was volatile/atomic so we have to throw the
        // compiled code out and let it be regenerated.
        if (TracePatching) {
+         if (deoptimize_for_volatile) {
          tty->print_cr("Deoptimizing for patching volatile field reference");
        }
+         if (deoptimize_for_atomic) {
+           tty->print_cr("Deoptimizing for patching atomic field reference");
+         }
+       }
+ 
        // It's possible the nmethod was invalidated in the last
        // safepoint, but if it's still alive then make it not_entrant.
        nmethod* nm = CodeCache::find_nmethod(caller_frame.pc());
        if (nm != NULL) {
          nm->make_not_entrant();
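Both triggers now share one recovery path: invalidate the caller's nmethod so the method is recompiled with the field's volatility/atomicity known. A distilled sketch of that control flow, using hypothetical stand-ins (Nmethod, find_nmethod_at) for the real nmethod/CodeCache machinery shown in the hunk:

#include <cstdio>
#include <cstddef>

// Hypothetical stand-ins for the HotSpot structures the real code uses.
struct Nmethod {
  bool entrant = true;
  void make_not_entrant() { entrant = false; }  // compiled code discarded
};
static Nmethod the_nm;
static Nmethod* find_nmethod_at(void* pc) {  // stands in for CodeCache::find_nmethod
  (void)pc;
  return &the_nm;  // the real lookup may return NULL if already flushed at a safepoint
}
static const bool TracePatching = true;

// Either reason for deoptimization leads to the same action: throw the
// compiled code out and let it be regenerated.
void maybe_deoptimize(bool deoptimize_for_volatile,
                      bool deoptimize_for_atomic,
                      void* caller_pc) {
  if (deoptimize_for_volatile || deoptimize_for_atomic) {
    if (TracePatching) {
      if (deoptimize_for_volatile) printf("Deoptimizing for patching volatile field reference\n");
      if (deoptimize_for_atomic)   printf("Deoptimizing for patching atomic field reference\n");
    }
    Nmethod* nm = find_nmethod_at(caller_pc);
    if (nm != NULL) {
      nm->make_not_entrant();
    }
  }
}

int main() {
  maybe_deoptimize(false, true, NULL);
  printf("nmethod entrant: %d\n", the_nm.entrant);  // 0 after the deopt
  return 0;
}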