--- old/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp 2015-06-27 04:11:00.000000000 +0300 +++ new/src/cpu/aarch64/vm/c1_LIRGenerator_aarch64.cpp 2015-06-27 04:11:00.000000000 +0300 @@ -1334,6 +1334,17 @@ } } +LIR_Opr LIRGenerator::unpack_offset(LIR_Opr src, LIR_Opr off) { + LIR_Opr tmp = new_register(T_LONG); + LabelObj* Lcont = new LabelObj(); + __ move(off, tmp); + __ cmp(lir_cond_equal, src, LIR_OprFact::oopConst(NULL)); + __ branch(lir_cond_equal, T_OBJECT, Lcont->label()); + __ shift_right(tmp, Unsafe::offset_shift, tmp); + __ branch_destination(Lcont->label()); + return tmp; +} + void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { BasicType type = x->basic_type(); LIRItem src(x->object(), this); --- old/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp 2015-06-27 04:11:01.000000000 +0300 +++ new/src/cpu/aarch64/vm/jniFastGetField_aarch64.cpp 2015-06-27 04:11:01.000000000 +0300 @@ -29,6 +29,7 @@ #include "prims/jniFastGetField.hpp" #include "prims/jvm_misc.hpp" #include "runtime/safepoint.hpp" +#include "runtime/jfieldIDWorkaround.hpp" #define __ masm-> @@ -82,7 +83,7 @@ // robj ^ rcounter ^ rcounter == robj // robj is address dependent on rcounter. __ ldr(robj, Address(robj, 0)); // *obj - __ lsr(roffset, c_rarg2, 2); // offset + __ lsr(roffset, c_rarg2, jfieldIDWorkaround::offset_shift); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); // Used by the segfault handler --- old/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp 2015-06-27 04:11:01.000000000 +0300 +++ new/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp 2015-06-27 04:11:01.000000000 +0300 @@ -1204,6 +1204,17 @@ } } +LIR_Opr LIRGenerator::unpack_offset(LIR_Opr src, LIR_Opr off) { + LIR_Opr tmp = new_register(T_LONG); + LabelObj* Lcont = new LabelObj(); + __ move(off, tmp); + __ cmp(lir_cond_equal, src, LIR_OprFact::oopConst(NULL)); + __ branch(lir_cond_equal, T_OBJECT, Lcont->label()); + __ shift_right(tmp, Unsafe::offset_shift, tmp); + __ branch_destination(Lcont->label()); + return tmp; +} + void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { BasicType type = x->basic_type(); LIRItem src(x->object(), this); --- old/src/cpu/sparc/vm/jniFastGetField_sparc.cpp 2015-06-27 04:11:02.000000000 +0300 +++ new/src/cpu/sparc/vm/jniFastGetField_sparc.cpp 2015-06-27 04:11:01.000000000 +0300 @@ -28,6 +28,7 @@ #include "prims/jniFastGetField.hpp" #include "prims/jvm_misc.hpp" #include "runtime/safepoint.hpp" +#include "runtime/jfieldIDWorkaround.hpp" // TSO ensures that loads are blocking and ordered with respect to // to earlier loads, so we don't need LoadLoad membars. 
@@ -40,7 +41,7 @@ // O0: env // O1: obj // O2: jfieldID -// O4: offset (O2 >> 2) +// O4: offset (O2 >> 3) // G4: old safepoint counter address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) { @@ -67,7 +68,7 @@ __ ld (cnt_addr, G4); __ andcc (G4, 1, G0); __ br (Assembler::notZero, false, Assembler::pn, label1); - __ delayed()->srl (O2, 2, O4); + __ delayed()->srl (O2, jfieldIDWorkaround::offset_shift, O4); __ ld_ptr (O1, 0, O5); assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); @@ -146,7 +147,7 @@ __ ld (cnt_addr, G4); __ andcc (G4, 1, G0); __ br (Assembler::notZero, false, Assembler::pn, label1); - __ delayed()->srl (O2, 2, O4); + __ delayed()->srl (O2, jfieldIDWorkaround::offset_shift, O4); __ ld_ptr (O1, 0, O5); __ add (O5, O4, O5); @@ -218,7 +219,7 @@ __ ld (cnt_addr, G4); __ andcc (G4, 1, G0); __ br (Assembler::notZero, false, Assembler::pn, label1); - __ delayed()->srl (O2, 2, O4); + __ delayed()->srl (O2, jfieldIDWorkaround::offset_shift, O4); __ ld_ptr (O1, 0, O5); assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); --- old/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp 2015-06-27 04:11:02.000000000 +0300 +++ new/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp 2015-06-27 04:11:02.000000000 +0300 @@ -33,6 +33,7 @@ #include "ci/ciArray.hpp" #include "ci/ciObjArrayKlass.hpp" #include "ci/ciTypeArrayKlass.hpp" +#include "prims/unsafe.hpp" #include "runtime/sharedRuntime.hpp" #include "runtime/stubRoutines.hpp" #include "vmreg_x86.inline.hpp" @@ -719,7 +720,7 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) { assert(x->number_of_arguments() == 4, "wrong type"); LIRItem obj (x->argument_at(0), this); // object - LIRItem offset(x->argument_at(1), this); // offset of field + LIRItem off (x->argument_at(1), this); // offset of field LIRItem cmp (x->argument_at(2), this); // value to compare with field LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp @@ -733,7 +734,9 @@ // get address of field obj.load_item(); - offset.load_nonconstant(); + off.load_nonconstant(); + + LIR_Opr offset = unpack_offset(obj.result(), off.result()); if (type == objectType) { cmp.load_item_force(FrameMap::rax_oop_opr); @@ -750,28 +753,28 @@ LIR_Opr addr = new_pointer_register(); LIR_Address* a; - if(offset.result()->is_constant()) { + if(offset->is_constant()) { #ifdef _LP64 - jlong c = offset.result()->as_jlong(); + jlong c = offset->as_jlong(); if ((jlong)((jint)c) == c) { a = new LIR_Address(obj.result(), (jint)c, as_BasicType(type)); } else { LIR_Opr tmp = new_register(T_LONG); - __ move(offset.result(), tmp); + __ move(offset, tmp); a = new LIR_Address(obj.result(), tmp, as_BasicType(type)); } #else a = new LIR_Address(obj.result(), - offset.result()->as_jint(), + offset->as_jint(), as_BasicType(type)); #endif } else { a = new LIR_Address(obj.result(), - offset.result(), + offset, LIR_Address::times_1, 0, as_BasicType(type)); @@ -1433,6 +1436,17 @@ } } +LIR_Opr LIRGenerator::unpack_offset(LIR_Opr src, LIR_Opr off) { + LIR_Opr tmp = new_register(NOT_LP64(T_INT) LP64_ONLY(T_LONG)); + LabelObj* Lcont = new LabelObj(); + __ move(off, tmp); + __ cmp(lir_cond_equal, src, LIR_OprFact::oopConst(NULL)); + __ branch(lir_cond_equal, T_OBJECT, Lcont->label()); + __ shift_right(tmp, Unsafe::offset_shift, tmp); + __ branch_destination(Lcont->label()); + return tmp; +} + void LIRGenerator::do_UnsafeGetAndSetObject(UnsafeGetAndSetObject* x) { BasicType type = x->basic_type(); LIRItem src(x->object(), this); @@ -1443,10 +1457,11 @@ value.load_item(); 
off.load_nonconstant(); + LIR_Opr offset = unpack_offset(src.result(), off.result()); + LIR_Opr dst = rlock_result(x, type); LIR_Opr data = value.result(); bool is_obj = (type == T_ARRAY || type == T_OBJECT); - LIR_Opr offset = off.result(); assert (type == T_INT || (!x->is_add() && is_obj) LP64_ONLY( || type == T_LONG ), "unexpected type"); LIR_Address* addr; --- old/src/cpu/x86/vm/jniFastGetField_x86_32.cpp 2015-06-27 04:11:03.000000000 +0300 +++ new/src/cpu/x86/vm/jniFastGetField_x86_32.cpp 2015-06-27 04:11:02.000000000 +0300 @@ -28,6 +28,7 @@ #include "prims/jniFastGetField.hpp" #include "prims/jvm_misc.hpp" #include "runtime/safepoint.hpp" +#include "runtime/jfieldIDWorkaround.hpp" #define __ masm-> @@ -86,7 +87,7 @@ } __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID __ movptr(rdx, Address(rdx, 0)); // *obj - __ shrptr (rax, 2); // offset + __ shrptr (rax, jfieldIDWorkaround::offset_shift); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); @@ -203,7 +204,7 @@ } __ movptr(rsi, Address(rsp, 4*wordSize)); // jfieldID __ movptr(rdx, Address(rdx, 0)); // *obj - __ shrptr(rsi, 2); // offset + __ shrptr(rsi, jfieldIDWorkaround::offset_shift); // offset assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small"); speculative_load_pclist[count++] = __ pc(); @@ -292,7 +293,7 @@ } __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID __ movptr(rdx, Address(rdx, 0)); // *obj - __ shrptr(rax, 2); // offset + __ shrptr(rax, jfieldIDWorkaround::offset_shift); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); --- old/src/cpu/x86/vm/jniFastGetField_x86_64.cpp 2015-06-27 04:11:03.000000000 +0300 +++ new/src/cpu/x86/vm/jniFastGetField_x86_64.cpp 2015-06-27 04:11:03.000000000 +0300 @@ -27,6 +27,7 @@ #include "memory/resourceArea.hpp" #include "prims/jniFastGetField.hpp" #include "prims/jvm_misc.hpp" +#include "runtime/jfieldIDWorkaround.hpp" #include "runtime/safepoint.hpp" #define __ masm-> @@ -82,7 +83,7 @@ } __ movptr(robj, Address(robj, 0)); // *obj __ mov (roffset, c_rarg2); - __ shrptr(roffset, 2); // offset + __ shrptr(roffset, jfieldIDWorkaround::offset_shift); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); @@ -180,7 +181,7 @@ } __ movptr(robj, Address(robj, 0)); // *obj __ mov (roffset, c_rarg2); - __ shrptr(roffset, 2); // offset + __ shrptr(roffset, jfieldIDWorkaround::offset_shift); // offset assert(count < LIST_CAPACITY, "LIST_CAPACITY too small"); speculative_load_pclist[count] = __ pc(); --- old/src/share/vm/c1/c1_GraphBuilder.cpp 2015-06-27 04:11:04.000000000 +0300 +++ new/src/share/vm/c1/c1_GraphBuilder.cpp 2015-06-27 04:11:03.000000000 +0300 @@ -1578,6 +1578,10 @@ // Stable static fields are checked for non-default values in ciField::initialize_from(). } if (constant != NULL) { + // Don't bother with static fields yet. +// if (!field->is_stable()) { +// dependency_recorder()->assert_constant_field_value_klass(field, field->holder()); +// } push(type, append(constant)); } else { if (state_before == NULL) { @@ -1621,6 +1625,9 @@ // Stable field with default value can't be constant. constant = NULL; } + if (TrustFinalNonStaticFields && !field->is_stable()) { + dependency_recorder()->assert_constant_field_value_instance(field, const_oop); + } } else { // For CallSite objects treat the target field as a compile time constant. 
if (const_oop->is_call_site()) { --- old/src/share/vm/c1/c1_LIRGenerator.cpp 2015-06-27 04:11:05.000000000 +0300 +++ new/src/share/vm/c1/c1_LIRGenerator.cpp 2015-06-27 04:11:04.000000000 +0300 @@ -2222,9 +2222,10 @@ off.load_item(); src.load_item(); + LIR_Opr offset = unpack_offset(src.result(), off.result()); LIR_Opr value = rlock_result(x, x->basic_type()); - get_Object_unsafe(value, src.result(), off.result(), type, x->is_volatile()); + get_Object_unsafe(value, src.result(), offset, type, x->is_volatile()); #if INCLUDE_ALL_GCS // We might be reading the value of the referent field of a @@ -2248,7 +2249,7 @@ bool gen_source_check = true; // Assume we need to check the src object for null. bool gen_type_check = true; // Assume we need to check the reference_type. - if (off.is_constant()) { + if (false /*off.is_constant()*/) { jlong off_con = (off.type()->is_int() ? (jlong) off.get_jint_constant() : off.get_jlong_constant()); @@ -2325,7 +2326,7 @@ referent_off = new_register(T_LONG); __ move(LIR_OprFact::longConst(java_lang_ref_Reference::referent_offset), referent_off); } - __ cmp(lir_cond_notEqual, off.result(), referent_off); + __ cmp(lir_cond_notEqual, offset, referent_off); __ branch(lir_cond_notEqual, as_BasicType(off.type()), Lcont->label()); } if (gen_source_check) { @@ -2376,11 +2377,11 @@ data.load_item(); } off.load_item(); - + LIR_Opr offset = unpack_offset(src.result(), off.result()); set_no_result(x); if (x->is_volatile() && os::is_MP()) __ membar_release(); - put_Object_unsafe(src.result(), off.result(), data.result(), type, x->is_volatile()); + put_Object_unsafe(src.result(), offset, data.result(), type, x->is_volatile()); if (x->is_volatile() && os::is_MP()) __ membar(); } --- old/src/share/vm/c1/c1_LIRGenerator.hpp 2015-06-27 04:11:06.000000000 +0300 +++ new/src/share/vm/c1/c1_LIRGenerator.hpp 2015-06-27 04:11:05.000000000 +0300 @@ -295,6 +295,8 @@ void volatile_field_store(LIR_Opr value, LIR_Address* address, CodeEmitInfo* info); void volatile_field_load(LIR_Address* address, LIR_Opr result, CodeEmitInfo* info); + LIR_Opr unpack_offset(LIR_Opr src, LIR_Opr offset); + void put_Object_unsafe(LIR_Opr src, LIR_Opr offset, LIR_Opr data, BasicType type, bool is_volatile); void get_Object_unsafe(LIR_Opr dest, LIR_Opr src, LIR_Opr offset, BasicType type, bool is_volatile); --- old/src/share/vm/ci/ciInstanceKlass.hpp 2015-06-27 04:11:06.000000000 +0300 +++ new/src/share/vm/ci/ciInstanceKlass.hpp 2015-06-27 04:11:06.000000000 +0300 @@ -261,6 +261,8 @@ return NULL; } + bool set_finals() { return get_instanceKlass()->set_finals(); }; + // Dump the current state of this klass for compilation replay. 
virtual void dump_replay_data(outputStream* out); }; --- old/src/share/vm/code/dependencies.cpp 2015-06-27 04:11:07.000000000 +0300 +++ new/src/share/vm/code/dependencies.cpp 2015-06-27 04:11:06.000000000 +0300 @@ -26,6 +26,7 @@ #include "ci/ciArrayKlass.hpp" #include "ci/ciEnv.hpp" #include "ci/ciKlass.hpp" +#include "ci/ciField.hpp" #include "ci/ciMethod.hpp" #include "classfile/javaClasses.inline.hpp" #include "code/dependencies.hpp" @@ -33,10 +34,10 @@ #include "oops/oop.inline.hpp" #include "runtime/handles.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/perfData.hpp" #include "runtime/thread.inline.hpp" #include "utilities/copy.hpp" - #ifdef ASSERT static bool must_be_in_vm() { Thread* thread = Thread::current(); @@ -47,6 +48,14 @@ } #endif //ASSERT +// Globals + +PerfCounter* Dependencies::_perf_dependency_checking_time = NULL; +PerfCounter* Dependencies::_perf_dependencies_checked_count = NULL; +PerfCounter* Dependencies::_perf_dependencies_invalidated = NULL; +PerfCounter* Dependencies::_perf_dependencies_total_count = NULL; +PerfCounter* Dependencies::_perf_dependencies_context_traversals = NULL; + void Dependencies::initialize(ciEnv* env) { Arena* arena = env->arena(); _oop_recorder = env->oop_recorder(); @@ -120,6 +129,20 @@ assert_common_2(call_site_target_value, call_site, method_handle); } +void Dependencies::assert_constant_field_value_klass(ciField* field, ciKlass* ctxk) { + // FIXME: how to record a field? no metadata associated; offset is int + assert_common_1(constant_field_value_klass, ctxk /*, field*/); +} + +void Dependencies::assert_constant_field_value_instance(ciField* field, ciObject* obj) { + if (field->holder()->set_finals()) { + // FIXME: how to record a field? no metadata associated; offset is int + assert_common_2(constant_field_value_instance, field->holder(), /*field,*/ obj); + } else { + assert_constant_field_value_klass(field, field->holder()); + } +} + // Helper function. If we are adding a new dep. under ctxk2, // try to find an old dep. under a broader* ctxk1. 
If there is // @@ -372,7 +395,9 @@ "abstract_with_exclusive_concrete_subtypes_2", "exclusive_concrete_methods_2", "no_finalizable_subclasses", - "call_site_target_value" + "call_site_target_value", + "constant_field_value_instance", + "constant_field_value_klass" }; int Dependencies::_dep_args[TYPE_LIMIT] = { @@ -386,7 +411,9 @@ 3, // unique_concrete_subtypes_2 ctxk, k1, k2 3, // unique_concrete_methods_2 ctxk, m1, m2 1, // no_finalizable_subclasses ctxk - 2 // call_site_target_value call_site, method_handle + 2, // call_site_target_value call_site, method_handle + 2, // constant_field_value_instance ctxk oop + 1 // constant_field_value_klass ctxk }; const char* Dependencies::dep_name(Dependencies::DepType dept) { @@ -1531,6 +1558,22 @@ return NULL; // assertion still valid } +void Dependencies::invalidate_dependent_nmethods(instanceKlassHandle ctxk, DepChange& changes, TRAPS) { + MutexLocker mu(Compile_lock, THREAD); + + int marked = 0; + { + MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag); + marked = ctxk->mark_dependent_nmethods(changes); + } + if (marked > 0) { + ctxk->set_finals(true); + // At least one nmethod has been marked for deoptimization + VM_Deoptimize op; + VMThread::execute(&op); + } +} + void Dependencies::DepStream::trace_and_log_witness(Klass* witness) { if (witness != NULL) { if (TraceDependencies) { @@ -1611,12 +1654,37 @@ if (changes.is_call_site_change()) return check_call_site_dependency(changes.as_call_site_change()); + if (changes.is_constant_field_change()) { + Handle holder = changes.as_constant_field_change()->holder(); + int offset = changes.as_constant_field_change()->offset(); + int dep_offset = -1; // TODO: store offset in dependency + switch (type()) { + case constant_field_value_instance: + if (holder.is_null()) return context_type(); // all oops + if (holder() == argument_oop(1)) { + if (offset == -1) return context_type(); // all fields + if (offset == dep_offset) return context_type(); // same field + } + break; + case constant_field_value_klass: + if (offset == -1) return context_type(); // all fields + if (offset == dep_offset) return context_type(); // same field + break; + } + } + // irrelevant dependency; skip it return NULL; } void DepChange::print() { + if (is_klass_change()) + tty->print_cr("klass_change"); + if (is_call_site_change()) + tty->print_cr("call_site_change"); + if (is_constant_field_change()) + tty->print_cr("constant_field_change: offset=%d %s", as_constant_field_change()->offset(), as_constant_field_change()->holder()->print_string()); int nsup = 0, nint = 0; for (ContextStream str(*this); str.next(); ) { Klass* k = str.klass(); --- old/src/share/vm/code/dependencies.hpp 2015-06-27 04:11:08.000000000 +0300 +++ new/src/share/vm/code/dependencies.hpp 2015-06-27 04:11:07.000000000 +0300 @@ -59,6 +59,7 @@ class DepChange; class KlassDepChange; class CallSiteDepChange; +class ConstantFieldDepChange; class No_Safepoint_Verifier; class Dependencies: public ResourceObj { @@ -162,6 +163,9 @@ // This dependency asserts when the CallSite.target value changed. 
call_site_target_value, + constant_field_value_instance, + constant_field_value_klass, + TYPE_LIMIT }; enum { @@ -279,6 +283,9 @@ void assert_has_no_finalizable_subclasses(ciKlass* ctxk); void assert_call_site_target_value(ciCallSite* call_site, ciMethodHandle* method_handle); + void assert_constant_field_value_klass(ciField* field, ciKlass* ctxk); + void assert_constant_field_value_instance(ciField* field, ciObject* obj); + // Define whether a given method or type is concrete. // These methods define the term "concrete" as used in this module. // For this module, an "abstract" class is one which is non-concrete. @@ -350,6 +357,8 @@ static Method* find_unique_concrete_method(Klass* ctxk, Method* m); static int find_exclusive_concrete_subtypes(Klass* ctxk, int klen, Klass* k[]); + static void invalidate_dependent_nmethods(instanceKlassHandle ctxk, DepChange& changes, TRAPS); + // Create the encoding which will be stored in an nmethod. void encode_content_bytes(); @@ -400,6 +409,12 @@ log_dependency(dept, ciargs); } + static PerfCounter* _perf_dependency_checking_time; + static PerfCounter* _perf_dependencies_checked_count; + static PerfCounter* _perf_dependencies_context_traversals; + static PerfCounter* _perf_dependencies_invalidated; + static PerfCounter* _perf_dependencies_total_count; + class DepArgument : public ResourceObj { private: bool _is_oop; @@ -496,14 +511,30 @@ bool next(); DepType type() { return _type; } - bool is_oop_argument(int i) { return type() == call_site_target_value; } + bool is_oop_argument(int i) { + assert(0 <= i && i < argument_count(), "oob"); + switch (type()) { + case call_site_target_value: return true; + case constant_field_value_instance: return (i == 1); + default: return false; + } + } + bool is_int_argument(int i) { + assert(0 <= i && i < argument_count(), "oob"); + switch (type()) { + case constant_field_value_instance: return (i == 2); + case constant_field_value_klass: return (i == 1); + default: return false; + } + } uintptr_t get_identifier(int i); int argument_count() { return dep_args(type()); } int argument_index(int i) { assert(0 <= i && i < argument_count(), "oob"); return _xi[i]; } Metadata* argument(int i); // => recorded_oop_at(argument_index(i)) - oop argument_oop(int i); // => recorded_oop_at(argument_index(i)) + oop argument_oop(int i); // => recorded_oop_at(argument_index(i)) + int argument_int(int i) { return argument_index(i); } Klass* context_type(); bool is_klass_type() { return Dependencies::is_klass_type(type()); } @@ -573,6 +604,7 @@ // What kind of DepChange is this? virtual bool is_klass_change() const { return false; } virtual bool is_call_site_change() const { return false; } + virtual bool is_constant_field_change() const { return false; } // Subclass casting with assertions. KlassDepChange* as_klass_change() { @@ -583,6 +615,10 @@ assert(is_call_site_change(), "bad cast"); return (CallSiteDepChange*) this; } + ConstantFieldDepChange* as_constant_field_change() { + assert(is_constant_field_change(), "bad cast"); + return (ConstantFieldDepChange*) this; + } void print(); @@ -692,4 +728,20 @@ oop method_handle() const { return _method_handle(); } }; +class ConstantFieldDepChange : public DepChange { + private: + Handle _holder; + int _offset; + + public: + ConstantFieldDepChange(Handle holder, int offset) : _holder(holder), _offset(offset) {} + + // What kind of DepChange is this? 
+ virtual bool is_constant_field_change() const { return true; } + + oop holder() const { return _holder(); } + int offset() const { return _offset; } +}; + + #endif // SHARE_VM_CODE_DEPENDENCIES_HPP --- old/src/share/vm/code/nmethod.cpp 2015-06-27 04:11:08.000000000 +0300 +++ new/src/share/vm/code/nmethod.cpp 2015-06-27 04:11:08.000000000 +0300 @@ -564,7 +564,8 @@ // check every nmethod for dependencies which makes it linear in // the number of methods compiled. For applications with a lot // classes the slow way is too slow. - for (Dependencies::DepStream deps(nm); deps.next(); ) { + int total = 0; + for (Dependencies::DepStream deps(nm); deps.next(); total++) { if (deps.type() == Dependencies::call_site_target_value) { // CallSite dependencies are managed on per-CallSite instance basis. oop call_site = deps.argument_oop(0); @@ -578,6 +579,8 @@ InstanceKlass::cast(klass)->add_dependent_nmethod(nm); } } + Dependencies::_perf_dependencies_total_count->inc(total); + NOT_PRODUCT(nmethod_stats.note_nmethod(nm)); if (PrintAssembly || CompilerOracle::has_option_string(method, "PrintAssembly")) { Disassembler::decode(nm); --- old/src/share/vm/compiler/compileBroker.cpp 2015-06-27 04:11:09.000000000 +0300 +++ new/src/share/vm/compiler/compileBroker.cpp 2015-06-27 04:11:09.000000000 +0300 @@ -26,6 +26,7 @@ #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" #include "code/codeCache.hpp" +#include "code/dependencies.hpp" #include "compiler/compileBroker.hpp" #include "compiler/compileLog.hpp" #include "compiler/compilerOracle.hpp" @@ -970,6 +971,26 @@ PerfData::U_None, (jlong)CompileBroker::no_compile, CHECK); + + Dependencies::_perf_dependency_checking_time = + PerfDataManager::create_counter(SUN_CI, "NMethodDependenciesCheckingTime", + PerfData::U_Ticks, CHECK); + + Dependencies::_perf_dependencies_checked_count = + PerfDataManager::create_counter(SUN_CI, "NMethodDependenciesCheckedCount", + PerfData::U_Events, CHECK); + + Dependencies::_perf_dependencies_invalidated = + PerfDataManager::create_counter(SUN_CI, "NMethodDependenciesInvalidated", + PerfData::U_Events, CHECK); + + Dependencies::_perf_dependencies_total_count = + PerfDataManager::create_counter(SUN_CI, "NMethodDependenciesTotalCount", + PerfData::U_Events, CHECK); + + Dependencies::_perf_dependencies_context_traversals = + PerfDataManager::create_counter(SUN_CI, "NMethodDependenciesContextTraversals", + PerfData::U_Events, CHECK); } _initialized = true; --- old/src/share/vm/interpreter/interpreterRuntime.cpp 2015-06-27 04:11:09.000000000 +0300 +++ new/src/share/vm/interpreter/interpreterRuntime.cpp 2015-06-27 04:11:09.000000000 +0300 @@ -1041,6 +1041,7 @@ if ((ik->field_access_flags(index) & JVM_ACC_FIELD_ACCESS_WATCHED) == 0) return; bool is_static = (obj == NULL); + bool is_final = ((ik->field_access_flags(index) & JVM_ACC_FINAL) != 0); HandleMark hm(thread); Handle h_obj; @@ -1049,7 +1050,7 @@ h_obj = Handle(thread, obj); } instanceKlassHandle h_cp_entry_f1(thread, (Klass*)cp_entry->f1_as_klass()); - jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2_as_index(), is_static); + jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_cp_entry_f1, cp_entry->f2_as_index(), is_static, is_final); JvmtiExport::post_field_access(thread, method(thread), bcp(thread), h_cp_entry_f1, h_obj, fid); IRT_END @@ -1078,10 +1079,11 @@ default: ShouldNotReachHere(); return; } bool is_static = (obj == NULL); + bool is_final = ((ik->field_access_flags(index) & JVM_ACC_FINAL) != 0); HandleMark hm(thread); 
instanceKlassHandle h_klass(thread, k); - jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_klass, cp_entry->f2_as_index(), is_static); + jfieldID fid = jfieldIDWorkaround::to_jfieldID(h_klass, cp_entry->f2_as_index(), is_static, is_final); jvalue fvalue; #ifdef _LP64 fvalue = *value; --- old/src/share/vm/oops/instanceKlass.cpp 2015-06-27 04:11:10.000000000 +0300 +++ new/src/share/vm/oops/instanceKlass.cpp 2015-06-27 04:11:10.000000000 +0300 @@ -1813,19 +1813,13 @@ return id; } -int nmethodBucket::decrement() { +int nmethodBucketEntry::decrement() { return Atomic::add(-1, (volatile int *)&_count); } -// -// Walk the list of dependent nmethods searching for nmethods which -// are dependent on the changes that were passed in and mark them for -// deoptimization. Returns the number of nmethods found. -// -int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) { - assert_locked_or_safepoint(CodeCache_lock); - int found = 0; - for (nmethodBucket* b = deps; b != NULL; b = b->next()) { +int nmethodBucketEntry::mark_dependent_nmethods(nmethodBucketEntry* deps, DepChange& changes) { + int found = 0, total = 0; + for (nmethodBucketEntry* b = deps; b != NULL; b = b->next(), total++) { nmethod* nm = b->get_nmethod(); // since dependencies aren't removed until an nmethod becomes a zombie, // the dependency list may contain nmethods which aren't alive. @@ -1841,61 +1835,42 @@ found++; } } + Dependencies::_perf_dependencies_checked_count->inc(total); return found; } -// -// Add an nmethodBucket to the list of dependencies for this nmethod. -// It's possible that an nmethod has multiple dependencies on this klass -// so a count is kept for each bucket to guarantee that creation and -// deletion of dependencies is consistent. Returns new head of the list. -// -nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* deps, nmethod* nm) { +nmethodBucketEntry* nmethodBucketEntry::add_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm) { assert_locked_or_safepoint(CodeCache_lock); - for (nmethodBucket* b = deps; b != NULL; b = b->next()) { + for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) { if (nm == b->get_nmethod()) { b->increment(); return deps; } } - return new nmethodBucket(nm, deps); + return new nmethodBucketEntry(nm, deps); } -// -// Decrement count of the nmethod in the dependency list and remove -// the bucket completely when the count goes to 0. This method must -// find a corresponding bucket otherwise there's a bug in the -// recording of dependencies. Returns true if the bucket is ready for reclamation. -// -bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) { +bool nmethodBucketEntry::remove_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm, bool& found) { assert_locked_or_safepoint(CodeCache_lock); - - for (nmethodBucket* b = deps; b != NULL; b = b->next()) { + for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) { if (nm == b->get_nmethod()) { int val = b->decrement(); guarantee(val >= 0, err_msg("Underflow: %d", val)); + found = true; return (val == 0); } } -#ifdef ASSERT - tty->print_raw_cr("### can't find dependent nmethod"); - nm->print(); -#endif // ASSERT - ShouldNotReachHere(); return false; } -// -// Reclaim all unused buckets. Returns new head of the list. 
-// -nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) { - nmethodBucket* first = deps; - nmethodBucket* last = NULL; - nmethodBucket* b = first; +nmethodBucketEntry* nmethodBucketEntry::clean_dependent_nmethods(nmethodBucketEntry* deps) { + nmethodBucketEntry* first = deps; + nmethodBucketEntry* last = NULL; + nmethodBucketEntry* b = first; while (b != NULL) { assert(b->count() >= 0, err_msg("bucket count: %d", b->count())); - nmethodBucket* next = b->next(); + nmethodBucketEntry* next = b->next(); if (b->count() == 0) { if (last == NULL) { first = next; @@ -1913,9 +1888,16 @@ } #ifndef PRODUCT -void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) { +void nmethodBucketEntry::verify(nmethodBucketEntry* deps) { + for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) { + assert(b->count() >= 0, err_msg("bucket count: %d", b->count())); + assert(b->count() != 0, "empty buckets need to be cleaned"); + } +} + +void nmethodBucketEntry::print_dependent_nmethods(nmethodBucketEntry* deps, bool verbose) { int idx = 0; - for (nmethodBucket* b = deps; b != NULL; b = b->next()) { + for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) { nmethod* nm = b->get_nmethod(); tty->print("[%d] count=%d { ", idx++, b->count()); if (!verbose) { @@ -1929,8 +1911,8 @@ } } -bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) { - for (nmethodBucket* b = deps; b != NULL; b = b->next()) { +bool nmethodBucketEntry::is_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm) { + for (nmethodBucketEntry* b = deps; b != NULL; b = b->next()) { if (nm == b->get_nmethod()) { #ifdef ASSERT int count = b->count(); @@ -1943,6 +1925,149 @@ } #endif //PRODUCT + +int nmethodBucket::bucket_index(DepChange& changes) { + if (changes.is_klass_change()) return KlassBucket; + else if (changes.is_call_site_change()) return CallSiteBucket; + else if (changes.is_constant_field_change()) return ConstantFieldBucket; + else { + ShouldNotReachHere(); + return -1; + } +} + +// +// Walk the list of dependent nmethods searching for nmethods which +// are dependent on the changes that were passed in and mark them for +// deoptimization. Returns the number of nmethods found. +// +int nmethodBucket::mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes) { + assert_locked_or_safepoint(CodeCache_lock); + PerfTraceTime pt(Dependencies::_perf_dependency_checking_time); + int found = 0; + if (deps != NULL) { + int idx = bucket_index(changes); + nmethodBucketEntry* b = deps->_buckets[idx]; + found = nmethodBucketEntry::mark_dependent_nmethods(b, changes); + Dependencies::_perf_dependencies_context_traversals->inc(1); + Dependencies::_perf_dependencies_invalidated->inc(found); + } + return found; +} + +// +// Add an nmethodBucket to the list of dependencies for this nmethod. +// It's possible that an nmethod has multiple dependencies on this klass +// so a count is kept for each bucket to guarantee that creation and +// deletion of dependencies is consistent. Returns new head of the list. 
+// +nmethodBucket* nmethodBucket::add_dependent_nmethod(nmethodBucket* b, nmethod* nm) { + assert_locked_or_safepoint(CodeCache_lock); + if (b == NULL) { + b = new nmethodBucket(); + } + bool has_deps[Bucket_LIMIT]; + for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { has_deps[i] = false; } + for (Dependencies::DepStream deps(nm); deps.next(); ) { + switch(deps.type()) { + case Dependencies::call_site_target_value: has_deps[CallSiteBucket] = true; break; + case Dependencies::constant_field_value_instance: // fallthru + case Dependencies::constant_field_value_klass: has_deps[ConstantFieldBucket] = true; break; + default: has_deps[KlassBucket] = true; break; + } + } + for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { + if (has_deps[i]) { + b->_buckets[i] = nmethodBucketEntry::add_dependent_nmethod(b->_buckets[i], nm); + } + } + return b; +} + +// +// Decrement count of the nmethod in the dependency list and remove +// the bucket completely when the count goes to 0. This method must +// find a corresponding bucket otherwise there's a bug in the +// recording of dependencies. Returns true if the bucket is ready for reclamation. +// +bool nmethodBucket::remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm) { + if (deps != NULL) { + assert_locked_or_safepoint(CodeCache_lock); + bool found = false, removed = false; + for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { + bool r = nmethodBucketEntry::remove_dependent_nmethod(deps->_buckets[i], nm, found); + removed = removed || r; + } + if (found) { + return removed; + } + } +#ifdef ASSERT + tty->print_raw_cr("### can't find dependent nmethod"); + nm->print(); +#endif // ASSERT + ShouldNotReachHere(); + return false; +} + +// +// Reclaim all unused buckets. Returns new head of the list. +// +nmethodBucket* nmethodBucket::clean_dependent_nmethods(nmethodBucket* deps) { + if (deps == NULL) return NULL; + for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { + deps->_buckets[i] = nmethodBucketEntry::clean_dependent_nmethods(deps->_buckets[i]); + } + return deps; +} + +int nmethodBucket::release(nmethodBucket* deps) { + if (deps == NULL) return 0; + int marked = 0; + for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { + nmethodBucketEntry* entry = deps->_buckets[i]; + while (entry != NULL) { + nmethod* nm = entry->get_nmethod(); + if (entry->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) { + nm->mark_for_deoptimization(); + marked++; + } + nmethodBucketEntry* next = entry->next(); + delete entry; + entry = next; + } + } + delete deps; + return marked; +} + +#ifndef PRODUCT +void nmethodBucket::verify(nmethodBucket* deps) { + if (deps == NULL) return; + for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { + nmethodBucketEntry::verify(deps->_buckets[i]); + } +} + +void nmethodBucket::print_dependent_nmethods(nmethodBucket* deps, bool verbose) { + if (deps == NULL) return; + for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { + tty->print_cr("Bucket #%d: ", i); + nmethodBucketEntry::print_dependent_nmethods(deps->_buckets[i], verbose); + } +} + +bool nmethodBucket::is_dependent_nmethod(nmethodBucket* deps, nmethod* nm) { + if (deps == NULL) return false; + for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { + if (nmethodBucketEntry::is_dependent_nmethod(deps->_buckets[i], nm)) { + return true; + } + } + return false; +} +#endif //PRODUCT + int InstanceKlass::mark_dependent_nmethods(DepChange& changes) { assert_locked_or_safepoint(CodeCache_lock); return nmethodBucket::mark_dependent_nmethods(_dependencies, changes); @@ 
-1958,10 +2083,7 @@ #ifdef ASSERT else { // Verification - for (nmethodBucket* b = _dependencies; b != NULL; b = b->next()) { - assert(b->count() >= 0, err_msg("bucket count: %d", b->count())); - assert(b->count() != 0, "empty buckets need to be cleaned"); - } + nmethodBucket::verify(_dependencies); } #endif } @@ -2162,13 +2284,9 @@ } // release dependencies - nmethodBucket* b = _dependencies; + int marked = nmethodBucket::release(_dependencies); + assert(marked == 0, ""); _dependencies = NULL; - while (b != NULL) { - nmethodBucket* next = b->next(); - delete b; - b = next; - } // Deallocate breakpoint records if (breakpoints() != 0x0) { --- old/src/share/vm/oops/instanceKlass.hpp 2015-06-27 04:11:11.000000000 +0300 +++ new/src/share/vm/oops/instanceKlass.hpp 2015-06-27 04:11:11.000000000 +0300 @@ -290,9 +290,15 @@ // have this embedded field. // + // TODO + bool _set_finals; + friend class SystemDictionary; public: + bool set_finals() { return _set_finals; } + void set_finals(bool b) { _set_finals = b; } + bool has_nonstatic_fields() const { return (_misc_flags & _misc_has_nonstatic_fields) != 0; } @@ -1260,6 +1266,36 @@ void verify(Klass* holder); }; +class nmethodBucketEntry : public CHeapObj<mtClass> { + friend class VMStructs; +private: + nmethod* _nmethod; + int _count; + nmethodBucketEntry* _next; +public: + nmethodBucketEntry(nmethod* nmethod, nmethodBucketEntry* next) { + _nmethod = nmethod; + _next = next; + _count = 1; + } + int count() { return _count; } + int increment() { _count += 1; return _count; } + int decrement(); + nmethodBucketEntry* next() { return _next; } + void set_next(nmethodBucketEntry* b) { _next = b; } + nmethod* get_nmethod() { return _nmethod; } + + static int mark_dependent_nmethods(nmethodBucketEntry* deps, DepChange& changes); + static nmethodBucketEntry* add_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm); + static bool remove_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm, bool& found); + static nmethodBucketEntry* clean_dependent_nmethods(nmethodBucketEntry* deps); + +#ifndef PRODUCT + static void verify(nmethodBucketEntry* deps); + static void print_dependent_nmethods(nmethodBucketEntry* deps, bool verbose); + static bool is_dependent_nmethod(nmethodBucketEntry* deps, nmethod* nm); +#endif //PRODUCT +}; // // nmethodBucket is used to record dependent nmethods for @@ -1274,28 +1310,32 @@ class nmethodBucket: public CHeapObj<mtClass> { friend class VMStructs; private: - nmethod* _nmethod; - int _count; - nmethodBucket* _next; + enum { + FIRST_Bucket = 0, + KlassBucket = FIRST_Bucket, + CallSiteBucket, + ConstantFieldBucket, + Bucket_LIMIT + }; - public: - nmethodBucket(nmethod* nmethod, nmethodBucket* next) { - _nmethod = nmethod; - _next = next; - _count = 1; + nmethodBucketEntry* _buckets[Bucket_LIMIT]; + + nmethodBucket() { + for (int i = FIRST_Bucket; i < Bucket_LIMIT; i++) { + _buckets[i] = NULL; + } } - int count() { return _count; } - int increment() { _count += 1; return _count; } - int decrement(); - nmethodBucket* next() { return _next; } - void set_next(nmethodBucket* b) { _next = b; } - nmethod* get_nmethod() { return _nmethod; } + static int bucket_index(DepChange& changes); + + public: static int mark_dependent_nmethods(nmethodBucket* deps, DepChange& changes); static nmethodBucket* add_dependent_nmethod(nmethodBucket* deps, nmethod* nm); static bool remove_dependent_nmethod(nmethodBucket* deps, nmethod* nm); static nmethodBucket* clean_dependent_nmethods(nmethodBucket* deps); + static int release(nmethodBucket* deps); #ifndef PRODUCT +
static void verify(nmethodBucket* deps); static void print_dependent_nmethods(nmethodBucket* deps, bool verbose); static bool is_dependent_nmethod(nmethodBucket* deps, nmethod* nm); #endif //PRODUCT --- old/src/share/vm/opto/library_call.cpp 2015-06-27 04:11:12.000000000 +0300 +++ new/src/share/vm/opto/library_call.cpp 2015-06-27 04:11:11.000000000 +0300 @@ -47,6 +47,7 @@ #include "opto/runtime.hpp" #include "opto/subnode.hpp" #include "prims/nativeLookup.hpp" +#include "prims/unsafe.hpp" #include "runtime/sharedRuntime.hpp" #include "trace/traceMacros.hpp" @@ -227,13 +228,13 @@ bool inline_min_max(vmIntrinsics::ID id); Node* generate_min_max(vmIntrinsics::ID id, Node* x, Node* y); // This returns Type::AnyPtr, RawPtr, or OopPtr. - int classify_unsafe_addr(Node* &base, Node* &offset); - Node* make_unsafe_address(Node* base, Node* offset); + int classify_unsafe_addr(Node* &base, Node* &offset, bool decode_offset); + Node* make_unsafe_address(Node* base, Node* offset, bool decode_offset); // Helper for inline_unsafe_access. // Generates the guards that check whether the result of // Unsafe.getObject should be recorded in an SATB log buffer. void insert_pre_barrier(Node* base_oop, Node* offset, Node* pre_val, bool need_mem_bar); - bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile); + bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned = false); static bool klass_needs_init_guard(Node* kls); bool inline_unsafe_allocate(); bool inline_unsafe_copyMemory(); @@ -824,15 +825,15 @@ case vmIntrinsics::_putFloatVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_FLOAT, is_volatile); case vmIntrinsics::_putDoubleVolatile: return inline_unsafe_access(!is_native_ptr, is_store, T_DOUBLE, is_volatile); - case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile); - case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile); - case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile); - case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile); - - case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile); - case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_CHAR, !is_volatile); - case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile); - case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile); + case vmIntrinsics::_getShortUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_SHORT, !is_volatile, /*unaligned=*/true); + case vmIntrinsics::_getCharUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_CHAR, !is_volatile, /*unaligned=*/true); + case vmIntrinsics::_getIntUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_INT, !is_volatile, /*unaligned=*/true); + case vmIntrinsics::_getLongUnaligned: return inline_unsafe_access(!is_native_ptr, !is_store, T_LONG, !is_volatile, /*unaligned=*/true); + + case vmIntrinsics::_putShortUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_SHORT, !is_volatile, /*unaligned=*/true); + case vmIntrinsics::_putCharUnaligned: return inline_unsafe_access(!is_native_ptr, 
is_store, T_CHAR, !is_volatile, /*unaligned=*/true); + case vmIntrinsics::_putIntUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_INT, !is_volatile, /*unaligned=*/true); + case vmIntrinsics::_putLongUnaligned: return inline_unsafe_access(!is_native_ptr, is_store, T_LONG, !is_volatile, /*unaligned=*/true); case vmIntrinsics::_compareAndSwapObject: return inline_unsafe_load_store(T_OBJECT, LS_cmpxchg); case vmIntrinsics::_compareAndSwapInt: return inline_unsafe_load_store(T_INT, LS_cmpxchg); @@ -2280,7 +2281,7 @@ } inline int -LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset) { +LibraryCallKit::classify_unsafe_addr(Node* &base, Node* &offset, bool decode_offset) { const TypePtr* base_type = TypePtr::NULL_PTR; if (base != NULL) base_type = _gvn.type(base)->isa_ptr(); if (base_type == NULL) { @@ -2294,8 +2295,13 @@ } else if (base_type->base() == Type::RawPtr) { return Type::RawPtr; } else if (base_type->isa_oopptr()) { + Node* decoded_offset = offset; + if (decode_offset) { + decoded_offset = _gvn.transform(new RShiftXNode(offset, intcon(Unsafe::offset_shift))); + } // Base is never null => always a heap address. if (base_type->ptr() == TypePtr::NotNull) { + offset = decoded_offset; return Type::OopPtr; } // Offset is small => always a heap address. @@ -2304,9 +2310,25 @@ base_type->offset() == 0 && // (should always be?) offset_type->_lo >= 0 && !MacroAssembler::needs_explicit_null_check(offset_type->_hi)) { + offset = decoded_offset; return Type::OopPtr; } // Otherwise, it might either be oop+off or NULL+addr. + // For oop+off case the offset should be decoded first, but + // NULL+addr can be used as is. + IdealKit ideal(this); +#define __ ideal. + IdealVariable off(ideal); + __ declarations_done(); + __ set(off, offset); + __ if_then(base, BoolTest::ne, null(), PROB_UNLIKELY(0.999)); { + __ set(off, decoded_offset); + } __ end_if(); + // Final sync IdealKit and GraphKit. + decoded_offset = __ value(off); + final_sync(ideal); +#undef __ + offset = decoded_offset; return Type::AnyPtr; } else { // No information: @@ -2314,8 +2336,8 @@ } } -inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset) { - int kind = classify_unsafe_addr(base, offset); +inline Node* LibraryCallKit::make_unsafe_address(Node* base, Node* offset, bool decode_offset) { + int kind = classify_unsafe_addr(base, offset, decode_offset); if (kind == Type::RawPtr) { return basic_plus_adr(top(), base, offset); } else { @@ -2460,10 +2482,6 @@ #undef __ } - -// Interpret Unsafe.fieldOffset cookies correctly: -extern jlong Unsafe_field_offset_to_byte_offset(jlong field_offset); - const TypeOopPtr* LibraryCallKit::sharpen_unsafe_type(Compile::AliasType* alias_type, const TypePtr *adr_type, bool is_native_ptr) { // Attempt to infer a sharper value type from the offset and base type. ciKlass* sharpened_klass = NULL; @@ -2503,7 +2521,7 @@ return NULL; } -bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile) { +bool LibraryCallKit::inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, bool is_volatile, bool unaligned) { if (callee()->is_static()) return false; // caller must have the capability! 
#ifndef PRODUCT @@ -2561,20 +2579,15 @@ Node* base = argument(1); // type: oop // The offset is a value produced by Unsafe.staticFieldOffset or Unsafe.objectFieldOffset offset = argument(2); // type: long - // We currently rely on the cookies produced by Unsafe.xxxFieldOffset - // to be plain byte offsets, which are also the same as those accepted - // by oopDesc::field_base. - assert(Unsafe_field_offset_to_byte_offset(11) == 11, - "fieldOffset must be byte-scaled"); // 32-bit machines ignore the high half! offset = ConvL2X(offset); - adr = make_unsafe_address(base, offset); + adr = make_unsafe_address(base, offset, /*decode=*/!unaligned); heap_base_oop = base; val = is_store ? argument(4) : NULL; } else { Node* ptr = argument(1); // type: long ptr = ConvL2X(ptr); // adjust Java long to machine word - adr = make_unsafe_address(NULL, ptr); + adr = make_unsafe_address(NULL, ptr, /*decode=*/false); val = is_store ? argument(3) : NULL; } @@ -2652,8 +2665,15 @@ if (heap_base_oop != top() && field != NULL && field->is_constant() && field->layout_type() == type) { // final or stable field - const Type* con_type = Type::make_constant(alias_type->field(), heap_base_oop); + ciField* field = alias_type->field(); + const Type* con_type = Type::make_constant(field, heap_base_oop); if (con_type != NULL) { + if (TrustFinalNonStaticFields && + !field->is_static() && heap_base_oop->is_Con()) { + const TypeOopPtr* oop_ptr = heap_base_oop->bottom_type()->isa_oopptr(); + ciObject* constant_oop = oop_ptr->const_oop(); + C->dependencies()->assert_constant_field_value_instance(field, constant_oop); + } p = makecon(con_type); } } @@ -2706,6 +2726,17 @@ break; } + { // Need to check all dependent nmethods when final field is updated through Unsafe. + Node* final_bit = _gvn.transform(new AndXNode(/*offset*/argument(2), MakeConX(Unsafe::final_mask))); + Node* cmp_final_bit = _gvn.transform(new CmpXNode(final_bit, MakeConX(0))); + Node* bol_final_bit = _gvn.transform(new BoolNode(cmp_final_bit, BoolTest::eq)); + + BuildCutout unless(this, bol_final_bit, PROB_MAX); + uncommon_trap(Deoptimization::Reason_intrinsic, + Deoptimization::Action_none, + NULL, "final_field_unsafe_update"); + } + MemNode::MemOrd mo = is_volatile ? MemNode::release : MemNode::unordered; if (type != T_OBJECT ) { (void) store_to_memory(control(), adr, val, type, adr_type, mo, is_volatile); @@ -2840,13 +2871,9 @@ } // Build field offset expression. - // We currently rely on the cookies produced by Unsafe.xxxFieldOffset - // to be plain byte offsets, which are also the same as those accepted - // by oopDesc::field_base. - assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled"); // 32-bit machines ignore the high half of long offsets offset = ConvL2X(offset); - Node* adr = make_unsafe_address(base, offset); + Node* adr = make_unsafe_address(base, offset, /*decode=*/true); const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); // For CAS, unlike inline_unsafe_access, there seems no point in @@ -3035,10 +3062,9 @@ } // Build field offset expression. 
- assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled"); // 32-bit machines ignore the high half of long offsets offset = ConvL2X(offset); - Node* adr = make_unsafe_address(base, offset); + Node* adr = make_unsafe_address(base, offset, /*decode=*/true); const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); const Type *value_type = Type::get_const_basic_type(type); Compile::AliasType* alias_type = C->alias_type(adr_type); @@ -4424,11 +4450,8 @@ Node* dst_off = ConvL2X(argument(5)); // type: long Node* size = ConvL2X(argument(7)); // type: long - assert(Unsafe_field_offset_to_byte_offset(11) == 11, - "fieldOffset must be byte-scaled"); - - Node* src = make_unsafe_address(src_ptr, src_off); - Node* dst = make_unsafe_address(dst_ptr, dst_off); + Node* src = make_unsafe_address(src_ptr, src_off, /*decode=*/true); + Node* dst = make_unsafe_address(dst_ptr, dst_off, /*decode=*/true); // Conservatively insert a memory barrier on all memory slices. // Do not let writes of the copy source or destination float below the copy. --- old/src/share/vm/opto/parse3.cpp 2015-06-27 04:11:13.000000000 +0300 +++ new/src/share/vm/opto/parse3.cpp 2015-06-27 04:11:12.000000000 +0300 @@ -151,11 +151,17 @@ // final or stable field const Type* con_type = Type::make_constant(field, obj); if (con_type != NULL) { + if (TrustFinalNonStaticFields && + !field->is_static() && obj->is_Con()) { + const TypeOopPtr* oop_ptr = obj->bottom_type()->isa_oopptr(); + ciObject* constant_oop = oop_ptr->const_oop(); + C->dependencies()->assert_constant_field_value_instance(field, constant_oop); + } push_node(con_type->basic_type(), makecon(con_type)); return; } } - + ciType* field_klass = field->type(); bool is_vol = field->is_volatile(); --- old/src/share/vm/prims/jni.cpp 2015-06-27 04:11:13.000000000 +0300 +++ new/src/share/vm/prims/jni.cpp 2015-06-27 04:11:13.000000000 +0300 @@ -31,6 +31,7 @@ #include "classfile/symbolTable.hpp" #include "classfile/systemDictionary.hpp" #include "classfile/vmSymbols.hpp" +#include "code/dependencies.hpp" #include "gc/shared/gcLocker.inline.hpp" #include "interpreter/linkResolver.hpp" #include "memory/allocation.hpp" @@ -60,6 +61,7 @@ #include "runtime/fieldDescriptor.hpp" #include "runtime/fprofiler.hpp" #include "runtime/handles.inline.hpp" +#include "runtime/init.hpp" #include "runtime/interfaceSupport.hpp" #include "runtime/java.hpp" #include "runtime/javaCalls.hpp" @@ -168,7 +170,7 @@ // out-of-line helpers for class jfieldIDWorkaround: bool jfieldIDWorkaround::is_valid_jfieldID(Klass* k, jfieldID id) { - if (jfieldIDWorkaround::is_instance_jfieldID(k, id)) { + if (jfieldIDWorkaround::is_instance_jfieldID(id)) { uintptr_t as_uint = (uintptr_t) id; intptr_t offset = raw_instance_offset(id); if (is_checked_jfieldID(id)) { @@ -229,7 +231,7 @@ } void jfieldIDWorkaround::verify_instance_jfieldID(Klass* k, jfieldID id) { - guarantee(jfieldIDWorkaround::is_instance_jfieldID(k, id), "must be an instance field" ); + guarantee(jfieldIDWorkaround::is_instance_jfieldID(id), "must be an instance field" ); uintptr_t as_uint = (uintptr_t) id; intptr_t offset = raw_instance_offset(id); if (VerifyJNIFields) { @@ -506,7 +508,8 @@ // It may also have hash bits for k, if VerifyJNIFields is turned on. 
intptr_t offset = InstanceKlass::cast(k1())->field_offset( slot ); assert(InstanceKlass::cast(k1())->contains_field_offset(offset), "stay within object"); - ret = jfieldIDWorkaround::to_instance_jfieldID(k1(), offset); + bool is_final = (modifiers & JVM_ACC_FINAL) != 0; + ret = jfieldIDWorkaround::to_instance_jfieldID(k1(), offset, is_final); return ret; JNI_END @@ -2041,7 +2044,7 @@ // A jfieldID for a non-static field is simply the offset of the field within the instanceOop // It may also have hash bits for k, if VerifyJNIFields is turned on. - ret = jfieldIDWorkaround::to_instance_jfieldID(k(), fd.offset()); + ret = jfieldIDWorkaround::to_instance_jfieldID(k(), fd.offset(), fd.access_flags().is_final()); return ret; JNI_END @@ -2161,9 +2164,26 @@ return (address)jni_GetDoubleField; } +static void check_final_field(jobject obj, jfieldID fieldID, TRAPS) { + if (TrustFinalNonStaticFields && + jfieldIDWorkaround::is_final_jfieldID(fieldID) && + jfieldIDWorkaround::is_instance_jfieldID(fieldID)) { + ResetNoHandleMark rm; + HandleMark hm; + Handle recv(THREAD, JNIHandles::resolve(obj)); + if (!recv.is_null()) { + instanceKlassHandle ctxk(THREAD, InstanceKlass::cast(recv->klass())); + int offset = (int)jfieldIDWorkaround::from_instance_jfieldID(ctxk(), fieldID); + ConstantFieldDepChange changes(recv, offset); + Dependencies::invalidate_dependent_nmethods(ctxk, changes, THREAD); + } + } +} + JNI_QUICK_ENTRY(void, jni_SetObjectField(JNIEnv *env, jobject obj, jfieldID fieldID, jobject value)) JNIWrapper("SetObjectField"); HOTSPOT_JNI_SETOBJECTFIELD_ENTRY(env, obj, (uintptr_t) fieldID, value); + check_final_field(obj, fieldID, thread); oop o = JNIHandles::resolve_non_null(obj); Klass* k = o->klass(); int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID); @@ -2188,6 +2208,7 @@ \ EntryProbe; \ \ + check_final_field(obj, fieldID, thread); \ oop o = JNIHandles::resolve_non_null(obj); \ Klass* k = o->klass(); \ int offset = jfieldIDWorkaround::from_instance_jfieldID(k, fieldID); \ --- old/src/share/vm/prims/jvmtiEnv.cpp 2015-06-27 04:11:14.000000000 +0300 +++ new/src/share/vm/prims/jvmtiEnv.cpp 2015-06-27 04:11:14.000000000 +0300 @@ -2363,7 +2363,8 @@ for (FilteredFieldStream src_st(instanceK_h, true, true); !src_st.eos(); src_st.next()) { result_list[id_index--] = jfieldIDWorkaround::to_jfieldID( instanceK_h, src_st.offset(), - src_st.access_flags().is_static()); + src_st.access_flags().is_static(), + src_st.access_flags().is_final()); } assert(id_index == -1, "just checking"); // Fill in the results --- old/src/share/vm/prims/methodHandles.cpp 2015-06-27 04:11:15.000000000 +0300 +++ new/src/share/vm/prims/methodHandles.cpp 2015-06-27 04:11:15.000000000 +0300 @@ -36,6 +36,7 @@ #include "oops/oop.inline.hpp" #include "prims/methodHandles.hpp" #include "prims/jvmtiRedefineClassesTrace.hpp" +#include "prims/unsafe.hpp" #include "runtime/compilationPolicy.hpp" #include "runtime/javaCalls.hpp" #include "runtime/reflection.hpp" @@ -1214,7 +1215,8 @@ ? (flags & JVM_ACC_STATIC) != 0 : (flags & JVM_ACC_STATIC) == 0)) { int vmindex = java_lang_invoke_MemberName::vmindex(mname); - return (jlong) vmindex; + bool is_final = (flags & JVM_ACC_FINAL) != 0; + return Unsafe::field_offset_from_byte_offset(vmindex, is_final); } } const char* msg = (must_be_static ? 
"static field required" : "non-static field required"); @@ -1339,16 +1341,7 @@ { MutexLockerEx mu2(CodeCache_lock, Mutex::_no_safepoint_check_flag); nmethodBucket* b = java_lang_invoke_MethodHandleNatives_CallSiteContext::vmdependencies(context()); - while(b != NULL) { - nmethod* nm = b->get_nmethod(); - if (b->count() > 0 && nm->is_alive() && !nm->is_marked_for_deoptimization()) { - nm->mark_for_deoptimization(); - marked++; - } - nmethodBucket* next = b->next(); - delete b; - b = next; - } + marked = nmethodBucket::release(b); java_lang_invoke_MethodHandleNatives_CallSiteContext::set_vmdependencies(context(), NULL); // reset context } if (marked > 0) { --- old/src/share/vm/prims/unsafe.cpp 2015-06-27 04:11:16.000000000 +0300 +++ new/src/share/vm/prims/unsafe.cpp 2015-06-27 04:11:15.000000000 +0300 @@ -29,6 +29,7 @@ #include "oops/oop.inline.hpp" #include "prims/jni.h" #include "prims/jvm.h" +#include "prims/unsafe.hpp" #include "runtime/atomic.inline.hpp" #include "runtime/globals.hpp" #include "runtime/interfaceSupport.hpp" @@ -40,6 +41,7 @@ #include "utilities/copy.hpp" #include "utilities/dtrace.hpp" #include "utilities/macros.hpp" +#include "code/dependencies.hpp" #if INCLUDE_ALL_GCS #include "gc/g1/g1SATBCardTableModRefBS.hpp" #endif // INCLUDE_ALL_GCS @@ -74,7 +76,6 @@ #define UnsafeWrapper(arg) /*nothing, for the present*/ - inline void* addr_from_java(jlong addr) { // This assert fails in a variety of ways on 32-bit systems. // It is impossible to predict whether native code that converts @@ -88,35 +89,13 @@ return (uintptr_t)p; } - -// Note: The VM's obj_field and related accessors use byte-scaled -// ("unscaled") offsets, just as the unsafe methods do. - -// However, the method Unsafe.fieldOffset explicitly declines to -// guarantee this. The field offset values manipulated by the Java user -// through the Unsafe API are opaque cookies that just happen to be byte -// offsets. We represent this state of affairs by passing the cookies -// through conversion functions when going between the VM and the Unsafe API. -// The conversion functions just happen to be no-ops at present. - -inline jlong field_offset_to_byte_offset(jlong field_offset) { - return field_offset; -} - -inline jlong field_offset_from_byte_offset(jlong byte_offset) { - return byte_offset; -} - -inline jint invocation_key_from_method_slot(jint slot) { - return slot; -} - -inline jint invocation_key_to_method_slot(jint key) { - return key; -} - inline void* index_oop_from_field_offset_long(oop p, jlong field_offset) { - jlong byte_offset = field_offset_to_byte_offset(field_offset); + jlong byte_offset = 0; + if (oopDesc::is_null(p)) { + byte_offset = field_offset; + } else { + byte_offset = Unsafe::field_offset_to_byte_offset(field_offset); + } #ifdef ASSERT if (p != NULL) { assert(byte_offset >= 0 && byte_offset <= (jlong)MAX_OBJECT_SIZE, "sane offset"); @@ -135,22 +114,19 @@ return (address)p + byte_offset; } -// Externally callable versions: -// (Use these in compiler intrinsics which emulate unsafe primitives.) 
-jlong Unsafe_field_offset_to_byte_offset(jlong field_offset) {
-  return field_offset;
-}
-jlong Unsafe_field_offset_from_byte_offset(jlong byte_offset) {
-  return byte_offset;
-}
-jint Unsafe_invocation_key_from_method_slot(jint slot) {
-  return invocation_key_from_method_slot(slot);
-}
-jint Unsafe_invocation_key_to_method_slot(jint key) {
-  return invocation_key_to_method_slot(key);
+static void check_final_field(jobject obj, jlong field_offset, TRAPS) {
+  if (TrustFinalNonStaticFields &&
+      (field_offset & Unsafe::final_mask) != 0) {
+    Handle recv(THREAD, JNIHandles::resolve(obj));
+    if (!recv.is_null()) {
+      instanceKlassHandle ctxk(THREAD, InstanceKlass::cast(recv->klass()));
+      jlong byte_offset = Unsafe::field_offset_to_byte_offset(field_offset);
+      ConstantFieldDepChange changes(recv, byte_offset);
+      Dependencies::invalidate_dependent_nmethods(ctxk, changes, THREAD);
+    }
+  }
}
-

///// Data in the Java heap.

#define GET_FIELD(obj, offset, type_name, v) \
@@ -158,6 +134,7 @@
  type_name v = *(type_name*)index_oop_from_field_offset_long(p, offset)

#define SET_FIELD(obj, offset, type_name, x) \
+  check_final_field(obj, offset, THREAD); \
  oop p = JNIHandles::resolve(obj); \
  *(type_name*)index_oop_from_field_offset_long(p, offset) = x
@@ -169,6 +146,7 @@
  volatile type_name v = OrderAccess::load_acquire((volatile type_name*)index_oop_from_field_offset_long(p, offset));

#define SET_FIELD_VOLATILE(obj, offset, type_name, x) \
+  check_final_field(obj, offset, THREAD); \
  oop p = JNIHandles::resolve(obj); \
  OrderAccess::release_store_fence((volatile type_name*)index_oop_from_field_offset_long(p, offset), x);
@@ -218,6 +196,7 @@

UNSAFE_ENTRY(void, Unsafe_SetObject(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
  UnsafeWrapper("Unsafe_SetObject");
+  check_final_field(obj, offset, THREAD);
  oop x = JNIHandles::resolve(x_h);
  oop p = JNIHandles::resolve(obj);
  if (UseCompressedOops) {
@@ -244,6 +223,7 @@

UNSAFE_ENTRY(void, Unsafe_SetObjectVolatile(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jobject x_h))
  UnsafeWrapper("Unsafe_SetObjectVolatile");
+  check_final_field(obj, offset, THREAD);
  oop x = JNIHandles::resolve(x_h);
  oop p = JNIHandles::resolve(obj);
  void* addr = index_oop_from_field_offset_long(p, offset);
@@ -685,7 +665,7 @@

  oop reflected   = JNIHandles::resolve_non_null(field);
  oop mirror      = java_lang_reflect_Field::clazz(reflected);
-  Klass* k      = java_lang_Class::as_Klass(mirror);
+  Klass* k        = java_lang_Class::as_Klass(mirror);

  int slot        = java_lang_reflect_Field::slot(reflected);
  int modifiers   = java_lang_reflect_Field::modifiers(reflected);
@@ -697,7 +677,8 @@
  }

  int offset = InstanceKlass::cast(k)->field_offset(slot);
-  return field_offset_from_byte_offset(offset);
+  bool is_final = (modifiers & JVM_ACC_FINAL) != 0;
+  return Unsafe::field_offset_from_byte_offset(offset, is_final);
}

UNSAFE_ENTRY(jlong, Unsafe_ObjectFieldOffset(JNIEnv *env, jobject unsafe, jobject field))
@@ -787,7 +768,7 @@
  UnsafeWrapper("Unsafe_ArrayBaseOffset");
  int base, scale;
  getBaseAndScale(base, scale, acls, CHECK_0);
-  return field_offset_from_byte_offset(base);
+  return Unsafe::field_offset_from_byte_offset(base);
UNSAFE_END
@@ -809,7 +790,7 @@

  // The following allows for a pretty general fieldOffset cookie scheme,
  // but requires it to be linear in byte offset.
-  return field_offset_from_byte_offset(scale) - field_offset_from_byte_offset(0);
+  return Unsafe::field_offset_from_byte_offset(scale) - Unsafe::field_offset_from_byte_offset(0);
UNSAFE_END
@@ -1199,7 +1180,6 @@
  return ret;
UNSAFE_END

-
/// JVM_RegisterUnsafeMethods

#define ADR "J"
--- old/src/share/vm/runtime/jfieldIDWorkaround.hpp 2015-06-27 04:11:16.000000000 +0300
+++ new/src/share/vm/runtime/jfieldIDWorkaround.hpp 2015-06-27 04:11:16.000000000 +0300
@@ -38,19 +38,20 @@
  // is accompanied by an indication of which class it applies to.
  //
  // Bit-format of a jfieldID (most significant first):
-  //  address:30        instance=0:1 checked=0:1
-  //  offset:30         instance=1:1 checked=0:1
-  //  klass:23 offset:7 instance=1:1 checked=1:1
+  //  address:29        final=0:1 instance=0:1 checked=0:1
+  //  offset:29         final=0:1 instance=1:1 checked=0:1
+  //  klass:22 offset:7 final=1:1 instance=1:1 checked=1:1
  //
  // If the offset does not fit in 7 bits, or if the fieldID is
  // not checked, then the checked bit is zero and the rest of
  // the word (30 bits) contains only the offset.
  //
- private:
+public:
  enum {
    checked_bits           = 1,
    instance_bits          = 1,
-    address_bits           = BitsPerWord - checked_bits - instance_bits,
+    final_bits             = 1,
+    address_bits           = BitsPerWord - checked_bits - instance_bits - final_bits,

    large_offset_bits      = address_bits,  // unioned with address
    small_offset_bits      = 7,
@@ -58,13 +59,15 @@

    checked_shift          = 0,
    instance_shift         = checked_shift  + checked_bits,
-    address_shift          = instance_shift + instance_bits,
+    final_shift            = instance_shift + instance_bits,
+    address_shift          = final_shift    + final_bits,

    offset_shift           = address_shift,  // unioned with address
    klass_shift            = offset_shift + small_offset_bits,

    checked_mask_in_place  = right_n_bits(checked_bits)  << checked_shift,
    instance_mask_in_place = right_n_bits(instance_bits) << instance_shift,
+    final_mask_in_place    = right_n_bits(final_bits)    << final_shift,
#ifndef _WIN64
    large_offset_mask      = right_n_bits(large_offset_bits),
    small_offset_mask      = right_n_bits(small_offset_bits),
@@ -78,7 +81,7 @@
  const static uintptr_t small_offset_mask = right_n_bits(small_offset_bits);
  const static uintptr_t klass_mask        = right_n_bits(klass_bits);
#endif
-
+ private:
  // helper routines:
  static bool is_checked_jfieldID(jfieldID id) {
    uintptr_t as_uint = (uintptr_t) id;
@@ -98,7 +101,7 @@
 public:
  static bool is_valid_jfieldID(Klass* k, jfieldID id);

-  static bool is_instance_jfieldID(Klass* k, jfieldID id) {
+  static bool is_instance_jfieldID(jfieldID id) {
    uintptr_t as_uint = (uintptr_t) id;
    return ((as_uint & instance_mask_in_place) != 0);
  }
@@ -106,9 +109,14 @@
    uintptr_t as_uint = (uintptr_t) id;
    return ((as_uint & instance_mask_in_place) == 0);
  }
+  static bool is_final_jfieldID(jfieldID id) {
+    uintptr_t as_uint = (uintptr_t) id;
+    return ((as_uint & final_mask_in_place) != 0);
+  }

-  static jfieldID to_instance_jfieldID(Klass* k, int offset) {
+  static jfieldID to_instance_jfieldID(Klass* k, int offset, bool is_final) {
    intptr_t as_uint = ((offset & large_offset_mask) << offset_shift) | instance_mask_in_place;
+    if (is_final) as_uint |= final_mask_in_place;
    if (VerifyJNIFields) {
      as_uint |= encode_klass_hash(k, offset);
    }
@@ -150,13 +158,13 @@
    return result;
  }

-  static jfieldID to_jfieldID(instanceKlassHandle k, int offset, bool is_static) {
+  static jfieldID to_jfieldID(instanceKlassHandle k, int offset, bool is_static, bool is_final) {
    if (is_static) {
      JNIid *id = k->jni_id_for(offset);
      debug_only(id->set_is_static_field_id());
      return jfieldIDWorkaround::to_static_jfieldID(id);
    } else {
-      return jfieldIDWorkaround::to_instance_jfieldID(k(), offset);
+      return jfieldIDWorkaround::to_instance_jfieldID(k(), offset, is_final);
    }
  }
};
--- old/src/share/vm/runtime/vmStructs.cpp 2015-06-27 04:11:17.000000000 +0300
+++ new/src/share/vm/runtime/vmStructs.cpp 2015-06-27 04:11:17.000000000 +0300
@@ -325,9 +325,10 @@
  volatile_nonstatic_field(InstanceKlass, _idnum_allocated_count, u2) \
  nonstatic_field(InstanceKlass, _annotations, Annotations*) \
  nonstatic_field(InstanceKlass, _dependencies, nmethodBucket*) \
-  nonstatic_field(nmethodBucket, _nmethod, nmethod*) \
-  nonstatic_field(nmethodBucket, _count, int) \
-  nonstatic_field(nmethodBucket, _next, nmethodBucket*) \
+  nonstatic_field(nmethodBucket, _buckets[0], nmethodBucketEntry*) \
+  nonstatic_field(nmethodBucketEntry, _nmethod, nmethod*) \
+  nonstatic_field(nmethodBucketEntry, _count, int) \
+  nonstatic_field(nmethodBucketEntry, _next, nmethodBucketEntry*) \
  nonstatic_field(InstanceKlass, _method_ordering, Array*) \
  nonstatic_field(InstanceKlass, _default_vtable_indices, Array*) \
  nonstatic_field(Klass, _super_check_offset, juint) \
@@ -1478,6 +1479,7 @@
\
  declare_toplevel_type(DataLayout) \
  declare_toplevel_type(nmethodBucket) \
+  declare_toplevel_type(nmethodBucketEntry) \
\
  /********/ \
  /* Oops */ \
--- old/test/compiler/unsafe/UnsafeGetConstantField.java 2015-06-27 04:11:18.000000000 +0300
+++ new/test/compiler/unsafe/UnsafeGetConstantField.java 2015-06-27 04:11:18.000000000 +0300
@@ -28,12 +28,12 @@
 * @summary tests on constant folding of unsafe get operations
 * @library /testlibrary /../../test/lib
 * @run main/bootclasspath -XX:+UnlockDiagnosticVMOptions
- *                         -Xbatch -XX:-TieredCompilation
+ *                         -Xbatch -XX:-TieredCompilation -Xverify:all
 *                          -XX:+FoldStableValues
 *                          -XX:+UseUnalignedAccesses
 *                          java.lang.invoke.UnsafeGetConstantField
 * @run main/bootclasspath -XX:+UnlockDiagnosticVMOptions
- *                         -Xbatch -XX:-TieredCompilation
+ *                         -Xbatch -XX:-TieredCompilation -Xverify:all
 *                          -XX:+FoldStableValues
 *                          -XX:-UseUnalignedAccesses
 *                          java.lang.invoke.UnsafeGetConstantField
@@ -180,6 +180,7 @@
        final boolean stable;
        final boolean hasDefaultValue;
        final String nameSuffix;
+        final boolean useRawOffset;

        final String className;
        final String classDesc;
@@ -192,6 +193,7 @@
            this.hasDefaultValue = hasDefaultValue;
            this.nameSuffix = suffix;
+            useRawOffset = suffix.equals("Unaligned");

            fieldDesc = type.desc();
            className = String.format("%s$Test%s%s__f=%d__s=%b__d=%b", internalName(THIS_CLASS), type.typeName, suffix, flags, stable, hasDefaultValue);
@@ -276,6 +278,10 @@
            getField(mv);
            mv.visitMethodInsn(INVOKEVIRTUAL, UNSAFE_NAME, (isStatic() ? "staticFieldOffset" : "objectFieldOffset"), "(Ljava/lang/reflect/Field;)J", false);
+            if (useRawOffset) {
+                mv.visitLdcInsn(1);
+                mv.visitInsn(LSHR);
+            }
            mv.visitFieldInsn(PUTSTATIC, className, "FIELD_OFFSET", "J");

            // Compute base offset for static field
--- /dev/null 2015-06-27 04:11:18.000000000 +0300
+++ new/src/share/vm/prims/unsafe.hpp 2015-06-27 04:11:18.000000000 +0300
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "utilities/globalDefinitions.hpp"
+
+class Unsafe {
+public:
+  // Note: The VM's obj_field and related accessors use byte-scaled
+  // ("unscaled") offsets, just as the unsafe methods do.
+
+  // However, the method Unsafe.fieldOffset explicitly declines to
+  // guarantee this.  The field offset values manipulated by the Java user
+  // through the Unsafe API are opaque cookies that just happen to be byte
+  // offsets.  We represent this state of affairs by passing the cookies
+  // through conversion functions when going between the VM and the Unsafe API.
+  // The conversion functions are no longer no-ops: the cookie stores the
+  // byte offset shifted left by offset_shift, with the low bit recording
+  // whether the field is final.
+  enum {
+    final_bits   = 1,
+    offset_bits  = BitsPerLong - final_bits,
+
+    final_shift  = 0,
+    offset_shift = final_bits,
+
+    final_mask   = right_n_bits(final_bits)
+  };
+
+  static jlong field_offset_to_byte_offset(jlong field_offset) {
+    return (field_offset >> offset_shift);
+  }
+
+  static jlong field_offset_from_byte_offset(jlong byte_offset, bool is_final = false) {
+    return (byte_offset << offset_shift) | (is_final ? final_mask : 0);
+  }
+
+  static jint invocation_key_from_method_slot(jint slot) {
+    return slot;
+  }
+
+  static jint invocation_key_to_method_slot(jint key) {
+    return key;
+  }
+};
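For reference, the offset-cookie layout introduced in unsafe.hpp above is easy to exercise outside the VM. The following standalone C++ sketch is not part of the patch: it mirrors Unsafe::field_offset_from_byte_offset / field_offset_to_byte_offset with plain C++ types (the "sketch" namespace and its constants are illustrative stand-ins for HotSpot's jlong and right_n_bits), showing the encode/decode round trip and why the final bit stays visible to the setter paths that call check_final_field.

// Standalone sketch (not part of the patch): mirrors the cookie scheme in
// class Unsafe above using plain C++ types. All names here are illustrative.
#include <cassert>
#include <cstdint>

namespace sketch {
  const int final_bits       = 1;
  const int offset_shift     = final_bits;        // byte offset lives above the final bit
  const int64_t final_mask   = (1 << final_bits) - 1;

  // Encode: shift the byte offset up and record finality in the low bit.
  int64_t field_offset_from_byte_offset(int64_t byte_offset, bool is_final) {
    return (byte_offset << offset_shift) | (is_final ? final_mask : 0);
  }

  // Decode: drop the final bit to recover the byte offset.
  int64_t field_offset_to_byte_offset(int64_t field_offset) {
    return field_offset >> offset_shift;
  }
}

int main() {
  int64_t cookie = sketch::field_offset_from_byte_offset(12, /*is_final*/ true);
  assert((cookie & sketch::final_mask) != 0);                // final bit observable by setters
  assert(sketch::field_offset_to_byte_offset(cookie) == 12); // round-trips to the byte offset
  return 0;
}

The same layout accounts for two details elsewhere in the patch: with a NULL receiver, index_oop_from_field_offset_long treats the value as a raw byte offset instead of decoding it, and when UnsafeGetConstantField wants a raw offset it shifts the encoded value right by one (the LSHR added to the generated bytecode).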