--- old/src/share/vm/oops/klass.hpp 2016-04-08 18:53:59.000000000 +0300
+++ new/src/share/vm/oops/klass.hpp 2016-04-08 18:53:59.000000000 +0300
@@ -308,6 +308,8 @@
     _lh_array_tag_obj_value  = ~0x01   // 0x80000000 >> 30
   };
 
+  static const juint _lh_array_bit = 1 << (BitsPerInt - 1); // 0x80000000
+
   static int layout_helper_size_in_bytes(jint lh) {
     assert(lh > (jint)_lh_neutral_value, "must be instance");
     return (int) lh & ~_lh_instance_slow_path_bit;
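// ---------------------------------------------------------------------------
// A minimal standalone sketch (not HotSpot code; constant values mirror
// klass.hpp, with int32_t standing in for jint/juint) of the layout-helper
// encoding that the new Klass::_lh_array_bit exploits: instances have a
// positive layout helper, arrays are tagged in the sign bit, so one mask
// covering the sign bit and the slow-path bit classifies a klass in a
// single AND.

#include <cassert>
#include <cstdint>

const int32_t lh_neutral_value          = 0;                    // Klass::_lh_neutral_value
const int32_t lh_instance_slow_path_bit = 0x01;                 // Klass::_lh_instance_slow_path_bit
const int32_t lh_array_bit              = (int32_t)(1u << 31);  // Klass::_lh_array_bit

bool layout_helper_is_instance(int32_t lh) { return lh > lh_neutral_value; }
bool layout_helper_is_array(int32_t lh)    { return lh < lh_neutral_value; }

// Non-zero result == "not a plain, fast-allocatable instance".
int32_t lh_slow_mask_test(int32_t lh) {
  return lh & (lh_array_bit | lh_instance_slow_path_bit);
}

int main() {
  int32_t instance_lh = 16;                           // e.g. a 16-byte instance
  assert(layout_helper_is_instance(instance_lh));
  assert(lh_slow_mask_test(instance_lh) == 0);        // fast path
  assert(lh_slow_mask_test(instance_lh | lh_instance_slow_path_bit) != 0);
  int32_t array_lh = (int32_t)0x80000008;             // sign bit set => array
  assert(layout_helper_is_array(array_lh));
  assert(lh_slow_mask_test(array_lh) != 0);           // must go to the runtime
  return 0;
}
// ---------------------------------------------------------------------------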
--- old/src/share/vm/opto/graphKit.cpp 2016-04-08 18:54:00.000000000 +0300
+++ new/src/share/vm/opto/graphKit.cpp 2016-04-08 18:54:00.000000000 +0300
@@ -3285,6 +3285,7 @@
 // This two-faced routine is useful because allocation sites
 // almost always feature constant types.
 Node* GraphKit::get_layout_helper(Node* klass_node, jint& constant_value) {
+  constant_value = Klass::_lh_neutral_value;  // put in a known value
   const TypeKlassPtr* inst_klass = _gvn.type(klass_node)->isa_klassptr();
   if (!StressReflectiveCode && inst_klass != NULL) {
     ciKlass* klass = inst_klass->klass();
@@ -3293,11 +3294,9 @@
       jint lhelper = klass->layout_helper();
       if (lhelper != Klass::_lh_neutral_value) {
         constant_value = lhelper;
-        return (Node*) NULL;
       }
     }
   }
-  constant_value = Klass::_lh_neutral_value;  // put in a known value
   Node* lhp = basic_plus_adr(klass_node, klass_node, in_bytes(Klass::layout_helper_offset()));
   return make_load(NULL, lhp, TypeInt::INT, T_INT, MemNode::unordered);
 }
@@ -3406,7 +3405,6 @@
 // - If 'return_size_val', report the total object size to the caller.
 // - deoptimize_on_exception controls how Java exceptions are handled (rethrow vs deoptimize)
 Node* GraphKit::new_instance(Node* klass_node,
-                             Node* extra_slow_test,
                              Node* *return_size_val,
                              bool deoptimize_on_exception) {
   // Compute size in doublewords
@@ -3415,33 +3413,47 @@
   // The layout_helper also encodes (in a low bit) the need for a slow path.
   jint  layout_con = Klass::_lh_neutral_value;
   Node* layout_val = get_layout_helper(klass_node, layout_con);
-  int   layout_is_con = (layout_val == NULL);
+  bool  layout_is_con = (layout_con != Klass::_lh_neutral_value);
 
-  if (extra_slow_test == NULL)  extra_slow_test = intcon(0);
-  // Generate the initial go-slow test.  It's either ALWAYS (return a
-  // Node for 1) or NEVER (return a NULL) or perhaps (in the reflective
-  // case) a computed value derived from the layout_helper.
-  Node* initial_slow_test = NULL;
+  // Generate the go-slow test. It's either ALWAYS (ConI(1)), NEVER (ConI(0)),
+  // or (in the reflective case) a computed value derived from the
+  // Klass::_layout_helper and InstanceKlass::_init_state.
+  Node* slow_test = intcon(0);
   if (layout_is_con) {
     assert(!StressReflectiveCode, "stress mode does not use these paths");
-    bool must_go_slow = Klass::layout_helper_needs_slow_path(layout_con);
-    initial_slow_test = must_go_slow ? intcon(1) : extra_slow_test;
+    if (!Klass::layout_helper_is_instance(layout_con) || // exception is thrown from runtime
+        Klass::layout_helper_needs_slow_path(layout_con)) {
+      slow_test = intcon(1);
+    }
   } else {   // reflective case
     // This reflective path is used by Unsafe.allocateInstance.
     // (It may be stress-tested by specifying StressReflectiveCode.)
-    // Basically, we want to get into the VM is there's an illegal argument.
-    Node* bit = intcon(Klass::_lh_instance_slow_path_bit);
-    initial_slow_test = _gvn.transform( new AndINode(layout_val, bit) );
-    if (extra_slow_test != intcon(0)) {
-      initial_slow_test = _gvn.transform( new OrINode(initial_slow_test, extra_slow_test) );
-    }
+    // Basically, we want to get into the VM if there's an illegal argument.
+
+    // Note: The argument might still be an illegal value like
+    // Serializable.class or Object[].class. The runtime will handle it.
+    // But we must make an explicit check for initialization.
+
+    // Check for instance klass & slow bit.
+    Node* bit = intcon(Klass::_lh_array_bit | Klass::_lh_instance_slow_path_bit);
+    Node* lh_test = _gvn.transform(new AndINode(layout_val, bit));
+
+    Node* insp = basic_plus_adr(klass_node, in_bytes(InstanceKlass::init_state_offset()));
+    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
+    // can generate code to load it as unsigned byte.
+    Node* init_state = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
+    Node* init_bits = intcon(InstanceKlass::fully_initialized);
+    Node* init_test = _gvn.transform(new SubINode(init_state, init_bits));
+    // The 'init_test' is non-zero if we need to take a slow path.
+
+    slow_test = _gvn.transform(new OrINode(lh_test, init_test));
     // (Macro-expander will further convert this to a Bool, if necessary.)
   }
 
   // Find the size in bytes.  This is easy; it's the layout_helper.
   // The size value must be valid even if the slow path is taken.
   Node* size = NULL;
-  if (layout_is_con) {
+  if (layout_is_con && Klass::layout_helper_is_instance(layout_con)) {
     size = MakeConX(Klass::layout_helper_size_in_bytes(layout_con));
   } else {   // reflective case
     // This reflective path is used by clone and Unsafe.allocateInstance.
@@ -3472,8 +3484,7 @@
   AllocateNode* alloc
     = new AllocateNode(C, AllocateNode::alloc_type(Type::TOP),
                        control(), mem, i_o(),
-                       size, klass_node,
-                       initial_slow_test);
+                       size, klass_node, slow_test);
 
   return set_output_for_allocation(alloc, oop_type, deoptimize_on_exception);
 }
@@ -3489,7 +3500,7 @@
                           bool deoptimize_on_exception) {
   jint  layout_con = Klass::_lh_neutral_value;
   Node* layout_val = get_layout_helper(klass_node, layout_con);
-  int   layout_is_con = (layout_val == NULL);
+  bool  layout_is_con = (layout_con != Klass::_lh_neutral_value);
 
   if (!layout_is_con && !StressReflectiveCode &&
       !too_many_traps(Deoptimization::Reason_class_check)) {
--- old/src/share/vm/opto/graphKit.hpp 2016-04-08 18:54:01.000000000 +0300
+++ new/src/share/vm/opto/graphKit.hpp 2016-04-08 18:54:01.000000000 +0300
@@ -871,7 +871,6 @@
                       bool deoptimize_on_exception=false);
   Node* get_layout_helper(Node* klass_node, jint& constant_value);
   Node* new_instance(Node* klass_node,
-                     Node* slow_test = NULL,
                      Node* *return_size_val = NULL,
                      bool deoptimize_on_exception = false);
   Node* new_array(Node* klass_node, Node* count_val, int nargs,
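// ---------------------------------------------------------------------------
// A sketch of the branch-free go-slow predicate that new_instance() now
// builds for the reflective case, with plain int32_t values standing in for
// the C2 Ideal nodes (AndINode, SubINode, OrINode) and a hypothetical
// ClassState enum assumed to follow InstanceKlass's ordering. A non-zero
// result tells the AllocateNode to take the slow path; folding the
// initialization check into this one value is what lets the patch drop the
// extra_slow_test parameter and the init guard in inline_unsafe_allocate().

#include <cassert>
#include <cstdint>

enum ClassState { allocated, loaded, linked, being_initialized,
                  fully_initialized, initialization_error };

const int32_t lh_instance_slow_path_bit = 0x01;
const int32_t lh_array_bit              = (int32_t)(1u << 31);

int32_t slow_test(int32_t layout_val, int32_t init_state) {
  // AndINode(layout_val, bit): non-zero for arrays or slow-bit instances.
  int32_t lh_test   = layout_val & (lh_array_bit | lh_instance_slow_path_bit);
  // SubINode(init_state, fully_initialized): non-zero unless initialized.
  int32_t init_test = init_state - fully_initialized;
  // OrINode(lh_test, init_test): the combined test fed to the AllocateNode.
  return lh_test | init_test;
}

int main() {
  assert(slow_test(16, fully_initialized) == 0);   // initialized instance: fast
  assert(slow_test(16, being_initialized) != 0);   // needs <clinit>: slow
  assert(slow_test((int32_t)0x80000008, fully_initialized) != 0); // array: slow
  return 0;
}
// ---------------------------------------------------------------------------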
--- old/src/share/vm/opto/library_call.cpp 2016-04-08 18:54:01.000000000 +0300
+++ new/src/share/vm/opto/library_call.cpp 2016-04-08 18:54:01.000000000 +0300
@@ -247,7 +247,6 @@
   typedef enum { Relaxed, Opaque, Volatile, Acquire, Release } AccessKind;
   bool inline_unsafe_access(bool is_native_ptr, bool is_store, BasicType type, AccessKind kind, bool is_unaligned);
-  static bool klass_needs_init_guard(Node* kls);
   bool inline_unsafe_allocate();
   bool inline_unsafe_newArray(bool uninitialized);
   bool inline_unsafe_copyMemory();
@@ -3122,19 +3121,6 @@
   }
 }
 
-bool LibraryCallKit::klass_needs_init_guard(Node* kls) {
-  if (!kls->is_Con()) {
-    return true;
-  }
-  const TypeKlassPtr* klsptr = kls->bottom_type()->isa_klassptr();
-  if (klsptr == NULL) {
-    return true;
-  }
-  ciInstanceKlass* ik = klsptr->klass()->as_instance_klass();
-  // don't need a guard for a klass that is already initialized
-  return !ik->is_initialized();
-}
-
 //----------------------------inline_unsafe_allocate---------------------------
 // public native Object Unsafe.allocateInstance(Class cls);
 bool LibraryCallKit::inline_unsafe_allocate() {
@@ -3148,21 +3134,7 @@
   kls = null_check(kls);
   if (stopped())  return true;  // argument was like int.class
 
-  Node* test = NULL;
-  if (LibraryCallKit::klass_needs_init_guard(kls)) {
-    // Note: The argument might still be an illegal value like
-    // Serializable.class or Object[].class. The runtime will handle it.
-    // But we must make an explicit check for initialization.
-    Node* insp = basic_plus_adr(kls, in_bytes(InstanceKlass::init_state_offset()));
-    // Use T_BOOLEAN for InstanceKlass::_init_state so the compiler
-    // can generate code to load it as unsigned byte.
-    Node* inst = make_load(NULL, insp, TypeInt::UBYTE, T_BOOLEAN, MemNode::unordered);
-    Node* bits = intcon(InstanceKlass::fully_initialized);
-    test = _gvn.transform(new SubINode(inst, bits));
-    // The 'test' is non-zero if we need to take a slow path.
-  }
-
-  Node* obj = new_instance(kls, test);
+  Node* obj = new_instance(kls);
   set_result(obj);
   return true;
 }
@@ -3736,9 +3708,9 @@
 // Branch around if the kls is an oop array (Object[] or subtype)
 //
 // Like generate_guard, adds a new path onto the region.
-  jint  layout_con = 0;
+  jint  layout_con = Klass::_lh_neutral_value;
   Node* layout_val = get_layout_helper(kls, layout_con);
-  if (layout_val == NULL) {
+  if (layout_con != Klass::_lh_neutral_value) {
     bool query = (obj_array
                   ? Klass::layout_helper_is_objArray(layout_con)
                   : Klass::layout_helper_is_array(layout_con));
@@ -4712,7 +4684,7 @@
     // Need to deoptimize on exception from allocation since Object.clone intrinsic
     // is reexecuted if deoptimization occurs and there could be problems when merging
     // exception state between multiple Object.clone versions (reexecute=true vs reexecute=false).
-    Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size, /*deoptimize_on_exception=*/true);
+    Node* alloc_obj = new_instance(obj_klass, &obj_size, /*deoptimize_on_exception=*/true);
 
     copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
--- old/src/share/vm/opto/runtime.cpp 2016-04-08 18:54:02.000000000 +0300
+++ new/src/share/vm/opto/runtime.cpp 2016-04-08 18:54:02.000000000 +0300
@@ -220,7 +220,9 @@
     // These checks are cheap to make and support reflective allocation.
     int lh = klass->layout_helper();
-    if (Klass::layout_helper_needs_slow_path(lh) || !InstanceKlass::cast(klass)->is_initialized()) {
+    if (!Klass::layout_helper_is_instance(lh) ||
+        Klass::layout_helper_needs_slow_path(lh) ||
+        !InstanceKlass::cast(klass)->is_initialized()) {
       Handle holder(THREAD, klass->klass_holder());  // keep the klass alive
       klass->check_valid_for_instantiation(false, THREAD);
       if (!HAS_PENDING_EXCEPTION) {
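// ---------------------------------------------------------------------------
// A sketch of the widened runtime-side check in OptoRuntime::new_instance_C
// (helper logic restated with plain int32_t so the example is self-contained,
// not the actual runtime code). The new !layout_helper_is_instance clause
// routes array and other non-instance klasses into
// check_valid_for_instantiation(), which raises the Java-level exception,
// instead of letting layout_helper_size_in_bytes() be applied to a negative,
// array-tagged layout helper.

#include <cassert>
#include <cstdint>

const int32_t lh_neutral_value          = 0;
const int32_t lh_instance_slow_path_bit = 0x01;

bool layout_helper_is_instance(int32_t lh)     { return lh > lh_neutral_value; }
bool layout_helper_needs_slow_path(int32_t lh) { return (lh & lh_instance_slow_path_bit) != 0; }

bool must_check_in_runtime(int32_t lh, bool is_initialized) {
  return !layout_helper_is_instance(lh)    // new clause: e.g. Object[].class
      || layout_helper_needs_slow_path(lh)
      || !is_initialized;
}

int main() {
  assert(!must_check_in_runtime(16, true));                 // plain instance
  assert(must_check_in_runtime(16, false));                 // uninitialized
  assert(must_check_in_runtime((int32_t)0x80000008, true)); // array klass
  return 0;
}
// ---------------------------------------------------------------------------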