--- old/src/hotspot/share/opto/library_call.cpp 2019-03-11 14:26:44.982354618 +0100 +++ new/src/hotspot/share/opto/library_call.cpp 2019-03-11 14:26:44.770354621 +0100 @@ -52,6 +52,7 @@ #include "opto/runtime.hpp" #include "opto/rootnode.hpp" #include "opto/subnode.hpp" +#include "opto/valuetypenode.hpp" #include "prims/nativeLookup.hpp" #include "prims/unsafe.hpp" #include "runtime/objectMonitor.hpp" @@ -163,7 +164,6 @@ void generate_string_range_check(Node* array, Node* offset, Node* length, bool char_count); Node* generate_current_thread(Node* &tls_output); - Node* load_mirror_from_klass(Node* klass); Node* load_klass_from_mirror_common(Node* mirror, bool never_see_null, RegionNode* region, int null_path, int offset); @@ -185,20 +185,36 @@ int modifier_mask, int modifier_bits, RegionNode* region); Node* generate_interface_guard(Node* kls, RegionNode* region); + Node* generate_value_guard(Node* kls, RegionNode* region); + + enum ArrayKind { + AnyArray, + NonArray, + ObjectArray, + NonObjectArray, + TypeArray, + ValueArray + }; + Node* generate_array_guard(Node* kls, RegionNode* region) { - return generate_array_guard_common(kls, region, false, false); + return generate_array_guard_common(kls, region, AnyArray); } Node* generate_non_array_guard(Node* kls, RegionNode* region) { - return generate_array_guard_common(kls, region, false, true); + return generate_array_guard_common(kls, region, NonArray); } Node* generate_objArray_guard(Node* kls, RegionNode* region) { - return generate_array_guard_common(kls, region, true, false); + return generate_array_guard_common(kls, region, ObjectArray); } Node* generate_non_objArray_guard(Node* kls, RegionNode* region) { - return generate_array_guard_common(kls, region, true, true); + return generate_array_guard_common(kls, region, NonObjectArray); } - Node* generate_array_guard_common(Node* kls, RegionNode* region, - bool obj_array, bool not_array); + Node* generate_typeArray_guard(Node* kls, RegionNode* region) { + return generate_array_guard_common(kls, region, TypeArray); + } + Node* generate_valueArray_guard(Node* kls, RegionNode* region) { + return generate_array_guard_common(kls, region, ValueArray); + } + Node* generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind); Node* generate_virtual_guard(Node* obj_klass, RegionNode* slow_region); CallJavaNode* generate_method_call(vmIntrinsics::ID method_id, bool is_virtual = false, bool is_static = false); @@ -253,6 +269,8 @@ bool inline_unsafe_allocate(); bool inline_unsafe_newArray(bool uninitialized); bool inline_unsafe_copyMemory(); + bool inline_unsafe_make_private_buffer(); + bool inline_unsafe_finish_private_buffer(); bool inline_native_currentThread(); bool inline_native_time_funcs(address method, const char* funcName); @@ -589,6 +607,8 @@ case vmIntrinsics::_inflateStringC: case vmIntrinsics::_inflateStringB: return inline_string_copy(!is_compress); + case vmIntrinsics::_makePrivateBuffer: return inline_unsafe_make_private_buffer(); + case vmIntrinsics::_finishPrivateBuffer: return inline_unsafe_finish_private_buffer(); case vmIntrinsics::_getReference: return inline_unsafe_access(!is_store, T_OBJECT, Relaxed, false); case vmIntrinsics::_getBoolean: return inline_unsafe_access(!is_store, T_BOOLEAN, Relaxed, false); case vmIntrinsics::_getByte: return inline_unsafe_access(!is_store, T_BYTE, Relaxed, false); @@ -598,6 +618,7 @@ case vmIntrinsics::_getLong: return inline_unsafe_access(!is_store, T_LONG, Relaxed, false); case vmIntrinsics::_getFloat: return 
inline_unsafe_access(!is_store, T_FLOAT, Relaxed, false); case vmIntrinsics::_getDouble: return inline_unsafe_access(!is_store, T_DOUBLE, Relaxed, false); + case vmIntrinsics::_getValue: return inline_unsafe_access(!is_store, T_VALUETYPE,Relaxed, false); case vmIntrinsics::_putReference: return inline_unsafe_access( is_store, T_OBJECT, Relaxed, false); case vmIntrinsics::_putBoolean: return inline_unsafe_access( is_store, T_BOOLEAN, Relaxed, false); @@ -608,6 +629,7 @@ case vmIntrinsics::_putLong: return inline_unsafe_access( is_store, T_LONG, Relaxed, false); case vmIntrinsics::_putFloat: return inline_unsafe_access( is_store, T_FLOAT, Relaxed, false); case vmIntrinsics::_putDouble: return inline_unsafe_access( is_store, T_DOUBLE, Relaxed, false); + case vmIntrinsics::_putValue: return inline_unsafe_access( is_store, T_VALUETYPE,Relaxed, false); case vmIntrinsics::_getReferenceVolatile: return inline_unsafe_access(!is_store, T_OBJECT, Volatile, false); case vmIntrinsics::_getBooleanVolatile: return inline_unsafe_access(!is_store, T_BOOLEAN, Volatile, false); @@ -2360,18 +2382,18 @@ if (!is_store) { // Object getReference(Object base, int/long offset), etc. BasicType rtype = sig->return_type()->basic_type(); - assert(rtype == type, "getter must return the expected value"); - assert(sig->count() == 2, "oop getter has 2 arguments"); + assert(rtype == type || (rtype == T_OBJECT && type == T_VALUETYPE), "getter must return the expected value"); + assert(sig->count() == 2 || (type == T_VALUETYPE && sig->count() == 3), "oop getter has 2 or 3 arguments"); assert(sig->type_at(0)->basic_type() == T_OBJECT, "getter base is object"); assert(sig->type_at(1)->basic_type() == T_LONG, "getter offset is correct"); } else { // void putReference(Object base, int/long offset, Object x), etc. assert(sig->return_type()->basic_type() == T_VOID, "putter must not return a value"); - assert(sig->count() == 3, "oop putter has 3 arguments"); + assert(sig->count() == 3 || (type == T_VALUETYPE && sig->count() == 4), "oop putter has 3 arguments"); assert(sig->type_at(0)->basic_type() == T_OBJECT, "putter base is object"); assert(sig->type_at(1)->basic_type() == T_LONG, "putter offset is correct"); BasicType vtype = sig->type_at(sig->count()-1)->basic_type(); - assert(vtype == type, "putter must accept the expected value"); + assert(vtype == type || (type == T_VALUETYPE && vtype == T_OBJECT), "putter must accept the expected value"); } #endif // ASSERT } @@ -2396,13 +2418,73 @@ // by oopDesc::field_addr. 
assert(Unsafe_field_offset_to_byte_offset(11) == 11, "fieldOffset must be byte-scaled"); + + ciValueKlass* value_klass = NULL; + if (type == T_VALUETYPE) { + Node* cls = null_check(argument(4)); + if (stopped()) { + return true; + } + Node* kls = load_klass_from_mirror(cls, false, NULL, 0); + const TypeKlassPtr* kls_t = _gvn.type(kls)->isa_klassptr(); + if (!kls_t->klass_is_exact()) { + return false; + } + ciKlass* klass = kls_t->klass(); + if (!klass->is_valuetype()) { + return false; + } + value_klass = klass->as_value_klass(); + } + + receiver = null_check(receiver); + if (stopped()) { + return true; + } + + if (base->is_ValueType()) { + ValueTypeNode* vt = base->as_ValueType(); + + if (is_store) { + if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) { + return false; + } + base = vt->get_oop(); + } else { + if (offset->is_Con()) { + long off = find_long_con(offset, 0); + ciValueKlass* vk = _gvn.type(vt)->is_valuetype()->value_klass(); + if ((long)(int)off != off || !vk->contains_field_offset(off)) { + return false; + } + + ciField* f = vk->get_non_flattened_field_by_offset((int)off); + + if (f != NULL) { + BasicType bt = f->layout_type(); + if (bt == T_ARRAY || bt == T_NARROWOOP) { + bt = T_OBJECT; + } + if (bt == type) { + if (bt != T_VALUETYPE || f->type() == value_klass) { + set_result(vt->field_value_by_offset((int)off, false)); + return true; + } + } + } + } + vt = vt->allocate(this)->as_ValueType(); + base = vt->get_oop(); + } + } + // 32-bit machines ignore the high half! offset = ConvL2X(offset); adr = make_unsafe_address(base, offset, is_store ? ACCESS_WRITE : ACCESS_READ, type, kind == Relaxed); if (_gvn.type(base)->isa_ptr() != TypePtr::NULL_PTR) { heap_base_oop = base; - } else if (type == T_OBJECT) { + } else if (type == T_OBJECT || (value_klass != NULL && value_klass->has_object_fields())) { return false; // off-heap oop accesses are not supported } @@ -2413,7 +2495,7 @@ decorators |= IN_HEAP; } - val = is_store ? argument(4) : NULL; + val = is_store ? argument(4 + (type == T_VALUETYPE ? 
1 : 0)) : NULL; const TypePtr *adr_type = _gvn.type(adr)->isa_ptr(); @@ -2427,7 +2509,31 @@ } bool mismatched = false; - BasicType bt = alias_type->basic_type(); + BasicType bt = T_ILLEGAL; + ciField* field = NULL; + if (adr_type->isa_instptr()) { + const TypeInstPtr* instptr = adr_type->is_instptr(); + ciInstanceKlass* k = instptr->klass()->as_instance_klass(); + int off = instptr->offset(); + if (instptr->const_oop() != NULL && + instptr->klass() == ciEnv::current()->Class_klass() && + instptr->offset() >= (instptr->klass()->as_instance_klass()->size_helper() * wordSize)) { + k = instptr->const_oop()->as_instance()->java_lang_Class_klass()->as_instance_klass(); + field = k->get_field_by_offset(off, true); + } else { + field = k->get_non_flattened_field_by_offset(off); + } + if (field != NULL) { + bt = field->layout_type(); + } + assert(bt == alias_type->basic_type() || bt == T_VALUETYPE, "should match"); + if (field != NULL && bt == T_VALUETYPE && !field->is_flattened()) { + bt = T_OBJECT; + } + } else { + bt = alias_type->basic_type(); + } + if (bt != T_ILLEGAL) { assert(alias_type->adr_type()->is_oopptr(), "should be on-heap access"); if (bt == T_BYTE && adr_type->isa_aryptr()) { @@ -2448,6 +2554,28 @@ mismatched = true; // conservatively mark all "wide" on-heap accesses as mismatched } + if (type == T_VALUETYPE) { + if (adr_type->isa_instptr()) { + if (field == NULL || field->type() != value_klass) { + mismatched = true; + } + } else if (adr_type->isa_aryptr()) { + const Type* elem = adr_type->is_aryptr()->elem(); + if (!elem->isa_valuetype()) { + mismatched = true; + } else if (elem->is_valuetype()->value_klass() != value_klass) { + mismatched = true; + } + } + if (is_store) { + const Type* val_t = _gvn.type(val); + if (!val_t->isa_valuetype() || + val_t->is_valuetype()->value_klass() != value_klass) { + return false; + } + } + } + assert(!mismatched || alias_type->adr_type()->is_oopptr(), "off-heap access can't be mismatched"); if (mismatched) { @@ -2460,17 +2588,17 @@ // Figure out the memory ordering. decorators |= mo_decorator_for_access_kind(kind); - if (!is_store && type == T_OBJECT) { - const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type); - if (tjp != NULL) { - value_type = tjp; + if (!is_store) { + if (type == T_OBJECT) { + const TypeOopPtr* tjp = sharpen_unsafe_type(alias_type, adr_type); + if (tjp != NULL) { + value_type = tjp; + } + } else if (type == T_VALUETYPE) { + value_type = NULL; } } - receiver = null_check(receiver); - if (stopped()) { - return true; - } // Heap pointers get a null-check from the interpreter, // as a courtesy. 
However, this is not guaranteed by Unsafe, // and it is not possible to fully distinguish unintended nulls @@ -2479,14 +2607,24 @@ if (!is_store) { Node* p = NULL; // Try to constant fold a load from a constant field - ciField* field = alias_type->field(); + if (heap_base_oop != top() && field != NULL && field->is_constant() && !mismatched) { // final or stable field p = make_constant_from_field(field, heap_base_oop); } if (p == NULL) { // Could not constant fold the load - p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators); + if (type == T_VALUETYPE) { + if (adr_type->isa_instptr() && !mismatched) { + ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass(); + int offset = adr_type->is_instptr()->offset(); + p = ValueTypeNode::make_from_flattened(this, value_klass, base, base, holder, offset, decorators); + } else { + p = ValueTypeNode::make_from_flattened(this, value_klass, base, adr, NULL, 0, decorators); + } + } else { + p = access_load_at(heap_base_oop, adr, adr_type, value_type, type, decorators); + } // Normalize the value returned by getBoolean in the following cases if (type == T_BOOLEAN && (mismatched || @@ -2513,6 +2651,14 @@ p = gvn().transform(new CastP2XNode(NULL, p)); p = ConvX2UL(p); } + if (field != NULL && field->is_flattenable()&& !field->is_flattened()) { + // Load a non-flattened but flattenable value type from memory + if (value_type->value_klass()->is_scalarizable()) { + p = ValueTypeNode::make_from_oop(this, p, value_type->value_klass()); + } else { + p = null2default(p, value_type->value_klass()); + } + } // The load node has the control of the preceding MemBarCPUOrder. All // following nodes will have the control of the MemBarCPUOrder inserted at // the end of this method. So, pushing the load onto the stack at a later @@ -2524,9 +2670,66 @@ val = ConvL2X(val); val = gvn().transform(new CastX2PNode(val)); } - access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators); + if (type == T_VALUETYPE) { + if (adr_type->isa_instptr() && !mismatched) { + ciInstanceKlass* holder = adr_type->is_instptr()->klass()->as_instance_klass(); + int offset = adr_type->is_instptr()->offset(); + val->as_ValueType()->store_flattened(this, base, base, holder, offset, decorators); + } else { + val->as_ValueType()->store_flattened(this, base, adr, NULL, 0, decorators); + } + } else { + access_store_at(heap_base_oop, adr, adr_type, val, value_type, type, decorators); + } + } + + if (argument(1)->is_ValueType() && is_store) { + Node* value = ValueTypeNode::make_from_oop(this, base, _gvn.type(base)->value_klass()); + value = value->as_ValueType()->make_larval(this, false); + replace_in_map(argument(1), value); + } + + return true; +} + +bool LibraryCallKit::inline_unsafe_make_private_buffer() { + Node* receiver = argument(0); + Node* value = argument(1); + + receiver = null_check(receiver); + if (stopped()) { + return true; + } + + if (!value->is_ValueType()) { + return false; + } + + set_result(value->as_ValueType()->make_larval(this, true)); + + return true; +} + +bool LibraryCallKit::inline_unsafe_finish_private_buffer() { + Node* receiver = argument(0); + Node* buffer = argument(1); + + receiver = null_check(receiver); + if (stopped()) { + return true; + } + + if (!buffer->is_ValueType()) { + return false; + } + + ValueTypeNode* vt = buffer->as_ValueType(); + if (!vt->is_allocated(&_gvn) || !_gvn.type(vt)->is_valuetype()->larval()) { + return false; } + set_result(vt->finish_larval(this)); + return true; } @@ -3062,15 
+3265,6 @@ return true; } -//---------------------------load_mirror_from_klass---------------------------- -// Given a klass oop, load its java mirror (a java.lang.Class oop). -Node* LibraryCallKit::load_mirror_from_klass(Node* klass) { - Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset())); - Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered); - // mirror = ((OopHandle)mirror)->resolve(); - return access_load(load, TypeInstPtr::MIRROR, T_OBJECT, IN_NATIVE); -} - //-----------------------load_klass_from_mirror_common------------------------- // Given a java mirror (a java.lang.Class oop), load its corresponding klass oop. // Test the klass oop for null (signifying a primitive Class like Integer.TYPE), @@ -3117,6 +3311,10 @@ return generate_access_flags_guard(kls, JVM_ACC_INTERFACE, 0, region); } +Node* LibraryCallKit::generate_value_guard(Node* kls, RegionNode* region) { + return generate_access_flags_guard(kls, JVM_ACC_VALUE, 0, region); +} + //-------------------------inline_native_Class_query------------------- bool LibraryCallKit::inline_native_Class_query(vmIntrinsics::ID id) { const Type* return_type = TypeInt::BOOL; @@ -3301,18 +3499,28 @@ if (obj == NULL || obj->is_top()) { return false; // dead path } - const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr(); + + ciKlass* obj_klass = NULL; + if (obj->is_ValueType()) { + const TypeValueType* tvt = _gvn.type(obj)->is_valuetype(); + obj_klass = tvt->value_klass(); + } else { + const TypeOopPtr* tp = _gvn.type(obj)->isa_oopptr(); + if (tp != NULL) { + obj_klass = tp->klass(); + } + } // First, see if Class.cast() can be folded statically. // java_mirror_type() returns non-null for compile-time Class constants. ciType* tm = mirror_con->java_mirror_type(); if (tm != NULL && tm->is_klass() && - tp != NULL && tp->klass() != NULL) { - if (!tp->klass()->is_loaded()) { + obj_klass != NULL) { + if (!obj_klass->is_loaded()) { // Don't use intrinsic when class is not loaded. return false; } else { - int static_res = C->static_subtype_check(tm->as_klass(), tp->klass()); + int static_res = C->static_subtype_check(tm->as_klass(), obj_klass); if (static_res == Compile::SSC_always_true) { // isInstance() is true - fold the code. set_result(obj); @@ -3480,30 +3688,28 @@ } //---------------------generate_array_guard_common------------------------ -Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, - bool obj_array, bool not_array) { +Node* LibraryCallKit::generate_array_guard_common(Node* kls, RegionNode* region, ArrayKind kind) { if (stopped()) { return NULL; } - // If obj_array/non_array==false/false: - // Branch around if the given klass is in fact an array (either obj or prim). - // If obj_array/non_array==false/true: - // Branch around if the given klass is not an array klass of any kind. - // If obj_array/non_array==true/true: - // Branch around if the kls is not an oop array (kls is int[], String, etc.) - // If obj_array/non_array==true/false: - // Branch around if the kls is an oop array (Object[] or subtype) - // // Like generate_guard, adds a new path onto the region. jint layout_con = 0; Node* layout_val = get_layout_helper(kls, layout_con); if (layout_val == NULL) { - bool query = (obj_array - ? 
Klass::layout_helper_is_objArray(layout_con) - : Klass::layout_helper_is_array(layout_con)); - if (query == not_array) { + bool query = 0; + switch(kind) { + case ObjectArray: query = Klass::layout_helper_is_objArray(layout_con); break; + case NonObjectArray: query = !Klass::layout_helper_is_objArray(layout_con); break; + case TypeArray: query = Klass::layout_helper_is_typeArray(layout_con); break; + case ValueArray: query = Klass::layout_helper_is_valueArray(layout_con); break; + case AnyArray: query = Klass::layout_helper_is_array(layout_con); break; + case NonArray: query = !Klass::layout_helper_is_array(layout_con); break; + default: + ShouldNotReachHere(); + } + if (!query) { return NULL; // never a branch } else { // always a branch Node* always_branch = control(); @@ -3513,22 +3719,43 @@ return always_branch; } } + unsigned int value = 0; + BoolTest::mask btest = BoolTest::illegal; + switch(kind) { + case ObjectArray: + case NonObjectArray: { + value = Klass::_lh_array_tag_obj_value; + layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift))); + btest = kind == ObjectArray ? BoolTest::eq : BoolTest::ne; + break; + } + case TypeArray: { + value = Klass::_lh_array_tag_type_value; + layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift))); + btest = BoolTest::eq; + break; + } + case ValueArray: { + value = Klass::_lh_array_tag_vt_value; + layout_val = _gvn.transform(new RShiftINode(layout_val, intcon(Klass::_lh_array_tag_shift))); + btest = BoolTest::eq; + break; + } + case AnyArray: value = Klass::_lh_neutral_value; btest = BoolTest::lt; break; + case NonArray: value = Klass::_lh_neutral_value; btest = BoolTest::gt; break; + default: + ShouldNotReachHere(); + } // Now test the correct condition. - jint nval = (obj_array - ? (jint)(Klass::_lh_array_tag_type_value - << Klass::_lh_array_tag_shift) - : Klass::_lh_neutral_value); + jint nval = (jint)value; Node* cmp = _gvn.transform(new CmpINode(layout_val, intcon(nval))); - BoolTest::mask btest = BoolTest::lt; // correct for testing is_[obj]array - // invert the test if we are looking for a non-array - if (not_array) btest = BoolTest(btest).negate(); Node* bol = _gvn.transform(new BoolNode(cmp, btest)); return generate_fair_guard(bol, region); } //-----------------------inline_native_newArray-------------------------- -// private static native Object java.lang.reflect.newArray(Class componentType, int length); +// private static native Object java.lang.reflect.Array.newArray(Class componentType, int length); // private native Object Unsafe.allocateUninitializedArray0(Class cls, int size); bool LibraryCallKit::inline_unsafe_newArray(bool uninitialized) { Node* mirror; @@ -3644,6 +3871,19 @@ Node* end = is_copyOfRange? argument(2): argument(1); Node* array_type_mirror = is_copyOfRange? argument(3): argument(2); + const TypeAryPtr* original_t = _gvn.type(original)->isa_aryptr(); + const TypeInstPtr* mirror_t = _gvn.type(array_type_mirror)->isa_instptr(); + if (EnableValhalla && ValueArrayFlatten && + (original_t == NULL || mirror_t == NULL || + (mirror_t->java_mirror_type() == NULL && + (original_t->elem()->isa_valuetype() || + (original_t->elem()->make_oopptr() != NULL && + original_t->elem()->make_oopptr()->can_be_value_type()))))) { + // We need to know statically if the copy is to a flattened array + // or not but can't tell. 
+ return false; + } + Node* newcopy = NULL; // Set the original stack and the reexecute bit for the interpreter to reexecute @@ -3667,16 +3907,58 @@ // Despite the generic type of Arrays.copyOf, the mirror might be int, int[], etc. // Bail out if that is so. - Node* not_objArray = generate_non_objArray_guard(klass_node, bailout); + // Value type array may have object field that would require a + // write barrier. Conservatively, go to slow path. + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + Node* not_objArray = !bs->array_copy_requires_gc_barriers(false, T_OBJECT, false, BarrierSetC2::Parsing) ? + generate_typeArray_guard(klass_node, bailout) : generate_non_objArray_guard(klass_node, bailout); if (not_objArray != NULL) { // Improve the klass node's type from the new optimistic assumption: ciKlass* ak = ciArrayKlass::make(env()->Object_klass()); - const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, 0/*offset*/); + const Type* akls = TypeKlassPtr::make(TypePtr::NotNull, ak, Type::Offset(0)); Node* cast = new CastPPNode(klass_node, akls); cast->init_req(0, control()); klass_node = _gvn.transform(cast); } + Node* original_kls = load_object_klass(original); + // ArrayCopyNode:Ideal may transform the ArrayCopyNode to + // loads/stores but it is legal only if we're sure the + // Arrays.copyOf would succeed. So we need all input arguments + // to the copyOf to be validated, including that the copy to the + // new array won't trigger an ArrayStoreException. That subtype + // check can be optimized if we know something on the type of + // the input array from type speculation. + if (_gvn.type(klass_node)->singleton() && !stopped()) { + ciKlass* subk = _gvn.type(original_kls)->is_klassptr()->klass(); + ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass(); + + int test = C->static_subtype_check(superk, subk); + if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) { + const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr(); + if (t_original->speculative_type() != NULL) { + original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true); + original_kls = load_object_klass(original); + } + } + } + + if (EnableValhalla) { + // Either both or neither new array klass and original array + // klass must be flattened + Node* flattened_klass = generate_valueArray_guard(klass_node, NULL); + generate_valueArray_guard(original_kls, bailout); + if (flattened_klass != NULL) { + RegionNode* r = new RegionNode(2); + record_for_igvn(r); + r->init_req(1, control()); + set_control(flattened_klass); + generate_valueArray_guard(original_kls, r); + bailout->add_req(control()); + set_control(_gvn.transform(r)); + } + } + // Bail out if either start or end is negative. generate_negative_guard(start, bailout, &start); generate_negative_guard(end, bailout, &end); @@ -3713,31 +3995,11 @@ // Extreme case: Arrays.copyOf((Integer[])x, 10, String[].class). // This will fail a store-check if x contains any non-nulls. - // ArrayCopyNode:Ideal may transform the ArrayCopyNode to - // loads/stores but it is legal only if we're sure the - // Arrays.copyOf would succeed. So we need all input arguments - // to the copyOf to be validated, including that the copy to the - // new array won't trigger an ArrayStoreException. That subtype - // check can be optimized if we know something on the type of - // the input array from type speculation. 
- if (_gvn.type(klass_node)->singleton()) { - ciKlass* subk = _gvn.type(load_object_klass(original))->is_klassptr()->klass(); - ciKlass* superk = _gvn.type(klass_node)->is_klassptr()->klass(); - - int test = C->static_subtype_check(superk, subk); - if (test != Compile::SSC_always_true && test != Compile::SSC_always_false) { - const TypeOopPtr* t_original = _gvn.type(original)->is_oopptr(); - if (t_original->speculative_type() != NULL) { - original = maybe_cast_profiled_obj(original, t_original->speculative_type(), true); - } - } - } - bool validated = false; // Reason_class_check rather than Reason_intrinsic because we // want to intrinsify even if this traps. if (!too_many_traps(Deoptimization::Reason_class_check)) { - Node* not_subtype_ctrl = gen_subtype_check(load_object_klass(original), + Node* not_subtype_ctrl = gen_subtype_check(original_kls, klass_node); if (not_subtype_ctrl != top()) { @@ -3754,7 +4016,7 @@ newcopy = new_array(klass_node, length, 0); // no arguments to push ArrayCopyNode* ac = ArrayCopyNode::make(this, true, original, start, newcopy, intcon(0), moved, true, false, - load_object_klass(original), klass_node); + original_kls, klass_node); if (!is_copyOfRange) { ac->set_copyof(validated); } else { @@ -3878,7 +4140,12 @@ PhiNode* result_val = new PhiNode(result_reg, TypeInt::INT); PhiNode* result_io = new PhiNode(result_reg, Type::ABIO); PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM); - Node* obj = NULL; + Node* obj = argument(0); + + if (obj->is_ValueType() || gvn().type(obj)->is_valuetypeptr()) { + return false; + } + if (!is_static) { // Check for hashing null object obj = null_check_receiver(); @@ -3888,7 +4155,6 @@ } else { // Do a null check, and return zero if null. // System.identityHashCode(null) == 0 - obj = argument(0); Node* null_ctl = top(); obj = null_check_oop(obj, &null_ctl); result_reg->init_req(_null_path, null_ctl); @@ -3908,6 +4174,13 @@ RegionNode* slow_region = new RegionNode(1); record_for_igvn(slow_region); + const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); + assert(!obj_type->isa_valuetype() || !obj_type->is_valuetypeptr(), "no value type here"); + if (is_static && obj_type->can_be_value_type()) { + Node* obj_klass = load_object_klass(obj); + generate_value_guard(obj_klass, slow_region); + } + // If this is a virtual call, we generate a funny guard. We pull out // the vtable entry corresponding to hashCode() from the target object. // If the target method which we are calling happens to be the native @@ -3994,7 +4267,13 @@ // // Build special case code for calls to getClass on an object. bool LibraryCallKit::inline_native_getClass() { - Node* obj = null_check_receiver(); + Node* obj = argument(0); + if (obj->is_ValueType()) { + ciKlass* vk = _gvn.type(obj)->is_valuetype()->value_klass(); + set_result(makecon(TypeInstPtr::make(vk->java_mirror()))); + return true; + } + obj = null_check_receiver(); if (stopped()) return true; set_result(load_mirror_from_klass(load_object_klass(obj))); return true; @@ -4251,7 +4530,34 @@ // TODO: generate fields copies for small objects instead. Node* size = _gvn.transform(obj_size); - access_clone(obj, alloc_obj, size, is_array); + // Exclude the header but include array length to copy by 8 bytes words. + // Can't use base_offset_in_bytes(bt) since basic type is unknown. + int base_off = is_array ? 
arrayOopDesc::length_offset_in_bytes() : + instanceOopDesc::base_offset_in_bytes(); + // base_off: + // 8 - 32-bit VM + // 12 - 64-bit VM, compressed klass + // 16 - 64-bit VM, normal klass + if (base_off % BytesPerLong != 0) { + assert(UseCompressedClassPointers, ""); + if (is_array) { + // Exclude length to copy by 8 bytes words. + base_off += sizeof(int); + } else { + // Include klass to copy by 8 bytes words. + base_off = instanceOopDesc::klass_offset_in_bytes(); + } + assert(base_off % BytesPerLong == 0, "expect 8 bytes alignment"); + } + Node* src_base = basic_plus_adr(obj, base_off); + Node* dst_base = basic_plus_adr(alloc_obj, base_off); + + // Compute the length also, if needed: + Node* countx = size; + countx = _gvn.transform(new SubXNode(countx, MakeConX(base_off))); + countx = _gvn.transform(new URShiftXNode(countx, intcon(LogBytesPerLong))); + + access_clone(src_base, dst_base, countx, is_array); // Do not let reads from the cloned object float above the arraycopy. if (alloc != NULL) { @@ -4294,7 +4600,12 @@ { PreserveReexecuteState preexecs(this); jvms()->set_should_reexecute(true); - Node* obj = null_check_receiver(); + Node* obj = argument(0); + if (obj->is_ValueType()) { + return false; + } + + obj = null_check_receiver(); if (stopped()) return true; const TypeOopPtr* obj_type = _gvn.type(obj)->is_oopptr(); @@ -4304,7 +4615,8 @@ // loads/stores. Maybe a speculative type can help us. if (!obj_type->klass_is_exact() && obj_type->speculative_type() != NULL && - obj_type->speculative_type()->is_instance_klass()) { + obj_type->speculative_type()->is_instance_klass() && + !obj_type->speculative_type()->is_valuetype()) { ciInstanceKlass* spec_ik = obj_type->speculative_type()->as_instance_klass(); if (spec_ik->nof_nonstatic_fields() <= ArrayCopyLoadStoreMaxElem && !spec_ik->has_injected_fields()) { @@ -4341,61 +4653,72 @@ PhiNode* result_mem = new PhiNode(result_reg, Type::MEMORY, TypePtr::BOTTOM); record_for_igvn(result_reg); + // We only go to the fast case code if we pass a number of guards. + // The paths which do not pass are accumulated in the slow_region. + RegionNode* slow_region = new RegionNode(1); + record_for_igvn(slow_region); + Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL); if (array_ctl != NULL) { // It's an array. PreserveJVMState pjvms(this); set_control(array_ctl); - Node* obj_length = load_array_length(obj); - Node* obj_size = NULL; - Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) { - // If it is an oop array, it requires very special treatment, - // because gc barriers are required when accessing the array. - Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); - if (is_obja != NULL) { - PreserveJVMState pjvms2(this); - set_control(is_obja); - obj = access_resolve(obj, ACCESS_READ); - // Generate a direct call to the right arraycopy function(s). 
- Node* alloc = tightly_coupled_allocation(alloc_obj, NULL); - ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false); - ac->set_cloneoop(); - Node* n = _gvn.transform(ac); - assert(n == ac, "cannot disappear"); - ac->connect_outputs(this); - - result_reg->init_req(_objArray_path, control()); - result_val->init_req(_objArray_path, alloc_obj); - result_i_o ->set_req(_objArray_path, i_o()); - result_mem ->set_req(_objArray_path, reset_memory()); - } + // Value type array may have object field that would require a + // write barrier. Conservatively, go to slow path. + generate_valueArray_guard(obj_klass, slow_region); } - // Otherwise, there are no barriers to worry about. - // (We can dispense with card marks if we know the allocation - // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks - // causes the non-eden paths to take compensating steps to - // simulate a fresh allocation, so that no further - // card marks are required in compiled code to initialize - // the object.) if (!stopped()) { - copy_to_clone(obj, alloc_obj, obj_size, true); + Node* obj_length = load_array_length(obj); + Node* obj_size = NULL; + Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size); // no arguments to push + + BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2(); + if (bs->array_copy_requires_gc_barriers(true, T_OBJECT, true, BarrierSetC2::Parsing)) { + // If it is an oop array, it requires very special treatment, + // because gc barriers are required when accessing the array. + Node* is_obja = generate_objArray_guard(obj_klass, (RegionNode*)NULL); + if (is_obja != NULL) { + PreserveJVMState pjvms2(this); + set_control(is_obja); + // Generate a direct call to the right arraycopy function(s). + Node* alloc = tightly_coupled_allocation(alloc_obj, NULL); + ArrayCopyNode* ac = ArrayCopyNode::make(this, true, obj, intcon(0), alloc_obj, intcon(0), obj_length, alloc != NULL, false); + ac->set_cloneoop(); + Node* n = _gvn.transform(ac); + assert(n == ac, "cannot disappear"); + ac->connect_outputs(this); + + result_reg->init_req(_objArray_path, control()); + result_val->init_req(_objArray_path, alloc_obj); + result_i_o ->set_req(_objArray_path, i_o()); + result_mem ->set_req(_objArray_path, reset_memory()); + } + } - // Present the results of the copy. - result_reg->init_req(_array_path, control()); - result_val->init_req(_array_path, alloc_obj); - result_i_o ->set_req(_array_path, i_o()); - result_mem ->set_req(_array_path, reset_memory()); + // Otherwise, there are no barriers to worry about. + // (We can dispense with card marks if we know the allocation + // comes out of eden (TLAB)... In fact, ReduceInitialCardMarks + // causes the non-eden paths to take compensating steps to + // simulate a fresh allocation, so that no further + // card marks are required in compiled code to initialize + // the object.) + + if (!stopped()) { + copy_to_clone(obj, alloc_obj, obj_size, true); + + // Present the results of the copy. + result_reg->init_req(_array_path, control()); + result_val->init_req(_array_path, alloc_obj); + result_i_o ->set_req(_array_path, i_o()); + result_mem ->set_req(_array_path, reset_memory()); + } } } - // We only go to the instance fast case code if we pass a number of guards. - // The paths which do not pass are accumulated in the slow_region. - RegionNode* slow_region = new RegionNode(1); - record_for_igvn(slow_region); if (!stopped()) { // It's an instance (we did array above). 
Make the slow-path tests. // If this is a virtual call, we generate a funny guard. We grab @@ -4556,11 +4879,10 @@ _reexecute_sp = saved_reexecute_sp; // Remove the allocation from above the guards - CallProjections callprojs; - alloc->extract_projections(&callprojs, true); + CallProjections* callprojs = alloc->extract_projections(true); InitializeNode* init = alloc->initialization(); Node* alloc_mem = alloc->in(TypeFunc::Memory); - C->gvn_replace_by(callprojs.fallthrough_ioproj, alloc->in(TypeFunc::I_O)); + C->gvn_replace_by(callprojs->fallthrough_ioproj, alloc->in(TypeFunc::I_O)); C->gvn_replace_by(init->proj_out(TypeFunc::Memory), alloc_mem); C->gvn_replace_by(init->proj_out(TypeFunc::Control), alloc->in(0)); @@ -4572,7 +4894,7 @@ set_all_memory(mem); alloc->set_req(TypeFunc::Memory, mem); set_control(init->proj_out_or_null(TypeFunc::Control)); - set_i_o(callprojs.fallthrough_ioproj); + set_i_o(callprojs->fallthrough_ioproj); // Update memory as done in GraphKit::set_output_for_allocation() const TypeInt* length_type = _gvn.find_int_type(alloc->in(AllocateNode::ALength)); @@ -4816,6 +5138,26 @@ Deoptimization::Action_make_not_entrant); assert(stopped(), "Should be stopped"); } + + const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr(); + const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass()); + src = _gvn.transform(new CheckCastPPNode(control(), src, toop)); + + src_type = _gvn.type(src); + top_src = src_type->isa_aryptr(); + + if (top_dest != NULL && + top_dest->elem()->make_oopptr() != NULL && + top_dest->elem()->make_oopptr()->can_be_value_type()) { + generate_valueArray_guard(load_object_klass(dest), slow_region); + } + + if (top_src != NULL && + top_src->elem()->make_oopptr() != NULL && + top_src->elem()->make_oopptr()->can_be_value_type()) { + generate_valueArray_guard(load_object_klass(src), slow_region); + } + { PreserveJVMState pjvms(this); set_control(_gvn.transform(slow_region)); @@ -4823,10 +5165,6 @@ Deoptimization::Action_make_not_entrant); assert(stopped(), "Should be stopped"); } - - const TypeKlassPtr* dest_klass_t = _gvn.type(dest_klass)->is_klassptr(); - const Type *toop = TypeOopPtr::make_from_klass(dest_klass_t->klass()); - src = _gvn.transform(new CheckCastPPNode(control(), src, toop)); } arraycopy_move_allocation_here(alloc, dest, saved_jvms, saved_reexecute_sp, new_idx);
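
Note on the new ArrayKind guards used throughout this patch: generate_array_guard_common() now dispatches on an ArrayKind enum instead of the old obj_array/not_array boolean pair. For the array kinds it emits a right shift of the klass layout helper by Klass::_lh_array_tag_shift and compares the tag against the object-array, primitive-array, or flattened value-type-array tag; for AnyArray/NonArray it keeps the sign test against _lh_neutral_value. The stand-alone C++ sketch below is illustrative only: it models that decision logic on plain integers so the ArrayKind-to-test mapping can be checked in isolation. The tag, shift, and neutral-value constants are placeholders, not the definitions from klass.hpp, and guard_taken() is a hypothetical helper, not HotSpot code.

// Illustrative model only -- the lh_* constants are placeholders standing in
// for Klass::_lh_array_tag_* / _lh_neutral_value, not the real definitions.
#include <cassert>

namespace lh_model {
  const int lh_array_tag_shift = 30;   // assumed: array tag lives in the top bits
  const int lh_array_tag_type  = -1;   // placeholder primitive-array tag
  const int lh_array_tag_obj   = -2;   // placeholder object-array tag
  const int lh_array_tag_vt    = -3;   // placeholder flattened value-array tag
  const int lh_neutral_value   = 0;    // assumed boundary: instances > 0, arrays < 0

  enum ArrayKind { AnyArray, NonArray, ObjectArray, NonObjectArray, TypeArray, ValueArray };

  // True when the guard emitted by generate_array_guard_common() would take
  // the branch onto 'region' for a klass with this layout helper.
  bool guard_taken(int layout_helper, ArrayKind kind) {
    int tag = layout_helper >> lh_array_tag_shift;  // models the RShiftI of layout_val
    switch (kind) {
      case ObjectArray:    return tag == lh_array_tag_obj;           // BoolTest::eq
      case NonObjectArray: return tag != lh_array_tag_obj;           // BoolTest::ne
      case TypeArray:      return tag == lh_array_tag_type;          // BoolTest::eq
      case ValueArray:     return tag == lh_array_tag_vt;            // BoolTest::eq
      case AnyArray:       return layout_helper < lh_neutral_value;  // BoolTest::lt
      case NonArray:       return layout_helper > lh_neutral_value;  // BoolTest::gt
    }
    return false;
  }
}

int main() {
  using namespace lh_model;
  // Pack the (placeholder) object-array tag into the top bits of a layout helper.
  int obj_array_lh = (int)((unsigned)lh_array_tag_obj << lh_array_tag_shift);
  assert(guard_taken(obj_array_lh, ObjectArray));   // it is an object array
  assert(guard_taken(obj_array_lh, AnyArray));      // hence an array of some kind
  assert(!guard_taken(obj_array_lh, ValueArray));   // but not a flattened value array
  return 0;
}

This separation is what lets Arrays.copyOf, clone, and arraycopy in the hunks above add ValueArray and TypeArray guards (flattened arrays and value-type arrays with oop fields are routed to the slow path) without disturbing the existing ObjectArray/NonArray users of the same helper.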