--- old/src/cpu/x86/vm/sharedRuntime_x86_64.cpp 2017-09-21 09:17:52.604998398 +0200 +++ new/src/cpu/x86/vm/sharedRuntime_x86_64.cpp 2017-09-21 09:17:52.492998399 +0200 @@ -483,6 +483,7 @@ assert((i + 1) < total_args_passed && sig_bt[i + 1] == T_VOID, "expecting half"); // fall through case T_OBJECT: + case T_VALUETYPE: case T_ARRAY: case T_ADDRESS: case T_VALUETYPEPTR: @@ -893,7 +894,7 @@ int off = sig_extended.at(next_arg_comp)._offset; assert(off > 0, "offset in object should be positive"); size_t size_in_bytes = is_java_primitive(bt) ? type2aelembytes(bt) : wordSize; - bool is_oop = (bt == T_OBJECT || bt == T_ARRAY); + bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY); has_oop_field = has_oop_field || is_oop; gen_c2i_adapter_helper(masm, bt, next_arg_comp > 0 ? sig_extended.at(next_arg_comp-1)._bt : T_ILLEGAL, size_in_bytes, regs[next_arg_comp-ignored], Address(r11, off), extraspace, is_oop); @@ -1152,7 +1153,7 @@ int off = sig_extended.at(next_arg_comp)._offset; assert(off > 0, "offset in object should be positive"); size_t size_in_bytes = is_java_primitive(bt) ? 
type2aelembytes(bt) : wordSize; - bool is_oop = (bt == T_OBJECT || bt == T_ARRAY); + bool is_oop = (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY); gen_i2c_adapter_helper(masm, bt, prev_bt, size_in_bytes, regs[next_arg_comp - ignored], Address(r10, off), is_oop); } } while (vt != 0); @@ -1247,6 +1248,10 @@ BasicType bt = sig_extended.at(index)._bt; if (bt == T_VALUETYPE) { has_value_argument = true; + } else if (bt == T_VALUETYPEPTR) { + // non-flattened value type field + sig_str[idx++] = type2char(T_VALUETYPE); + sig_str[idx++] = ';'; } else if (bt == T_VOID) { // Ignore } else { @@ -4200,7 +4205,7 @@ __ movflt(to, r_1->as_XMMRegister()); } else if (bt == T_DOUBLE) { __ movdbl(to, r_1->as_XMMRegister()); - } else if (bt == T_OBJECT || bt == T_ARRAY) { + } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) { __ store_heap_oop(to, r_1->as_Register()); } else { assert(is_java_primitive(bt), "unexpected basic type"); @@ -4237,7 +4242,7 @@ __ movflt(r_1->as_XMMRegister(), from); } else if (bt == T_DOUBLE) { __ movdbl(r_1->as_XMMRegister(), from); - } else if (bt == T_OBJECT || bt == T_ARRAY) { + } else if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) { __ load_heap_oop(r_1->as_Register(), from); } else { assert(is_java_primitive(bt), "unexpected basic type"); --- old/src/share/vm/ci/ciField.cpp 2017-09-21 09:17:52.980998392 +0200 +++ new/src/share/vm/ci/ciField.cpp 2017-09-21 09:17:52.884998394 +0200 @@ -218,6 +218,8 @@ _known_to_link_with_put = field->_known_to_link_with_put; _known_to_link_with_get = field->_known_to_link_with_get; _constant_value = field->_constant_value; + assert(!field->is_flattened(), "field must not be flattened"); + _is_flattened = false; } static bool trust_final_non_static_fields(ciInstanceKlass* holder) { @@ -255,6 +257,8 @@ _flags = ciFlags(fd->access_flags()); _offset = fd->offset(); _holder = CURRENT_ENV->get_instance_klass(fd->field_holder()); + _is_flattened = fd->is_flatten(); + 
assert(fd->field_type() == T_VALUETYPE || !_is_flattened, "flattening is only supported for value type fields"); // Check to see if the field is constant. Klass* k = _holder->get_Klass(); --- old/src/share/vm/ci/ciField.hpp 2017-09-21 09:17:53.644998383 +0200 +++ new/src/share/vm/ci/ciField.hpp 2017-09-21 09:17:53.452998386 +0200 @@ -48,6 +48,7 @@ ciType* _type; int _offset; bool _is_constant; + bool _is_flattened; ciMethod* _known_to_link_with_put; ciInstanceKlass* _known_to_link_with_get; ciConstant _constant_value; @@ -174,6 +175,7 @@ bool is_stable () const { return flags().is_stable(); } bool is_volatile () const { return flags().is_volatile(); } bool is_transient () const { return flags().is_transient(); } + bool is_flattened () const { return _is_flattened; } // The field is modified outside of instance initializer methods // (or class/initializer methods if the field is static). bool has_initialized_final_update() const { return flags().has_initialized_final_update(); } --- old/src/share/vm/ci/ciInstanceKlass.cpp 2017-09-21 09:17:54.360998373 +0200 +++ new/src/share/vm/ci/ciInstanceKlass.cpp 2017-09-21 09:17:54.148998376 +0200 @@ -63,7 +63,6 @@ _has_nonstatic_concrete_methods = ik->has_nonstatic_concrete_methods(); _is_anonymous = ik->is_anonymous(); _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields - _nof_declared_nonstatic_fields = -1; // initialized lazily by compute_nonstatic_fields _has_injected_fields = -1; _vcc_klass = NULL; _implementor = NULL; // we will fill these lazily @@ -106,7 +105,6 @@ _nonstatic_field_size = -1; _has_nonstatic_fields = false; _nonstatic_fields = NULL; // initialized lazily by compute_nonstatic_fields - _nof_declared_nonstatic_fields = -1; // initialized lazily by compute_nonstatic_fields _has_injected_fields = -1; _vcc_klass = NULL; _is_anonymous = false; @@ -457,7 +455,6 @@ if (!has_nonstatic_fields()) { Arena* arena = CURRENT_ENV->arena(); _nonstatic_fields = new (arena) GrowableArray(arena, 0, 0, 
NULL); - _nof_declared_nonstatic_fields = 0; return 0; } assert(!is_java_lang_Object(), "bootstrap OK"); @@ -475,7 +472,6 @@ // See if I am no larger than my super; if so, I can use his fields. if (fsize == super_fsize) { _nonstatic_fields = super_fields; - _nof_declared_nonstatic_fields = super->nof_declared_nonstatic_fields(); return super_fields->length(); } } @@ -489,26 +485,17 @@ // This can happen if this class (java.lang.Class) has invisible fields. if (super_fields != NULL) { _nonstatic_fields = super_fields; - _nof_declared_nonstatic_fields = super->nof_declared_nonstatic_fields(); return super_fields->length(); } else { - _nof_declared_nonstatic_fields = 0; return 0; } } - int flen = fields->length(); - - // Now sort them by offset, ascending. - // (In principle, they could mix with superclass fields.) - fields->sort(sort_field_by_offset); _nonstatic_fields = fields; - return flen; + return fields->length(); } -GrowableArray* -ciInstanceKlass::compute_nonstatic_fields_impl(GrowableArray* - super_fields) { +GrowableArray* ciInstanceKlass::compute_nonstatic_fields_impl(GrowableArray* super_fields, bool flatten) { ASSERT_IN_VM; Arena* arena = CURRENT_ENV->arena(); int flen = 0; @@ -521,13 +508,11 @@ // allocate the array: if (flen == 0) { - _nof_declared_nonstatic_fields = flen; return NULL; // return nothing if none are locally declared } if (super_fields != NULL) { flen += super_fields->length(); } - _nof_declared_nonstatic_fields = flen; fields = new (arena) GrowableArray(arena, flen, 0, NULL); if (super_fields != NULL) { @@ -537,12 +522,13 @@ for (JavaFieldStream fs(k); !fs.done(); fs.next()) { if (fs.access_flags().is_static()) continue; fieldDescriptor& fd = fs.field_descriptor(); - if (fd.field_type() == T_VALUETYPE) { + if (fd.is_flatten() && flatten) { + assert(fd.field_type() == T_VALUETYPE, "flattening is only supported for value type fields"); // Value type fields are embedded int field_offset = fd.offset(); // Get ValueKlass and adjust number of 
fields ciValueKlass* vk = get_field_type_by_offset(field_offset)->as_value_klass(); - flen += vk->flattened_field_count() - 1; + flen += vk->nof_nonstatic_fields() - 1; // Iterate over fields of the flattened value type and copy them to 'this' for (int i = 0; i < vk->nof_nonstatic_fields(); ++i) { ciField* flattened_field = vk->nonstatic_field_at(i); @@ -560,6 +546,9 @@ } } assert(fields->length() == flen, "sanity"); + // Now sort them by offset, ascending. + // (In principle, they could mix with superclass fields.) + fields->sort(sort_field_by_offset); return fields; } --- old/src/share/vm/ci/ciInstanceKlass.hpp 2017-09-21 09:17:54.788998367 +0200 +++ new/src/share/vm/ci/ciInstanceKlass.hpp 2017-09-21 09:17:54.680998369 +0200 @@ -65,8 +65,6 @@ ciConstantPoolCache* _field_cache; // cached map index->field GrowableArray* _nonstatic_fields; - int _nof_declared_nonstatic_fields; // Number of nonstatic fields declared in the bytecode - // i.e., without value types flattened into the instance. int _has_injected_fields; // any non static injected fields? lazily initialized. 
@@ -107,8 +105,8 @@ void compute_shared_init_state(); bool compute_shared_has_subklass(); - int compute_nonstatic_fields(); - GrowableArray* compute_nonstatic_fields_impl(GrowableArray* super_fields); + virtual int compute_nonstatic_fields(); + GrowableArray* compute_nonstatic_fields_impl(GrowableArray* super_fields, bool flatten = true); // Update the init_state for shared klasses void update_if_shared(InstanceKlass::ClassState expected) { @@ -195,18 +193,11 @@ // total number of nonstatic fields (including inherited): int nof_nonstatic_fields() { - if (_nonstatic_fields == NULL) + if (_nonstatic_fields == NULL) { return compute_nonstatic_fields(); - else + } else { return _nonstatic_fields->length(); - } - - int nof_declared_nonstatic_fields() { - if (_nonstatic_fields == NULL) { - compute_nonstatic_fields(); } - assert(_nof_declared_nonstatic_fields >= 0, "after lazy initialization _nof_declared_nonstatic_fields must be at least 0"); - return _nof_declared_nonstatic_fields; } bool has_injected_fields() { --- old/src/share/vm/ci/ciType.cpp 2017-09-21 09:17:55.192998362 +0200 +++ new/src/share/vm/ci/ciType.cpp 2017-09-21 09:17:55.040998364 +0200 @@ -61,6 +61,13 @@ } // ------------------------------------------------------------------ +// ciType::is__Value +// +bool ciType::is__Value() const { + return (this == ciEnv::____Value_klass); +} + +// ------------------------------------------------------------------ // ciType::name // // Return the name of this type --- old/src/share/vm/ci/ciType.hpp 2017-09-21 09:17:55.632998356 +0200 +++ new/src/share/vm/ci/ciType.hpp 2017-09-21 09:17:55.452998358 +0200 @@ -76,6 +76,7 @@ // What kind of ciObject is this? 
bool is_type() const { return true; } bool is_classless() const { return is_primitive_type(); } + bool is__Value() const; const char* name(); virtual void print_name_on(outputStream* st); --- old/src/share/vm/ci/ciValueKlass.cpp 2017-09-21 09:17:56.152998349 +0200 +++ new/src/share/vm/ci/ciValueKlass.cpp 2017-09-21 09:17:56.032998350 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -25,51 +25,23 @@ #include "precompiled.hpp" #include "ci/ciField.hpp" #include "ci/ciValueKlass.hpp" -#include "oops/fieldStreams.hpp" #include "oops/valueKlass.hpp" -int ciValueKlass::compute_field_index_map() { - assert(is_loaded(), "value class must be loaded to compute mapping of field indeces"); - - if (_field_index_map != NULL) { - return _field_index_map->length(); - } - +int ciValueKlass::compute_nonstatic_fields() { + int result = ciInstanceKlass::compute_nonstatic_fields(); + assert(super() == NULL || !super()->has_nonstatic_fields(), "a value type must not inherit fields from its superclass"); + + // Compute declared non-static fields (without flattening of value type fields) + GrowableArray* fields = NULL; + GUARDED_VM_ENTRY(fields = compute_nonstatic_fields_impl(NULL, false /* no flattening */);) Arena* arena = CURRENT_ENV->arena(); - _field_index_map = new (arena) GrowableArray(arena, nof_declared_nonstatic_fields(), 0, 0); - if (!has_nonstatic_fields()) { - return 0; - } - - // FIXME: Once it is possible to construct class hierarchies with value types. 
- assert(!super()->has_nonstatic_fields(), "a value type must not inherit fields from its superclass"); - - ValueKlass* vklass = ValueKlass::cast(get_Klass()); - for (JavaFieldStream fs(vklass); !fs.done(); fs.next()) { - if (fs.access_flags().is_static()) { - continue; - } - _field_index_map->append(fs.field_descriptor().index()); - } - return _field_index_map->length(); -} - -// Number of value type fields -int ciValueKlass::field_count() { - if (_field_index_map == NULL) { - return compute_field_index_map(); - } else { - return _field_index_map->length(); - } + _declared_nonstatic_fields = (fields != NULL) ? fields : new (arena) GrowableArray(arena, 0, 0, 0); + return result; } -// Size of value type fields in words -int ciValueKlass::field_size() { - int size = 0; - for (int i = 0; i < field_count(); ++i) { - size += field_type_by_index(i)->size(); - } - return size; +// Offset of the first field in the value type +int ciValueKlass::first_field_offset() const { + GUARDED_VM_ENTRY(return ValueKlass::cast(get_Klass())->first_field_offset();) } // Returns the index of the field with the given offset. 
If the field at 'offset' @@ -80,8 +52,8 @@ int best_offset = 0; int best_index = -1; // Search the field with the given offset - for (int i = 0; i < field_count(); ++i) { - int field_offset = field_offset_by_index(i); + for (int i = 0; i < nof_declared_nonstatic_fields(); ++i) { + int field_offset = _declared_nonstatic_fields->at(i)->offset(); if (field_offset == offset) { // Exact match return i; @@ -94,48 +66,18 @@ } } assert(best_index >= 0, "field not found"); - assert(best_offset == offset || field_type_by_index(best_index)->is_valuetype(), "offset should match for non-VTs"); + assert(best_offset == offset || _declared_nonstatic_fields->at(best_index)->type()->is_valuetype(), "offset should match for non-VTs"); return best_index; } -// Returns the field offset of the field with the given index -int ciValueKlass::field_offset_by_index(int index) { - if (_field_index_map == NULL) { - compute_field_index_map(); - } - GUARDED_VM_ENTRY( - ValueKlass* vklass = ValueKlass::cast(get_Klass()); - return vklass->field_offset(_field_index_map->at(index)); - ) -} - -// Returns the field type of the field with the given index -ciType* ciValueKlass::field_type_by_index(int index) { - int offset = field_offset_by_index(index); - VM_ENTRY_MARK; - return get_field_type_by_offset(offset); -} - -// Offset of the first field in the value type -int ciValueKlass::first_field_offset() const { - GUARDED_VM_ENTRY( - ValueKlass* vklass = ValueKlass::cast(get_Klass()); - return vklass->first_field_offset(); - ) -} - +// Are arrays containing this value type flattened? 
bool ciValueKlass::flatten_array() const { - GUARDED_VM_ENTRY( - ValueKlass* vklass = ValueKlass::cast(get_Klass()); - return vklass->flatten_array(); - ) + GUARDED_VM_ENTRY(return ValueKlass::cast(get_Klass())->flatten_array();) } -bool ciValueKlass::contains_oops() const { - GUARDED_VM_ENTRY( - ValueKlass* vklass = ValueKlass::cast(get_Klass()); - return vklass->contains_oops(); - ) +// Can this value type be returned as multiple values? +bool ciValueKlass::can_be_returned_as_fields() const { + GUARDED_VM_ENTRY(return !is__Value() && ValueKlass::cast(get_Klass())->return_regs() != NULL;) } // When passing a value type's fields as arguments, count the number @@ -145,7 +87,8 @@ for (int j = 0; j < nof_nonstatic_fields(); j++) { ciField* f = nonstatic_field_at(j); BasicType bt = f->type()->basic_type(); - assert(bt != T_VALUETYPE, "embedded"); + // TODO re-enable when using T_VALUETYPEPTR + //assert(bt != T_VALUETYPE, "embedded"); if (bt == T_LONG || bt == T_DOUBLE) { slots++; } --- old/src/share/vm/ci/ciValueKlass.hpp 2017-09-21 09:17:56.556998343 +0200 +++ new/src/share/vm/ci/ciValueKlass.hpp 2017-09-21 09:17:56.448998345 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,48 +39,40 @@ CI_PACKAGE_ACCESS private: - // Index fields of a value type, indeces range from 0 to the number of fields of the - // value type - 1. - // For each index constructed, _field_index_map records the field's index - // in InstanceKlass::_fields (i.e., _field_index_map records the value returned by - // fieldDescriptor::index() for each field). 
- GrowableArray* _field_index_map; + // Fields declared in the bytecode (without flattened value type fields) + GrowableArray* _declared_nonstatic_fields; protected: - ciValueKlass(Klass* h_k) : ciInstanceKlass(h_k), _field_index_map(NULL) { + ciValueKlass(Klass* h_k) : ciInstanceKlass(h_k), _declared_nonstatic_fields(NULL) { assert(is_final(), "ValueKlass must be final"); }; + int compute_nonstatic_fields(); const char* type_string() { return "ciValueKlass"; } - int compute_field_index_map(); - ValueKlass* get_valueKlass() const { - return ValueKlass::cast(get_Klass()); +public: + bool is_valuetype() const { return true; } + + int nof_declared_nonstatic_fields() { + if (_declared_nonstatic_fields == NULL) { + compute_nonstatic_fields(); + } + return _declared_nonstatic_fields->length(); } -public: - bool is_valuetype() const { return true; } - bool flatten_array() const; - bool contains_oops() const; + // ith non-static declared field (presented by ascending address) + ciField* declared_nonstatic_field_at(int i) { + assert(_declared_nonstatic_fields != NULL, "should be initialized"); + return _declared_nonstatic_fields->at(i); + } // Value type fields - int field_count(); - int field_size(); - int flattened_field_count() { - return nof_nonstatic_fields(); - } - int field_index_by_offset(int offset); - int field_offset_by_index(int index); - ciType* field_type_by_index(int index); - int first_field_offset() const; + int first_field_offset() const; + int field_index_by_offset(int offset); + bool flatten_array() const; + bool can_be_returned_as_fields() const; int value_arg_slots(); - - // Can a value type instance of this type be returned as multiple - // returned values? 
- bool can_be_returned_as_fields() const { - return this != ciEnv::current()->___Value_klass() && get_valueKlass()->return_regs() != NULL; - } }; #endif // SHARE_VM_CI_CIVALUEKLASS_HPP --- old/src/share/vm/classfile/classFileParser.cpp 2017-09-21 09:17:56.936998338 +0200 +++ new/src/share/vm/classfile/classFileParser.cpp 2017-09-21 09:17:56.836998339 +0200 @@ -3903,7 +3903,7 @@ // Calculate the starting byte offsets int next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields(); - // Value types in static fields are nor embedded, they are handled with oops + // Value types in static fields are not embedded, they are handled with oops int next_static_double_offset = next_static_oop_offset + ((fac->count[STATIC_OOP] + fac->count[STATIC_VALUETYPE]) * heapOopSize); if ( fac->count[STATIC_DOUBLE] && @@ -3987,9 +3987,8 @@ assert(klass != NULL, "Sanity check"); assert(klass->access_flags().is_value_type(), "Value type expected"); ValueKlass* vk = ValueKlass::cast(klass); - // Conditions to apply flattening or not should be defined - //in a single place - if (vk->size_helper() <= ValueArrayElemMaxFlatSize) { + // Conditions to apply flattening or not should be defined in a single place + if ((ValueFieldMaxFlatSize < 0) || vk->size_helper() <= ValueFieldMaxFlatSize) { nonstatic_value_type_indexes[nonstatic_value_type_count] = fs.index(); nonstatic_value_type_klasses[nonstatic_value_type_count] = klass; nonstatic_value_type_count++; --- old/src/share/vm/oops/instanceKlass.hpp 2017-09-21 09:17:57.276998333 +0200 +++ new/src/share/vm/oops/instanceKlass.hpp 2017-09-21 09:17:57.192998334 +0200 @@ -467,6 +467,7 @@ public: int field_offset (int index) const { return field(index)->offset(); } + bool field_flattened (int index) const { return field(index)->is_flatten(); } int field_access_flags(int index) const { return field(index)->access_flags(); } Symbol* field_name (int index) const { return field(index)->name(constants()); } Symbol* field_signature (int index) 
const { return field(index)->signature(constants()); } --- old/src/share/vm/oops/valueKlass.cpp 2017-09-21 09:17:57.588998329 +0200 +++ new/src/share/vm/oops/valueKlass.cpp 2017-09-21 09:17:57.500998330 +0200 @@ -309,27 +309,27 @@ GrowableArray sig_extended; sig_extended.push(SigEntry(T_VALUETYPE, base_off)); for (JavaFieldStream fs(this); !fs.done(); fs.next()) { - if (fs.access_flags().is_static()) continue; + if (fs.access_flags().is_static()) continue; fieldDescriptor& fd = fs.field_descriptor(); BasicType bt = fd.field_type(); int offset = base_off + fd.offset() - (base_off > 0 ? first_field_offset() : 0); if (bt == T_VALUETYPE) { if (fd.is_flatten()) { - Symbol* signature = fd.signature(); - JavaThread* THREAD = JavaThread::current(); - oop loader = class_loader(); - oop domain = protection_domain(); - ResetNoHandleMark rnhm; - HandleMark hm; - NoSafepointVerifier nsv; - Klass* klass = SystemDictionary::resolve_or_null(signature, - Handle(THREAD, loader), Handle(THREAD, domain), - THREAD); - assert(klass != NULL && !HAS_PENDING_EXCEPTION, "lookup shouldn't fail"); - const GrowableArray& embedded = ValueKlass::cast(klass)->collect_fields(offset); - sig_extended.appendAll(&embedded); + Symbol* signature = fd.signature(); + JavaThread* THREAD = JavaThread::current(); + oop loader = class_loader(); + oop domain = protection_domain(); + ResetNoHandleMark rnhm; + HandleMark hm; + NoSafepointVerifier nsv; + Klass* klass = SystemDictionary::resolve_or_null(signature, + Handle(THREAD, loader), Handle(THREAD, domain), + THREAD); + assert(klass != NULL && !HAS_PENDING_EXCEPTION, "lookup shouldn't fail"); + const GrowableArray& embedded = ValueKlass::cast(klass)->collect_fields(offset); + sig_extended.appendAll(&embedded); } else { - sig_extended.push(SigEntry(T_OBJECT, offset)); + sig_extended.push(SigEntry(T_VALUETYPEPTR, offset)); } } else { sig_extended.push(SigEntry(bt, offset)); @@ -433,7 +433,7 @@ for (int i = 0; i < sig_vk->length(); i++) { BasicType bt = 
sig_vk->at(i)._bt; - if (bt == T_OBJECT || bt == T_ARRAY) { + if (bt == T_OBJECT || bt == T_VALUETYPEPTR || bt == T_ARRAY) { int off = sig_vk->at(i)._offset; VMRegPair pair = regs->at(j); address loc = reg_map.location(pair.first()); @@ -551,6 +551,7 @@ break; } case T_OBJECT: + case T_VALUETYPEPTR: case T_ARRAY: { Handle handle = handles.at(k++); oop v = handle(); --- old/src/share/vm/opto/callGenerator.cpp 2017-09-21 09:17:57.904998324 +0200 +++ new/src/share/vm/opto/callGenerator.cpp 2017-09-21 09:17:57.804998326 +0200 @@ -126,7 +126,7 @@ _separate_io_proj(separate_io_proj) { if (method->is_method_handle_intrinsic() && - method->signature()->return_type() == ciEnv::current()->___Value_klass()) { + method->signature()->return_type()->is__Value()) { // If that call has not been optimized by the time optimizations // are over, we'll need to add a call to create a value type // instance from the klass returned by the call. Separating @@ -184,9 +184,9 @@ // Check if return value is a value type pointer const TypeValueTypePtr* vtptr = gvn.type(ret)->isa_valuetypeptr(); if (vtptr != NULL) { - if (vtptr->klass() != kit.C->env()->___Value_klass()) { + if (!vtptr->is__Value()) { // Create ValueTypeNode from the oop and replace the return value - Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret); + Node* vt = ValueTypeNode::make(&kit, ret); kit.push_node(T_VALUETYPE, vt); } else { kit.push_node(T_VALUETYPE, ret); @@ -279,7 +279,9 @@ // Check if return value is a value type pointer if (gvn.type(ret)->isa_valuetypeptr()) { // Create ValueTypeNode from the oop and replace the return value - Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), ret); + Node* ctl = kit.control(); + Node* vt = ValueTypeNode::make(&kit, ret); + kit.set_control(ctl); kit.push_node(T_VALUETYPE, vt); } else { kit.push_node(method()->return_type()->basic_type(), ret); @@ -437,13 +439,17 @@ if (!ValueTypePassFieldsAsArgs) { Node* arg = call->in(TypeFunc::Parms + i1); if 
(t->isa_valuetypeptr()) { - arg = ValueTypeNode::make(gvn, map->memory(), arg); + Node* ctl = map->control(); + arg = ValueTypeNode::make(gvn, ctl, map->memory(), arg); + map->set_control(ctl); } map->set_argument(jvms, i1, arg); } else { - if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) { + if (t->isa_valuetypeptr() && !t->is_valuetypeptr()->is__Value()) { ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass(); - Node* vt = ValueTypeNode::make(gvn, call, vk, j, true); + Node* ctl = map->control(); + Node* vt = ValueTypeNode::make(gvn, ctl, map->memory(), call, vk, j, true); + map->set_control(ctl); map->set_argument(jvms, i1, gvn.transform(vt)); j += vk->value_arg_slots(); } else { @@ -496,39 +502,39 @@ if (return_type->is_valuetype()) { const Type* vt_t = call->_tf->range_sig()->field_at(TypeFunc::Parms); + bool returned_as_fields = call->tf()->returns_value_type_as_fields(); if (result->is_ValueType()) { ValueTypeNode* vt = result->as_ValueType(); - if (!call->tf()->returns_value_type_as_fields()) { - result = vt->allocate(&kit); - result = C->initial_gvn()->transform(new ValueTypePtrNode(vt, result, C)); + if (!returned_as_fields) { + result = vt->allocate(&kit)->get_oop(); + result = gvn.transform(new ValueTypePtrNode(vt, result, C)); } else { // Return of multiple values (the fields of a value type) - vt->replace_call_results(call, C); + vt->replace_call_results(&kit, call, C); if (gvn.type(vt->get_oop()) == TypePtr::NULL_PTR) { result = vt->tagged_klass(gvn); } else { result = vt->get_oop(); } } - } else { - if (vt_t->is_valuetypeptr()->value_type()->value_klass() != C->env()->___Value_klass()) { - if (gvn.type(result)->isa_valuetypeptr() && call->tf()->returns_value_type_as_fields()) { - Node* cast = new CheckCastPPNode(NULL, result, vt_t); - gvn.record_for_igvn(cast); - ValueTypePtrNode* vtptr = ValueTypePtrNode::make(gvn, kit.merged_memory(), gvn.transform(cast)); - vtptr->replace_call_results(call, 
C); - result = cast; - } else { - assert(result->is_top(), "what else?"); - for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) { - ProjNode *pn = call->fast_out(i)->as_Proj(); - uint con = pn->_con; - if (con >= TypeFunc::Parms) { - C->initial_gvn()->hash_delete(pn); - pn->set_req(0, C->top()); - --i; --imax; - } - } + } else if (gvn.type(result)->isa_valuetypeptr() && returned_as_fields) { + assert(!vt_t->is_valuetypeptr()->is__Value(), "__Value not supported"); + Node* cast = new CheckCastPPNode(NULL, result, vt_t); + gvn.record_for_igvn(cast); + Node* ctl = kit.control(); + ValueTypePtrNode* vtptr = ValueTypePtrNode::make(gvn, ctl, kit.merged_memory(), gvn.transform(cast)); + kit.set_control(ctl); + vtptr->replace_call_results(&kit, call, C); + result = cast; + } else if (!return_type->is__Value()) { + assert(result->is_top(), "what else?"); + for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) { + ProjNode *pn = call->fast_out(i)->as_Proj(); + uint con = pn->_con; + if (con >= TypeFunc::Parms) { + gvn.hash_delete(pn); + pn->set_req(0, C->top()); + --i; --imax; } } } @@ -910,13 +916,13 @@ const Type* arg_type = arg->bottom_type(); const Type* sig_type = TypeOopPtr::make_from_klass(t->as_klass()); if (t->is_valuetype()) { - assert(!(arg_type->isa_valuetype() && t == kit.C->env()->___Value_klass()), "need a pointer to the value type"); - if (arg_type->isa_valuetypeptr() && t != kit.C->env()->___Value_klass()) { + assert(!(arg_type->isa_valuetype() && t->is__Value()), "need a pointer to the value type"); + if (arg_type->isa_valuetypeptr() && !t->is__Value()) { Node* cast = gvn.transform(new CheckCastPPNode(kit.control(), arg, sig_type)); - Node* vt = ValueTypeNode::make(gvn, kit.merged_memory(), cast); + Node* vt = ValueTypeNode::make(&kit, cast); kit.set_argument(arg_nb, vt); } else { - assert(t == kit.C->env()->___Value_klass() || arg->is_ValueType(), "inconsistent argument"); + assert(t->is__Value() || arg->is_ValueType(), 
"inconsistent argument"); } } else { if (arg_type->isa_oopptr() && !arg_type->higher_equal(sig_type)) { --- old/src/share/vm/opto/callnode.cpp 2017-09-21 09:17:58.252998320 +0200 +++ new/src/share/vm/opto/callnode.cpp 2017-09-21 09:17:58.144998321 +0200 @@ -1395,7 +1395,7 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, Node *size, Node *klass_node, - Node* initial_test, ValueTypeNode* value_node) + Node* initial_test, ValueTypeBaseNode* value_node) : CallNode(atype, NULL, TypeRawPtr::BOTTOM) { init_class_id(Class_Allocate); --- old/src/share/vm/opto/callnode.hpp 2017-09-21 09:17:58.560998315 +0200 +++ new/src/share/vm/opto/callnode.hpp 2017-09-21 09:17:58.468998317 +0200 @@ -710,7 +710,7 @@ method->is_method_handle_intrinsic() && r->cnt() > TypeFunc::Parms && r->field_at(TypeFunc::Parms)->isa_valuetypeptr() && - r->field_at(TypeFunc::Parms)->is_valuetypeptr()->value_type()->value_klass() == C->env()->___Value_klass()) { + r->field_at(TypeFunc::Parms)->is_valuetypeptr()->is__Value()) { init_flags(Flag_is_macro); C->add_macro_node(this); } @@ -879,7 +879,7 @@ virtual uint size_of() const; // Size is bigger AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio, - Node *size, Node *klass_node, Node *initial_test, ValueTypeNode* value_node = NULL); + Node *size, Node *klass_node, Node *initial_test, ValueTypeBaseNode* value_node = NULL); // Expansion modifies the JVMState, so we need to clone it virtual void clone_jvms(Compile* C) { if (jvms() != NULL) { --- old/src/share/vm/opto/castnode.cpp 2017-09-21 09:17:58.868998311 +0200 +++ new/src/share/vm/opto/castnode.cpp 2017-09-21 09:17:58.776998312 +0200 @@ -27,6 +27,7 @@ #include "opto/callnode.hpp" #include "opto/castnode.hpp" #include "opto/connode.hpp" +#include "opto/graphKit.hpp" #include "opto/matcher.hpp" #include "opto/phaseX.hpp" #include "opto/rootnode.hpp" @@ -416,157 +417,153 @@ in(1)->in(0) != NULL && 
in(1)->in(0)->is_CallStaticJava() && in(1)->in(0)->as_CallStaticJava()->method() != NULL && in(1)->as_Proj()->_con == TypeFunc::Parms) { - ciValueKlass* vk = type()->is_valuetypeptr()->value_type()->value_klass(); - assert(vk != phase->C->env()->___Value_klass(), "why cast to __Value?"); - PhaseIterGVN *igvn = phase->is_IterGVN(); + const TypeValueTypePtr* cast_type = type()->is_valuetypeptr(); + ciValueKlass* vk = cast_type->value_type()->value_klass(); + assert(!vk->is__Value(), "why cast to __Value?"); + PhaseIterGVN* igvn = phase->is_IterGVN(); if (ValueTypeReturnedAsFields && vk->can_be_returned_as_fields()) { igvn->set_delay_transform(true); CallNode* call = in(1)->in(0)->as_Call(); - phase->C->remove_macro_node(call); + igvn->C->remove_macro_node(call); // We now know the return type of the call - const TypeTuple *range_sig = TypeTuple::make_range(vk, false); - const TypeTuple *range_cc = TypeTuple::make_range(vk, true); + const TypeTuple* range_sig = TypeTuple::make_range(vk, false); + const TypeTuple* range_cc = TypeTuple::make_range(vk, true); assert(range_sig != call->_tf->range_sig() && range_cc != call->_tf->range_cc(), "type should change"); call->_tf = TypeFunc::make(call->_tf->domain_sig(), call->_tf->domain_cc(), range_sig, range_cc); - phase->set_type(call, call->Value(phase)); - phase->set_type(in(1), in(1)->Value(phase)); + igvn->set_type(call, call->Value(igvn)); + igvn->set_type(in(1), in(1)->Value(igvn)); + + Node* ctl_hook = new Node(1); + Node* mem_hook = new Node(1); + Node* io_hook = new Node(1); + Node* res_hook = new Node(1); + Node* ex_ctl_hook = new Node(1); + Node* ex_mem_hook = new Node(1); + Node* ex_io_hook = new Node(1); + // Extract projections from the call and hook users to temporary nodes. + // We will re-attach them to newly created PhiNodes below. 
CallProjections projs; call->extract_projections(&projs, true, true); + igvn->replace_in_uses(projs.fallthrough_catchproj, ctl_hook); + igvn->replace_in_uses(projs.fallthrough_memproj, mem_hook); + igvn->replace_in_uses(projs.fallthrough_ioproj, io_hook); + igvn->replace_in_uses(projs.resproj, res_hook); + igvn->replace_in_uses(projs.catchall_catchproj, ex_ctl_hook); + igvn->replace_in_uses(projs.catchall_memproj, ex_mem_hook); + igvn->replace_in_uses(projs.catchall_ioproj, ex_io_hook); + + // Restore IO input of the CatchNode + CatchNode* catchp = projs.fallthrough_catchproj->in(0)->as_Catch(); + catchp->set_req(TypeFunc::I_O, projs.catchall_ioproj); + igvn->rehash_node_delayed(catchp); + + // Rebuild the output JVMState from the call and use it to initialize a GraphKit + JVMState* new_jvms = call->jvms()->clone_shallow(igvn->C); + SafePointNode* new_map = new SafePointNode(call->req(), new_jvms); + for (uint i = TypeFunc::FramePtr; i < call->req(); i++) { + new_map->init_req(i, call->in(i)); + } + new_map->set_control(projs.fallthrough_catchproj); + new_map->set_memory(MergeMemNode::make(projs.fallthrough_memproj)); + new_map->set_i_o(projs.fallthrough_ioproj); + new_jvms->set_map(new_map); - Node* init_ctl = new Node(1); - Node* init_mem = new Node(1); - Node* init_io = new Node(1); - Node* init_ex_ctl = new Node(1); - Node* init_ex_mem = new Node(1); - Node* init_ex_io = new Node(1); - Node* res = new Node(1); - - Node* ctl = init_ctl; - Node* mem = init_mem; - Node* io = init_io; - Node* ex_ctl = init_ex_ctl; - Node* ex_mem = init_ex_mem; - Node* ex_io = init_ex_io; + GraphKit kit(new_jvms, igvn); // Either we get a buffered value pointer and we can case use it - // or we get a tagged klass pointer and we need to allocate a - // value. 
- Node* cast = phase->transform(new CastP2XNode(ctl, res)); - Node* masked = phase->transform(new AndXNode(cast, phase->MakeConX(0x1))); - Node* cmp = phase->transform(new CmpXNode(masked, phase->MakeConX(0x1))); - Node* bol = phase->transform(new BoolNode(cmp, BoolTest::eq)); - IfNode* iff = phase->transform(new IfNode(ctl, bol, PROB_MAX, COUNT_UNKNOWN))->as_If(); - Node* iftrue = phase->transform(new IfTrueNode(iff)); - Node* iffalse = phase->transform(new IfFalseNode(iff)); - - ctl = iftrue; - - Node* ex_r = new RegionNode(3); - Node* ex_mem_phi = new PhiNode(ex_r, Type::MEMORY, TypePtr::BOTTOM); - Node* ex_io_phi = new PhiNode(ex_r, Type::ABIO); - - ex_r->init_req(2, ex_ctl); - ex_mem_phi->init_req(2, ex_mem); - ex_io_phi->init_req(2, ex_io); - - // We need an oop pointer in case allocation elimination - // fails. Allocate a new instance here. - Node* javaoop = ValueTypeBaseNode::allocate(type(), ctl, mem, io, - call->in(TypeFunc::FramePtr), - ex_ctl, ex_mem, ex_io, - call->jvms(), igvn); - - - - ex_r->init_req(1, ex_ctl); - ex_mem_phi->init_req(1, ex_mem); - ex_io_phi->init_req(1, ex_io); - - ex_r = igvn->transform(ex_r); - ex_mem_phi = igvn->transform(ex_mem_phi); - ex_io_phi = igvn->transform(ex_io_phi); - - // Create the ValueTypePtrNode. This will add extra projections - // to the call. 
- ValueTypePtrNode* vtptr = ValueTypePtrNode::make(igvn, this); - // Newly allocated value type must be initialized - vtptr->store(igvn, ctl, mem->as_MergeMem(), javaoop); - vtptr->set_oop(javaoop); - - Node* r = new RegionNode(3); - Node* mem_phi = new PhiNode(r, Type::MEMORY, TypePtr::BOTTOM); - Node* io_phi = new PhiNode(r, Type::ABIO); - Node* res_phi = new PhiNode(r, type()); - - r->init_req(1, ctl); - mem_phi->init_req(1, mem); - io_phi->init_req(1, io); - res_phi->init_req(1, igvn->transform(vtptr)); - - ctl = iffalse; - mem = init_mem; - io = init_io; - - Node* castnotnull = new CastPPNode(res, TypePtr::NOTNULL); - castnotnull->set_req(0, ctl); - castnotnull = phase->transform(castnotnull); - Node* ccast = clone(); - ccast->set_req(0, ctl); - ccast->set_req(1, castnotnull); - ccast = phase->transform(ccast); - - vtptr = ValueTypePtrNode::make(*phase, mem, ccast); - - r->init_req(2, ctl); - mem_phi->init_req(2, mem); - io_phi->init_req(2, io); - res_phi->init_req(2, igvn->transform(vtptr)); - - r = igvn->transform(r); - mem_phi = igvn->transform(mem_phi); - io_phi = igvn->transform(io_phi); - res_phi = igvn->transform(res_phi); - - igvn->replace_in_uses(projs.fallthrough_catchproj, r); - igvn->replace_in_uses(projs.fallthrough_memproj, mem_phi); - igvn->replace_in_uses(projs.fallthrough_ioproj, io_phi); - igvn->replace_in_uses(projs.resproj, res_phi); - igvn->replace_in_uses(projs.catchall_catchproj, ex_r); - igvn->replace_in_uses(projs.catchall_memproj, ex_mem_phi); - igvn->replace_in_uses(projs.catchall_ioproj, ex_io_phi); + // or we get a tagged klass pointer and we need to allocate a value. 
+ Node* cast = igvn->transform(new CastP2XNode(kit.control(), projs.resproj)); + Node* masked = igvn->transform(new AndXNode(cast, igvn->MakeConX(0x1))); + Node* cmp = igvn->transform(new CmpXNode(masked, igvn->MakeConX(0x1))); + Node* bol = kit.Bool(cmp, BoolTest::eq); + IfNode* iff = kit.create_and_map_if(kit.control(), bol, PROB_MAX, COUNT_UNKNOWN); + Node* iftrue = kit.IfTrue(iff); + Node* iffalse = kit.IfFalse(iff); + + Node* region = new RegionNode(3); + Node* mem_phi = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM); + Node* io_phi = new PhiNode(region, Type::ABIO); + Node* res_phi = new PhiNode(region, cast_type); + Node* ex_region = new RegionNode(3); + Node* ex_mem_phi = new PhiNode(ex_region, Type::MEMORY, TypePtr::BOTTOM); + Node* ex_io_phi = new PhiNode(ex_region, Type::ABIO); + + // True branch: result is a tagged klass pointer + // Allocate a value type (will add extra projections to the call) + kit.set_control(iftrue); + Node* res = igvn->transform(ValueTypePtrNode::make(&kit, vk, call)); + res = res->isa_ValueTypePtr()->allocate(&kit); + + // Get exception state + GraphKit ekit(kit.transfer_exceptions_into_jvms(), igvn); + SafePointNode* ex_map = ekit.combine_and_pop_all_exception_states(); + Node* ex_oop = ekit.use_exception_state(ex_map); + + region->init_req(1, kit.control()); + mem_phi->init_req(1, kit.reset_memory()); + io_phi->init_req(1, kit.i_o()); + res_phi->init_req(1, res); + ex_region->init_req(1, ekit.control()); + ex_mem_phi->init_req(1, ekit.reset_memory()); + ex_io_phi->init_req(1, ekit.i_o()); + + // False branch: result is not tagged + // Load buffered value type from returned oop + kit.set_control(iffalse); + kit.set_all_memory(projs.fallthrough_memproj); + kit.set_i_o(projs.fallthrough_ioproj); + // Cast oop to NotNull + ConstraintCastNode* res_cast = clone()->as_ConstraintCast(); + res_cast->set_req(0, kit.control()); + res_cast->set_req(1, projs.resproj); + res_cast->set_type(cast_type->cast_to_ptr_type(TypePtr::NotNull)); 
+ Node* ctl = kit.control(); // Control may get updated below + res = ValueTypePtrNode::make(*igvn, ctl, kit.merged_memory(), igvn->transform(res_cast)); + + region->init_req(2, ctl); + mem_phi->init_req(2, kit.reset_memory()); + io_phi->init_req(2, kit.i_o()); + res_phi->init_req(2, igvn->transform(res)); + ex_region->init_req(2, projs.catchall_catchproj); + ex_mem_phi->init_req(2, projs.catchall_memproj); + ex_io_phi->init_req(2, projs.catchall_ioproj); igvn->set_delay_transform(false); - igvn->replace_node(init_ctl, projs.fallthrough_catchproj); - igvn->replace_node(init_mem, projs.fallthrough_memproj); - igvn->replace_node(init_io, projs.fallthrough_ioproj); - igvn->replace_node(res, projs.resproj); - igvn->replace_node(init_ex_ctl, projs.catchall_catchproj); - igvn->replace_node(init_ex_mem, projs.catchall_memproj); - igvn->replace_node(init_ex_io, projs.catchall_ioproj); - + // Re-attach users to newly created PhiNodes + igvn->replace_node(ctl_hook, igvn->transform(region)); + igvn->replace_node(mem_hook, igvn->transform(mem_phi)); + igvn->replace_node(io_hook, igvn->transform(io_phi)); + igvn->replace_node(res_hook, igvn->transform(res_phi)); + igvn->replace_node(ex_ctl_hook, igvn->transform(ex_region)); + igvn->replace_node(ex_mem_hook, igvn->transform(ex_mem_phi)); + igvn->replace_node(ex_io_hook, igvn->transform(ex_io_phi)); return this; } else { CallNode* call = in(1)->in(0)->as_Call(); // We now know the return type of the call - const TypeTuple *range = TypeTuple::make_range(vk, false); + const TypeTuple* range = TypeTuple::make_range(vk, false); if (range != call->_tf->range_sig()) { - // Build the ValueTypePtrNode by loading the fields. Use call - // return as oop edge in the ValueTypePtrNode. 
+ // Build the ValueTypePtrNode by loading the fields call->_tf = TypeFunc::make(call->_tf->domain_sig(), call->_tf->domain_cc(), range, range); phase->set_type(call, call->Value(phase)); phase->set_type(in(1), in(1)->Value(phase)); uint last = phase->C->unique(); CallNode* call = in(1)->in(0)->as_Call(); + // Extract projections from the call and hook control users to temporary node CallProjections projs; call->extract_projections(&projs, true, true); + Node* ctl = projs.fallthrough_catchproj; Node* mem = projs.fallthrough_memproj; - Node* vtptr = ValueTypePtrNode::make(*phase, mem, in(1)); - + Node* ctl_hook = new Node(1); + igvn->replace_in_uses(ctl, ctl_hook); + Node* vtptr = ValueTypePtrNode::make(*phase, ctl, mem, in(1)); + // Attach users to updated control + igvn->replace_node(ctl_hook, ctl); return vtptr; } } --- old/src/share/vm/opto/cfgnode.cpp 2017-09-21 09:17:59.164998307 +0200 +++ new/src/share/vm/opto/cfgnode.cpp 2017-09-21 09:17:59.076998308 +0200 @@ -1663,6 +1663,7 @@ for (uint i = 1; i < req(); i++) { if (in(i) != NULL && in(i)->is_ValueTypePtr()) { const TypeValueTypePtr* t = phase->type(in(i))->is_valuetypeptr(); + t = t->cast_to_ptr_type(TypePtr::BotPTR)->is_valuetypeptr(); if (vtptr == NULL) { vtptr = t; } else { @@ -1670,7 +1671,7 @@ } } else { assert(in(i) == NULL || vtptr == NULL || phase->type(in(i))->higher_equal(vtptr) || phase->type(in(i)) == Type::TOP || - phase->type(in(i))->is_valuetypeptr()->value_type()->value_klass() == phase->C->env()->___Value_klass(), "bad type"); + phase->type(in(i))->is_valuetypeptr()->is__Value(), "bad type"); } } if (vtptr != NULL) { @@ -1678,7 +1679,7 @@ bool progress = false; PhaseIterGVN* igvn = phase->is_IterGVN(); for (uint i = 1; i < req(); i++) { - if (in(i) != NULL && !phase->type(in(i))->higher_equal(vtptr)) { + if (in(i) != NULL && !in(i)->is_Con() && !phase->type(in(i))->higher_equal(vtptr)) { // Can't transform because CheckCastPPNode::Identity can // push the cast up through another Phi and 
cause this same // transformation to run again, indefinitely --- old/src/share/vm/opto/doCall.cpp 2017-09-21 09:17:59.512998302 +0200 +++ new/src/share/vm/opto/doCall.cpp 2017-09-21 09:17:59.404998304 +0200 @@ -660,18 +660,18 @@ } } else if (rt == T_VALUETYPE) { assert(ct == T_VALUETYPE, "value type expected but got rt=%s, ct=%s", type2name(rt), type2name(ct)); - if (rtype == C->env()->___Value_klass()) { + if (rtype->is__Value()) { const Type* sig_type = TypeOopPtr::make_from_klass(ctype->as_klass()); Node* retnode = pop(); Node* cast = _gvn.transform(new CheckCastPPNode(control(), retnode, sig_type)); - Node* vt = ValueTypeNode::make(_gvn, merged_memory(), cast); + Node* vt = ValueTypeNode::make(this, cast); push(vt); } else { - assert(ctype == C->env()->___Value_klass(), "unexpected value type klass"); + assert(ctype->is__Value(), "unexpected value type klass"); Node* retnode = pop(); assert(retnode->is_ValueType(), "inconsistent"); ValueTypeNode* vt = retnode->as_ValueType(); - Node* alloc = vt->allocate(this); + Node* alloc = vt->allocate(this)->get_oop(); Node* vtptr = _gvn.transform(new ValueTypePtrNode(vt, alloc, C)); push(vtptr); } --- old/src/share/vm/opto/escape.cpp 2017-09-21 09:17:59.824998298 +0200 +++ new/src/share/vm/opto/escape.cpp 2017-09-21 09:17:59.736998299 +0200 @@ -2097,8 +2097,7 @@ // Ignore first AddP. 
} else { const Type* elemtype = adr_type->isa_aryptr()->elem(); - if (elemtype->isa_valuetype()) { - assert(field_offset != Type::OffsetBot, "invalid field offset"); + if (elemtype->isa_valuetype() && field_offset != Type::OffsetBot) { ciValueKlass* vk = elemtype->is_valuetype()->value_klass(); field_offset += vk->first_field_offset(); bt = vk->get_field_by_offset(field_offset, false)->layout_type(); @@ -2115,7 +2114,9 @@ } } } - return (bt == T_OBJECT || bt == T_NARROWOOP || bt == T_ARRAY); + // TODO enable when using T_VALUETYPEPTR + //assert(bt != T_VALUETYPE, "should not have valuetype here"); + return (bt == T_OBJECT || bt == T_VALUETYPE || bt == T_VALUETYPEPTR || bt == T_NARROWOOP || bt == T_ARRAY); } // Returns unique pointed java object or NULL. @@ -3183,7 +3184,7 @@ assert(_compile->tf()->returns_value_type_as_fields(), "must return a value type"); // Get ValueKlass by removing the tag bit from the metadata pointer Node* klass = use->in(TypeFunc::Parms); - intptr_t ptr = (intptr_t)igvn->find_intptr_t_con(klass, -1); + intptr_t ptr = igvn->type(klass)->isa_rawptr()->get_con(); clear_nth_bit(ptr, 0); assert(Metaspace::contains((void*)ptr), "should be klass"); assert(((ValueKlass*)ptr)->contains_oops(), "returned value type must contain a reference field"); --- old/src/share/vm/opto/graphKit.cpp 2017-09-21 09:18:00.196998293 +0200 +++ new/src/share/vm/opto/graphKit.cpp 2017-09-21 09:18:00.088998294 +0200 @@ -49,14 +49,20 @@ //----------------------------GraphKit----------------------------------------- // Main utility constructor. -GraphKit::GraphKit(JVMState* jvms) +GraphKit::GraphKit(JVMState* jvms, PhaseGVN* gvn) : Phase(Phase::Parser), _env(C->env()), - _gvn(*C->initial_gvn()) + _gvn((gvn != NULL) ? 
*gvn : *C->initial_gvn()) { _exceptions = jvms->map()->next_exception(); if (_exceptions != NULL) jvms->map()->set_next_exception(NULL); set_jvms(jvms); +#ifdef ASSERT + if (_gvn.is_IterGVN() != NULL) { + // Save the initial size of _for_igvn worklist for verification (see ~GraphKit) + _worklist_size = _gvn.C->for_igvn()->size(); + } +#endif } // Private constructor for parser. @@ -1383,18 +1389,8 @@ } ld = _gvn.transform(ld); if (bt == T_VALUETYPE) { - // Load non-flattened value type from memory. Add a null check and let the - // interpreter take care of initializing the field to the default value type. - Node* null_ctl = top(); - ld = null_check_common(ld, bt, false, &null_ctl, false); - if (null_ctl != top()) { - assert(!adr_type->isa_aryptr(), "value type array must be initialized"); - PreserveJVMState pjvms(this); - set_control(null_ctl); - uncommon_trap(Deoptimization::reason_null_check(false), Deoptimization::Action_maybe_recompile, - t->is_valuetypeptr()->value_type()->value_klass(), "uninitialized non-flattened value type"); - } - ld = ValueTypeNode::make(gvn(), map()->memory(), ld); + // Loading a non-flattened value type from memory requires a null check. + ld = ValueTypeNode::make(this, ld, true /* null check */); } else if (((bt == T_OBJECT) && C->do_escape_analysis()) || C->eliminate_boxing()) { // Improve graph before escape analysis and boxing elimination. 
record_for_igvn(ld); @@ -1535,9 +1531,9 @@ uint adr_idx = C->get_alias_index(adr_type); assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" ); - if (bt == T_VALUETYPE) { - // Allocate value type and store oop - val = val->as_ValueType()->allocate(this); + if (val->is_ValueType()) { + // Allocate value type and get oop + val = val->as_ValueType()->allocate(this)->get_oop(); } pre_barrier(true /* do_load */, @@ -1630,7 +1626,7 @@ if (ValueTypePassFieldsAsArgs) { if (arg->is_ValueType()) { ValueTypeNode* vt = arg->as_ValueType(); - if (domain->field_at(i)->is_valuetypeptr()->klass() != C->env()->___Value_klass()) { + if (!domain->field_at(i)->is_valuetypeptr()->is__Value()) { // We don't pass value type arguments by reference but instead // pass each field of the value type idx += vt->pass_fields(call, idx, *this); @@ -1639,7 +1635,7 @@ // For example, see CompiledMethod::preserve_callee_argument_oops(). call->set_override_symbolic_info(true); } else { - arg = arg->as_ValueType()->allocate(this); + arg = arg->as_ValueType()->allocate(this)->get_oop(); call->init_req(idx, arg); idx++; } @@ -1650,7 +1646,7 @@ } else { if (arg->is_ValueType()) { // Pass value type argument via oop to callee - arg = arg->as_ValueType()->allocate(this); + arg = arg->as_ValueType()->allocate(this)->get_oop(); } call->init_req(i, arg); } @@ -1691,37 +1687,40 @@ Node* GraphKit::set_results_for_java_call(CallJavaNode* call, bool separate_io_proj) { if (stopped()) return top(); // maybe the call folded up? + // Note: Since any out-of-line call can produce an exception, + // we always insert an I_O projection from the call into the result. + + make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj); + + if (separate_io_proj) { + // The caller requested separate projections be used by the fall + // through and exceptional paths, so replace the projections for + // the fall through path. 
+ set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) )); + set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) )); + } + // Capture the return value, if any. Node* ret; if (call->method() == NULL || - call->method()->return_type()->basic_type() == T_VOID) - ret = top(); - else { + call->method()->return_type()->basic_type() == T_VOID) { + ret = top(); + } else { if (!call->tf()->returns_value_type_as_fields()) { ret = _gvn.transform(new ProjNode(call, TypeFunc::Parms)); } else { // Return of multiple values (value type fields): we create a // ValueType node, each field is a projection from the call. - const TypeTuple *range_sig = call->tf()->range_sig(); + const TypeTuple* range_sig = call->tf()->range_sig(); const Type* t = range_sig->field_at(TypeFunc::Parms); assert(t->isa_valuetypeptr(), "only value types for multiple return values"); ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass(); - ret = ValueTypeNode::make(_gvn, call, vk, TypeFunc::Parms+1, false); + Node* ctl = control(); + ret = ValueTypeNode::make(_gvn, ctl, merged_memory(), call, vk, TypeFunc::Parms+1, false); + set_control(ctl); } } - // Note: Since any out-of-line call can produce an exception, - // we always insert an I_O projection from the call into the result. - - make_slow_call_ex(call, env()->Throwable_klass(), separate_io_proj); - - if (separate_io_proj) { - // The caller requested separate projections be used by the fall - // through and exceptional paths, so replace the projections for - // the fall through path. 
- set_i_o(_gvn.transform( new ProjNode(call, TypeFunc::I_O) )); - set_all_memory(_gvn.transform( new ProjNode(call, TypeFunc::Memory) )); - } return ret; } @@ -3397,7 +3396,7 @@ Node* extra_slow_test, Node* *return_size_val, bool deoptimize_on_exception, - ValueTypeNode* value_node) { + ValueTypeBaseNode* value_node) { // Compute size in doublewords // The size is always an integral number of doublewords, represented // as a positive bytewise size stored in the klass's layout_helper. @@ -3693,7 +3692,7 @@ PreserveJVMState pjvms(this); // Create default value type and store it to memory Node* oop = ValueTypeNode::make_default(gvn(), vk); - oop = oop->as_ValueType()->allocate(this); + oop = oop->as_ValueType()->allocate(this)->get_oop(); length = SubI(length, intcon(1)); add_predicate(nargs); @@ -4564,7 +4563,7 @@ Node* con = makecon(con_type); if (field->layout_type() == T_VALUETYPE) { // Load value type from constant oop - con = ValueTypeNode::make(gvn(), map()->memory(), con); + con = ValueTypeNode::make(this, con); } return con; } --- old/src/share/vm/opto/graphKit.hpp 2017-09-21 09:18:00.552998288 +0200 +++ new/src/share/vm/opto/graphKit.hpp 2017-09-21 09:18:00.460998289 +0200 @@ -63,6 +63,9 @@ SafePointNode* _exceptions;// Parser map(s) for exception state(s) int _bci; // JVM Bytecode Pointer ciMethod* _method; // JVM Current Method +#ifdef ASSERT + uint _worklist_size; +#endif private: int _sp; // JVM Expression Stack Pointer; don't modify directly! @@ -75,11 +78,16 @@ public: GraphKit(); // empty constructor - GraphKit(JVMState* jvms); // the JVM state on which to operate + GraphKit(JVMState* jvms, PhaseGVN* gvn = NULL); // the JVM state on which to operate #ifdef ASSERT ~GraphKit() { assert(!has_exceptions(), "user must call transfer_exceptions_into_jvms"); + // During incremental inlining, the Node_Array of the C->for_igvn() worklist and the IGVN + // worklist are shared but the _in_worklist VectorSet is not. 
To avoid inconsistencies, + // we should not add nodes to the _for_igvn worklist when using IGVN for the GraphKit. + assert((_gvn.is_IterGVN() == NULL) || (_gvn.C->for_igvn()->size() == _worklist_size), + "GraphKit should not modify _for_igvn worklist after parsing"); } #endif @@ -89,7 +97,7 @@ ciEnv* env() const { return _env; } PhaseGVN& gvn() const { return _gvn; } - void record_for_igvn(Node* n) const { C->record_for_igvn(n); } // delegate to Compile + void record_for_igvn(Node* n) const { _gvn.record_for_igvn(n); } // Handy well-known nodes: Node* null() const { return zerocon(T_OBJECT); } @@ -880,7 +888,7 @@ Node* slow_test = NULL, Node* *return_size_val = NULL, bool deoptimize_on_exception = false, - ValueTypeNode* value_node = NULL); + ValueTypeBaseNode* value_node = NULL); Node* new_array(Node* klass_node, Node* count_val, int nargs, Node* *return_size_val = NULL, bool deoptimize_on_exception = false); --- old/src/share/vm/opto/idealKit.cpp 2017-09-21 09:18:00.864998283 +0200 +++ new/src/share/vm/opto/idealKit.cpp 2017-09-21 09:18:00.752998285 +0200 @@ -48,7 +48,6 @@ _cvstate = NULL; // We can go memory state free or else we need the entire memory state assert(_initial_memory == NULL || _initial_memory->Opcode() == Op_MergeMem, "memory must be pre-split"); - assert(!_gvn.is_IterGVN(), "IdealKit can't be used during Optimize phase"); int init_size = 5; _pending_cvstates = new (C->node_arena()) GrowableArray(C->node_arena(), init_size, 0, 0); DEBUG_ONLY(_state = new (C->node_arena()) GrowableArray(C->node_arena(), init_size, 0, 0)); @@ -296,7 +295,7 @@ return delay_transform(n); } else { n = gvn().transform(n); - C->record_for_igvn(n); + gvn().record_for_igvn(n); return n; } } @@ -305,7 +304,7 @@ Node* IdealKit::delay_transform(Node* n) { // Delay transform until IterativeGVN gvn().set_type(n, n->bottom_type()); - C->record_for_igvn(n); + gvn().record_for_igvn(n); return n; } --- old/src/share/vm/opto/loopopts.cpp 2017-09-21 09:18:01.176998279 +0200 +++ 
new/src/share/vm/opto/loopopts.cpp 2017-09-21 09:18:01.096998280 +0200 @@ -1015,6 +1015,11 @@ Node* m = n->fast_out(j); if (m->is_FastLock()) return false; + if (m->is_ValueType()) { + // TODO this breaks optimizations! + // Value types should not be split through phis + //return false; + } #ifdef _LP64 if (m->Opcode() == Op_ConvI2L) return false; --- old/src/share/vm/opto/macro.cpp 2017-09-21 09:18:01.524998274 +0200 +++ new/src/share/vm/opto/macro.cpp 2017-09-21 09:18:01.432998276 +0200 @@ -571,7 +571,6 @@ // Search the last value stored into the object's field. Node *PhaseMacroExpand::value_from_mem(Node *sfpt_mem, Node *sfpt_ctl, BasicType ft, const Type *ftype, const TypeOopPtr *adr_t, AllocateNode *alloc) { assert(adr_t->is_known_instance_field(), "instance required"); - assert(ft != T_VALUETYPE, "should not be used for value type fields"); int instance_id = adr_t->instance_id(); assert((uint)instance_id == alloc->_idx, "wrong allocation"); @@ -675,16 +674,26 @@ offset -= vk->first_field_offset(); // Create a new ValueTypeNode and retrieve the field values from memory ValueTypeNode* vt = ValueTypeNode::make(_igvn, vk)->as_ValueType(); - for (int i = 0; i < vk->field_count(); ++i) { + for (int i = 0; i < vk->nof_declared_nonstatic_fields(); ++i) { ciType* field_type = vt->field_type(i); int field_offset = offset + vt->field_offset(i); // Each value type field has its own memory slice adr_type = adr_type->with_field_offset(field_offset); Node* value = NULL; - if (field_type->basic_type() == T_VALUETYPE) { + if (field_type->is_valuetype() && vt->field_is_flattened(i)) { value = value_type_from_mem(mem, ctl, field_type->as_value_klass(), adr_type, field_offset, alloc); } else { - value = value_from_mem(mem, ctl, field_type->basic_type(), Type::get_const_type(field_type), adr_type, alloc); + const Type* ft = Type::get_const_type(field_type); + BasicType bt = field_type->basic_type(); + if (UseCompressedOops && !is_java_primitive(bt)) { + ft = ft->make_narrowoop(); 
+ bt = T_NARROWOOP; + } + value = value_from_mem(mem, ctl, bt, ft, adr_type, alloc); + if (ft->isa_narrowoop()) { + assert(UseCompressedOops, "unexpected narrow oop"); + value = transform_later(new DecodeNNode(value, value->get_ptr_type())); + } } vt->set_field_value(i, value); } @@ -885,8 +894,7 @@ offset = field->offset(); elem_type = field->type(); basic_elem_type = field->layout_type(); - // Value type fields should not have safepoint uses - assert(basic_elem_type != T_VALUETYPE, "value type fields are flattened"); + assert(!field->is_flattened(), "flattened value type fields should not have safepoint uses"); } else { offset = array_base + j * (intptr_t)element_size; } @@ -2650,7 +2658,7 @@ if (ret == NULL) { return; } - assert(ret->bottom_type()->is_valuetypeptr()->klass() == C->env()->___Value_klass(), "unexpected return type from MH intrinsic"); + assert(ret->bottom_type()->is_valuetypeptr()->is__Value(), "unexpected return type from MH intrinsic"); const TypeFunc* tf = call->_tf; const TypeTuple* domain = OptoRuntime::store_value_type_fields_Type()->domain_cc(); const TypeFunc* new_tf = TypeFunc::make(tf->domain_sig(), tf->domain_cc(), tf->range_sig(), domain); @@ -2767,7 +2775,9 @@ } rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::mark_offset_in_bytes(), mark_node, T_ADDRESS); rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_offset_in_bytes(), klass_node, T_METADATA); - rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT); + if (UseCompressedClassPointers) { + rawmem = make_store(slowpath_false, rawmem, old_top, oopDesc::klass_gap_offset_in_bytes(), intcon(0), T_INT); + } Node* pack_handler = make_load(slowpath_false, rawmem, klass_node, in_bytes(ValueKlass::pack_handler_offset()), TypeRawPtr::BOTTOM, T_ADDRESS); CallLeafNoFPNode* handler_call = new CallLeafNoFPNode(OptoRuntime::pack_value_type_Type(), --- old/src/share/vm/opto/matcher.cpp 2017-09-21 09:18:01.904998269 
+0200 +++ new/src/share/vm/opto/matcher.cpp 2017-09-21 09:18:01.808998270 +0200 @@ -191,8 +191,8 @@ mask[0].Insert(regs.second()); } } else { - BasicType *sig_bt = NEW_RESOURCE_ARRAY(BasicType, cnt); - VMRegPair *vm_parm_regs = NEW_RESOURCE_ARRAY(VMRegPair, cnt); + BasicType* sig_bt = NEW_RESOURCE_ARRAY(BasicType, cnt); + VMRegPair* vm_parm_regs = NEW_RESOURCE_ARRAY(VMRegPair, cnt); for (uint i = 0; i < cnt; i++) { sig_bt[i] = range->field_at(i+TypeFunc::Parms)->basic_type(); --- old/src/share/vm/opto/memnode.cpp 2017-09-21 09:18:02.244998264 +0200 +++ new/src/share/vm/opto/memnode.cpp 2017-09-21 09:18:02.144998266 +0200 @@ -1112,11 +1112,24 @@ Node* base = AddPNode::Ideal_base_and_offset(addr, phase, offset); if (base != NULL && base->is_ValueTypePtr()) { Node* value = base->as_ValueTypePtr()->field_value_by_offset((int)offset, true); - if (bottom_type()->isa_narrowoop()) { - assert(!phase->type(value)->isa_narrowoop(), "should already be decoded"); - value = phase->transform(new EncodePNode(value, bottom_type())); + if (value->is_ValueType()) { + // Non-flattened value type field + ValueTypeNode* vt = value->as_ValueType(); + if (vt->is_allocated(phase)) { + value = vt->get_oop(); + } else { + // Not yet allocated, bail out + value = NULL; + } + } + if (value != NULL) { + if (Opcode() == Op_LoadN) { + // Encode oop value if we are loading a narrow oop + assert(!phase->type(value)->isa_narrowoop(), "should already be decoded"); + value = phase->transform(new EncodePNode(value, bottom_type())); + } + return value; } - return value; } // If the previous store-maker is the right kind of Store, and the store is --- old/src/share/vm/opto/narrowptrnode.cpp 2017-09-21 09:18:02.584998260 +0200 +++ new/src/share/vm/opto/narrowptrnode.cpp 2017-09-21 09:18:02.496998261 +0200 @@ -42,7 +42,7 @@ if (t == Type::TOP) return Type::TOP; if (t == TypeNarrowOop::NULL_PTR) return TypePtr::NULL_PTR; - assert(t->isa_narrowoop(), "only narrowoop here"); + assert(t->isa_narrowoop(), "only 
narrowoop here"); return t->make_ptr(); } --- old/src/share/vm/opto/parse1.cpp 2017-09-21 09:18:02.884998256 +0200 +++ new/src/share/vm/opto/parse1.cpp 2017-09-21 09:18:02.792998257 +0200 @@ -127,7 +127,7 @@ // Load oop and create a new ValueTypeNode const TypeValueTypePtr* vtptr_type = TypeValueTypePtr::make(type->is_valuetype(), TypePtr::NotNull); l = _gvn.transform(new LoadPNode(ctl, mem, adr, TypeRawPtr::BOTTOM, vtptr_type, MemNode::unordered)); - l = ValueTypeNode::make(gvn(), mem, l); + l = ValueTypeNode::make(this, l); break; } case T_VALUETYPEPTR: { @@ -205,7 +205,6 @@ int max_locals = jvms()->loc_size(); int max_stack = jvms()->stk_size(); - // Mismatch between method and jvms can occur since map briefly held // an OSR entry state (which takes up one RawPtr word). assert(max_locals == method()->max_locals(), "sanity"); @@ -243,7 +242,6 @@ // Make a BoxLockNode for the monitor. Node *box = _gvn.transform(new BoxLockNode(next_monitor())); - // Displaced headers and locked objects are interleaved in the // temp OSR buffer. We only copy the locked objects out here. // Fetch the locked object from the OSR temp buffer and copy to our fastlock node. @@ -251,7 +249,6 @@ // Try and copy the displaced header to the BoxNode Node* displaced_hdr = fetch_interpreter_state((index*2) + 1, Type::get_const_basic_type(T_ADDRESS), monitors_addr, osr_buf); - store_to_memory(control(), box, displaced_hdr, T_ADDRESS, Compile::AliasIdxRaw, MemNode::unordered); // Build a bogus FastLockNode (no code will be generated) and push the @@ -808,7 +805,7 @@ } if ((_caller->has_method() || tf()->returns_value_type_as_fields()) && ret_type->isa_valuetypeptr() && - ret_type->is_valuetypeptr()->klass() != C->env()->___Value_klass()) { + !ret_type->is_valuetypeptr()->is__Value()) { // When inlining or with multiple return values: return value // type as ValueTypeNode not as oop ret_type = ret_type->is_valuetypeptr()->value_type(); @@ -860,9 +857,11 @@ // argument per field of the value type. 
Build ValueTypeNodes // from the value type arguments. const Type* t = tf->domain_sig()->field_at(i); - if (t->isa_valuetypeptr() && t->is_valuetypeptr()->klass() != C->env()->___Value_klass()) { + if (t->isa_valuetypeptr() && !t->is_valuetypeptr()->is__Value()) { ciValueKlass* vk = t->is_valuetypeptr()->value_type()->value_klass(); - Node* vt = ValueTypeNode::make(gvn, start, vk, j, true); + Node* ctl = map->control(); + Node* vt = ValueTypeNode::make(gvn, ctl, map->memory(), start, vk, j, true); + map->set_control(ctl); map->init_req(i, gvn.transform(vt)); j += vk->value_arg_slots(); } else { @@ -874,16 +873,18 @@ } } } else { - Node* parm = gvn.transform(new ParmNode(start, i)); - // Check if parameter is a value type pointer - if (gvn.type(parm)->isa_valuetypeptr()) { - // Create ValueTypeNode from the oop and replace the parameter - parm = ValueTypeNode::make(gvn, map->memory(), parm); - } - map->init_req(i, parm); - // Record all these guys for later GVN. - record_for_igvn(parm); - j++; + Node* parm = gvn.transform(new ParmNode(start, i)); + // Check if parameter is a value type pointer + if (gvn.type(parm)->isa_valuetypeptr()) { + // Create ValueTypeNode from the oop and replace the parameter + Node* ctl = map->control(); + parm = ValueTypeNode::make(gvn, ctl, map->memory(), parm); + map->set_control(ctl); + } + map->init_req(i, parm); + // Record all these guys for later GVN. + record_for_igvn(parm); + j++; } } for (; j < map->req(); j++) { @@ -931,7 +932,7 @@ ValueTypeNode* vt = res->as_ValueType(); ret->add_req_batch(NULL, tf()->range_cc()->cnt() - TypeFunc::Parms); vt->pass_klass(ret, TypeFunc::Parms, kit); - vt->pass_fields(ret, TypeFunc::Parms+1, kit); + vt->pass_fields(ret, TypeFunc::Parms+1, kit, /* assert_allocated */ true); } else { ret->add_req(res); // Note: The second dummy edge is not needed by a ReturnNode. 
@@ -2272,11 +2273,15 @@ //------------------------------return_current--------------------------------- // Append current _map to _exit_return void Parse::return_current(Node* value) { - if (value != NULL && value->is_ValueType() && !_caller->has_method() && - !tf()->returns_value_type_as_fields()) { - // Returning from root JVMState without multiple returned values, - // make sure value type is allocated - value = value->as_ValueType()->allocate(this); + if (value != NULL && value->is_ValueType() && !_caller->has_method()) { + // Returning a value type from root JVMState + if (tf()->returns_value_type_as_fields()) { + // Value type is returned as fields, make sure non-flattened value type fields are allocated + value = value->as_ValueType()->allocate_fields(this); + } else { + // Value type is returned as oop, make sure it's allocated + value = value->as_ValueType()->allocate(this)->get_oop(); + } } if (RegisterFinalizersAtInit && --- old/src/share/vm/opto/parse2.cpp 2017-09-21 09:18:03.212998251 +0200 +++ new/src/share/vm/opto/parse2.cpp 2017-09-21 09:18:03.116998252 +0200 @@ -62,7 +62,7 @@ const TypeAryPtr* arytype = _gvn.type(ary)->is_aryptr(); if (arytype->klass()->is_value_array_klass()) { ciValueArrayKlass* vak = arytype->klass()->as_value_array_klass(); - Node* vt = ValueTypeNode::make(gvn(), vak->element_klass()->as_value_klass(), map()->memory(), ary, adr); + Node* vt = ValueTypeNode::make(this, vak->element_klass()->as_value_klass(), ary, adr); push(vt); return; } @@ -1761,7 +1761,7 @@ } const TypeAryPtr* adr_type = TypeAryPtr::OOPS; - Node* oop = c->as_ValueType()->allocate(this); + Node* oop = c->as_ValueType()->allocate(this)->get_oop(); Node* store = store_oop_to_array(control(), a, d, adr_type, oop, elemtype->make_oopptr(), T_OBJECT, StoreNode::release_if_reference(T_OBJECT)); break; --- old/src/share/vm/opto/parse3.cpp 2017-09-21 09:18:03.548998246 +0200 +++ new/src/share/vm/opto/parse3.cpp 2017-09-21 09:18:03.460998248 +0200 @@ -176,8 +176,7 @@ 
ciType* field_klass = field->type(); bool is_vol = field->is_volatile(); - // TODO change this when we support non-flattened value type fields that are non-static - bool flattened = (bt == T_VALUETYPE) && !field->is_static(); + bool flattened = field->is_flattened(); // Compute address and memory type. int offset = field->offset_in_bytes(); @@ -235,7 +234,7 @@ Node* ld = NULL; if (flattened) { // Load flattened value type - ld = ValueTypeNode::make(_gvn, field_klass->as_value_klass(), map()->memory(), obj, obj, field->holder(), offset); + ld = ValueTypeNode::make(this, field_klass->as_value_klass(), obj, obj, field->holder(), offset); } else { ld = make_load(NULL, adr, type, bt, adr_type, mo, LoadNode::DependsOnlyOnTest, needs_atomic_access); } @@ -281,6 +280,7 @@ void Parse::do_put_xxx(Node* obj, ciField* field, bool is_field) { bool is_vol = field->is_volatile(); + bool is_flattened = field->is_flattened(); // If reference is volatile, prevent following memory ops from // floating down past the volatile write. Also prevents commoning // another volatile read. 
@@ -314,10 +314,14 @@ } else { field_type = TypeOopPtr::make_from_klass(field->type()->as_klass()); } - if (bt == T_VALUETYPE && !field->is_static()) { - // Store flattened value type to non-static field + if (is_flattened) { + // Store flattened value type to a non-static field + assert(bt == T_VALUETYPE, "flattening is only supported for value type fields"); val->as_ValueType()->store_flattened(this, obj, obj, field->holder(), offset); } else { + if (bt == T_VALUETYPE) { + field_type = field_type->cast_to_ptr_type(TypePtr::BotPTR)->is_oopptr(); + } store_oop_to_object(control(), obj, adr, adr_type, val, field_type, bt, mo); } } else { @@ -651,7 +655,7 @@ // Create a value type node with the corresponding type ciValueKlass* vk = target_dvt_klass->as_value_klass(); - Node* vt = ValueTypeNode::make(gvn(), vk, map()->memory(), not_null_obj, not_null_obj, target_vcc_klass, vk->first_field_offset()); + Node* vt = ValueTypeNode::make(this, vk, not_null_obj, not_null_obj, target_vcc_klass, vk->first_field_offset()); // Push the value type onto the stack push(vt); --- old/src/share/vm/opto/phaseX.cpp 2017-09-21 09:18:03.856998242 +0200 +++ new/src/share/vm/opto/phaseX.cpp 2017-09-21 09:18:03.772998243 +0200 @@ -1178,18 +1178,18 @@ //------------------------------transform-------------------------------------- // Non-recursive: idealize Node 'n' with respect to its inputs and its value Node *PhaseIterGVN::transform( Node *n ) { - if (_delay_transform) { - // Register the node but don't optimize for now - register_new_node_with_optimizer(n); - return n; - } - // If brand new node, make space in type array, and give it a type. 
ensure_type_or_null(n); if (type_or_null(n) == NULL) { set_type_bottom(n); } + if (_delay_transform) { + // Add the node to the worklist but don't optimize for now + _worklist.push(n); + return n; + } + return transform_old(n); } --- old/src/share/vm/opto/phaseX.hpp 2017-09-21 09:18:04.200998237 +0200 +++ new/src/share/vm/opto/phaseX.hpp 2017-09-21 09:18:04.096998239 +0200 @@ -462,7 +462,7 @@ // Idealize new Node 'n' with respect to its inputs and its value virtual Node *transform( Node *a_node ); - virtual void record_for_igvn(Node *n) { } + virtual void record_for_igvn(Node *n) { _worklist.push(n); } virtual PhaseIterGVN *is_IterGVN() { return this; } --- old/src/share/vm/opto/type.cpp 2017-09-21 09:18:04.532998233 +0200 +++ new/src/share/vm/opto/type.cpp 2017-09-21 09:18:04.428998234 +0200 @@ -261,7 +261,7 @@ return TypeRawPtr::make((address)(intptr_t)type->as_return_address()->bci()); case T_VALUETYPE: - if (type == ciEnv::current()->___Value_klass()) { + if (type->is__Value()) { return TypeValueTypePtr::NOTNULL; } else { return TypeValueType::make(type->as_value_klass()); @@ -637,6 +637,7 @@ TypeAryPtr::_array_body_type[T_OBJECT] = TypeAryPtr::OOPS; TypeAryPtr::_array_body_type[T_ARRAY] = TypeAryPtr::OOPS; // arrays are stored in oop arrays TypeAryPtr::_array_body_type[T_VALUETYPE] = TypeAryPtr::OOPS; + TypeAryPtr::_array_body_type[T_VALUETYPEPTR] = NULL; TypeAryPtr::_array_body_type[T_BYTE] = TypeAryPtr::BYTES; TypeAryPtr::_array_body_type[T_BOOLEAN] = TypeAryPtr::BYTES; // boolean[] is a byte array TypeAryPtr::_array_body_type[T_SHORT] = TypeAryPtr::SHORTS; @@ -687,6 +688,7 @@ _const_basic_type[T_FLOAT] = Type::FLOAT; _const_basic_type[T_DOUBLE] = Type::DOUBLE; _const_basic_type[T_OBJECT] = TypeInstPtr::BOTTOM; + _const_basic_type[T_VALUETYPEPTR]= TypeInstPtr::BOTTOM; _const_basic_type[T_ARRAY] = TypeInstPtr::BOTTOM; // there is no separate bottom for arrays _const_basic_type[T_VALUETYPE] = TypeInstPtr::BOTTOM; _const_basic_type[T_VOID] = TypePtr::NULL_PTR; 
// reflection represents void this way @@ -704,6 +706,7 @@ _zero_type[T_FLOAT] = TypeF::ZERO; _zero_type[T_DOUBLE] = TypeD::ZERO; _zero_type[T_OBJECT] = TypePtr::NULL_PTR; + _zero_type[T_VALUETYPEPTR]= TypePtr::NULL_PTR; _zero_type[T_ARRAY] = TypePtr::NULL_PTR; // null array is null oop _zero_type[T_VALUETYPE] = TypePtr::NULL_PTR; _zero_type[T_ADDRESS] = TypePtr::NULL_PTR; // raw pointers use the same null @@ -1928,10 +1931,13 @@ static void collect_value_fields(ciValueKlass* vk, const Type** field_array, uint& pos) { for (int j = 0; j < vk->nof_nonstatic_fields(); j++) { - ciField* f = vk->nonstatic_field_at(j); - BasicType bt = f->type()->basic_type(); - assert(bt < T_VALUETYPE && bt >= T_BOOLEAN, "not yet supported"); - field_array[pos++] = Type::get_const_type(f->type()); + ciField* field = vk->nonstatic_field_at(j); + BasicType bt = field->type()->basic_type(); + const Type* ft = Type::get_const_type(field->type()); + if (bt == T_VALUETYPE) { + ft = ft->isa_valuetypeptr()->cast_to_ptr_type(TypePtr::BotPTR); + } + field_array[pos++] = ft; if (bt == T_LONG || bt == T_DOUBLE) { field_array[pos++] = Type::HALF; } @@ -2004,7 +2010,7 @@ if (vt_fields_as_args) { for (int i = 0; i < sig->count(); i++) { ciType* type = sig->type_at(i); - if (type->basic_type() == T_VALUETYPE && type != ciEnv::current()->___Value_klass()) { + if (type->basic_type() == T_VALUETYPE && !type->is__Value()) { assert(type->is_valuetype(), "inconsistent type"); ciValueKlass* vk = (ciValueKlass*)type; vt_extra += vk->value_arg_slots()-1; @@ -2017,8 +2023,7 @@ const Type **field_array; if (recv != NULL) { arg_cnt++; - bool vt_fields_for_recv = vt_fields_as_args && recv->is_valuetype() && - recv != ciEnv::current()->___Value_klass(); + bool vt_fields_for_recv = vt_fields_as_args && recv->is_valuetype() && !recv->is__Value(); if (vt_fields_for_recv) { ciValueKlass* vk = (ciValueKlass*)recv; vt_extra += vk->value_arg_slots()-1; @@ -2062,7 +2067,7 @@ break; case T_VALUETYPE: { 
assert(type->is_valuetype(), "inconsistent type"); - if (vt_fields_as_args && type != ciEnv::current()->___Value_klass()) { + if (vt_fields_as_args && !type->is__Value()) { ciValueKlass* vk = (ciValueKlass*)type; collect_value_fields(vk, field_array, pos); } else { @@ -2432,10 +2437,11 @@ //------------------------------dump2------------------------------------------ #ifndef PRODUCT void TypeValueType::dump2(Dict &d, uint depth, outputStream* st) const { - st->print("valuetype[%d]:{", _vk->field_count()); - st->print("%s", _vk->field_count() != 0 ? _vk->field_type_by_index(0)->name() : "empty"); - for (int i = 1; i < _vk->field_count(); ++i) { - st->print(", %s", _vk->field_type_by_index(i)->name()); + int count = _vk->nof_declared_nonstatic_fields(); + st->print("valuetype[%d]:{", count); + st->print("%s", count != 0 ? _vk->declared_nonstatic_field_at(0)->type()->name() : "empty"); + for (int i = 1; i < count; ++i) { + st->print(", %s", _vk->declared_nonstatic_field_at(i)->type()->name()); } st->print("}"); } @@ -3177,8 +3183,8 @@ ciField* field = vk->get_field_by_offset(foffset, false); assert(field != NULL, "missing field"); BasicType bt = field->layout_type(); - assert(bt != T_VALUETYPE, "should be flattened"); - _is_ptr_to_narrowoop = (bt == T_OBJECT || bt == T_ARRAY); + assert(bt != T_VALUETYPEPTR, "unexpected type"); + _is_ptr_to_narrowoop = (bt == T_OBJECT || bt == T_ARRAY || bt == T_VALUETYPE); } } else if (klass()->is_instance_klass()) { ciInstanceKlass* ik = klass()->as_instance_klass(); @@ -3213,7 +3219,9 @@ if (field != NULL) { BasicType basic_elem_type = field->layout_type(); _is_ptr_to_narrowoop = UseCompressedOops && (basic_elem_type == T_OBJECT || + basic_elem_type == T_VALUETYPE || basic_elem_type == T_ARRAY); + assert(basic_elem_type != T_VALUETYPEPTR, "unexpected type"); } else if (klass()->equals(ciEnv::current()->Object_klass())) { // Compile::find_alias_type() cast exactness on all types to verify // that it does not affect alias type. 
@@ -4748,12 +4756,21 @@ if (elemtype->isa_valuetype()) { uint header = arrayOopDesc::base_offset_in_bytes(T_OBJECT); if (offset >= (intptr_t)header) { + // Try to get the field of the value type array element we are pointing to ciKlass* arytype_klass = klass(); ciValueArrayKlass* vak = arytype_klass->as_value_array_klass(); + ciValueKlass* vk = vak->element_klass()->as_value_klass(); int shift = vak->log2_element_size(); - intptr_t field_offset = ((offset - header) & ((1 << shift) - 1)); - - return with_field_offset(field_offset)->add_offset(offset - field_offset); + int mask = (1 << shift) - 1; + intptr_t field_offset = ((offset - header) & mask); + ciField* field = vk->get_field_by_offset(field_offset + vk->first_field_offset(), false); + if (field == NULL) { + // This may happen with nested AddP(base, AddP(base, base, offset), longcon(16)) + return add_offset(offset); + } else { + assert(_field_offset.get() <= 0, "should not have field_offset"); + return with_field_offset(field_offset)->add_offset(offset - field_offset); + } } } } @@ -4767,7 +4784,7 @@ const TypeValueTypePtr* TypeValueTypePtr::NOTNULL; //------------------------------make------------------------------------------- -const TypeValueTypePtr* TypeValueTypePtr::make(const TypeValueType* vt, PTR ptr, ciObject* o, Offset offset, int instance_id, const TypePtr* speculative, int inline_depth) { +const TypeValueTypePtr* TypeValueTypePtr::make(const TypeValueType* vt, PTR ptr, ciObject* o, Offset offset, int instance_id, const TypePtr* speculative, int inline_depth, bool narrow) { return (TypeValueTypePtr*)(new TypeValueTypePtr(vt, ptr, o, offset, instance_id, speculative, inline_depth))->hashcons(); } @@ -4878,16 +4895,15 @@ ciObject* tp_oop = tp->const_oop(); const TypeValueType* vt = NULL; if (_vt != tp->_vt) { - ciKlass* __value_klass = ciEnv::current()->___Value_klass(); - assert(klass() == __value_klass || tp->klass() == __value_klass, "impossible meet"); + assert(is__Value() || tp->is__Value(), 
"impossible meet"); if (above_centerline(ptr)) { - vt = klass() == __value_klass ? tp->_vt : _vt; + vt = is__Value() ? tp->_vt : _vt; } else if (above_centerline(this->_ptr) && !above_centerline(tp->_ptr)) { vt = tp->_vt; } else if (above_centerline(tp->_ptr) && !above_centerline(this->_ptr)) { vt = _vt; } else { - vt = klass() == __value_klass ? _vt : tp->_vt; + vt = is__Value() ? _vt : tp->_vt; } } else { vt = _vt; @@ -4927,11 +4943,9 @@ return java_add(_vt->hash(), TypeOopPtr::hash()); } -//------------------------------empty------------------------------------------ -// TRUE if Type is a type with no values, FALSE otherwise. -bool TypeValueTypePtr::empty(void) const { - // FIXME - return false; +//------------------------------is__Value-------------------------------------- +bool TypeValueTypePtr::is__Value() const { + return klass()->equals(TypeKlassPtr::VALUE->klass()); } //------------------------------dump2------------------------------------------ @@ -5713,8 +5727,7 @@ domain_cc = TypeTuple::make_domain(method->holder(), method->signature(), ValueTypePassFieldsAsArgs); } const TypeTuple *range_sig = TypeTuple::make_range(method->signature(), false); - bool as_fields = ValueTypeReturnedAsFields; - const TypeTuple *range_cc = TypeTuple::make_range(method->signature(), as_fields); + const TypeTuple *range_cc = TypeTuple::make_range(method->signature(), ValueTypeReturnedAsFields); tf = TypeFunc::make(domain_sig, domain_cc, range_sig, range_cc); C->set_last_tf(method, tf); // fill cache return tf; --- old/src/share/vm/opto/type.hpp 2017-09-21 09:18:04.876998228 +0200 +++ new/src/share/vm/opto/type.hpp 2017-09-21 09:18:04.792998229 +0200 @@ -1328,7 +1328,7 @@ public: // Make a pointer to a value type static const TypeValueTypePtr* make(const TypeValueType* vt, PTR ptr = TypePtr::BotPTR, ciObject* o = NULL, Offset offset = Offset(0), - int instance_id = InstanceBot, const TypePtr* speculative = NULL, int inline_depth = InlineDepthBottom); + int instance_id = 
InstanceBot, const TypePtr* speculative = NULL, int inline_depth = InlineDepthBottom, bool narrow = false); // Make a pointer to a value type static const TypeValueTypePtr* make(PTR ptr, ciValueKlass* vk, ciObject* o = NULL) { return make(TypeValueType::make(vk), ptr, o); } // Make a pointer to a constant value type @@ -1343,7 +1343,7 @@ virtual bool eq(const Type* t) const; virtual int hash() const; // Type specific hashing - virtual bool empty(void) const; // TRUE if type is vacuous + bool is__Value() const; virtual const Type* xmeet_helper(const Type* t) const; virtual const Type* xdual() const; --- old/src/share/vm/opto/valuetypenode.cpp 2017-09-21 09:18:05.180998224 +0200 +++ new/src/share/vm/opto/valuetypenode.cpp 2017-09-21 09:18:05.092998225 +0200 @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -39,6 +39,7 @@ // Create a PhiNode for merging the oop values const TypeValueTypePtr* vtptr = value_type_ptr(); + vtptr = vtptr->cast_to_ptr_type(TypePtr::BotPTR)->is_valuetypeptr(); PhiNode* oop = PhiNode::make(region, vt->get_oop(), vtptr); gvn->set_type(oop, vtptr); vt->set_oop(oop); @@ -123,11 +124,17 @@ int index = value_klass()->field_index_by_offset(offset); int sub_offset = offset - field_offset(index); Node* value = field_value(index); + assert(value != NULL, "field value not found"); if (recursive && value->is_ValueType()) { - // Flattened value type field ValueTypeNode* vt = value->as_ValueType(); - sub_offset += vt->value_klass()->first_field_offset(); // Add header size - return vt->field_value_by_offset(sub_offset); + if (field_is_flattened(index)) { + // Flattened value type field + sub_offset += vt->value_klass()->first_field_offset(); // Add header size + return 
vt->field_value_by_offset(sub_offset, recursive); + } else { + assert(sub_offset == 0, "should not have a sub offset"); + return vt; + } } assert(!(recursive && value->is_ValueType()), "should not be a value type"); assert(sub_offset == 0, "offset mismatch"); @@ -141,17 +148,22 @@ int ValueTypeBaseNode::field_offset(uint index) const { assert(index < field_count(), "index out of bounds"); - return value_klass()->field_offset_by_index(index); + return value_klass()->declared_nonstatic_field_at(index)->offset(); } ciType* ValueTypeBaseNode::field_type(uint index) const { assert(index < field_count(), "index out of bounds"); - return value_klass()->field_type_by_index(index); + return value_klass()->declared_nonstatic_field_at(index)->type(); +} + +bool ValueTypeBaseNode::field_is_flattened(uint index) const { + assert(index < field_count(), "index out of bounds"); + return value_klass()->declared_nonstatic_field_at(index)->is_flattened(); } -int ValueTypeBaseNode::make_scalar_in_safepoint(SafePointNode* sfpt, Node* root, PhaseGVN* gvn) { +int ValueTypeBaseNode::make_scalar_in_safepoint(Unique_Node_List& worklist, SafePointNode* sfpt, Node* root, PhaseGVN* gvn) { ciValueKlass* vk = value_klass(); - uint nfields = vk->flattened_field_count(); + uint nfields = vk->nof_nonstatic_fields(); JVMState* jvms = sfpt->jvms(); int start = jvms->debug_start(); int end = jvms->debug_end(); @@ -170,7 +182,14 @@ for (uint j = 0; j < nfields; ++j) { int offset = vk->nonstatic_field_at(j)->offset(); Node* value = field_value_by_offset(offset, true /* include flattened value type fields */); - assert(value != NULL, ""); + if (value->is_ValueType()) { + if (value->as_ValueType()->is_allocated(gvn)) { + value = value->as_ValueType()->get_oop(); + } else { + // Add non-flattened value type field to the worklist to process later + worklist.push(value); + } + } sfpt->add_req(value); } jvms->set_endoff(sfpt->req()); @@ -182,28 +201,34 @@ } void 
ValueTypeBaseNode::make_scalar_in_safepoints(Node* root, PhaseGVN* gvn) { + Unique_Node_List worklist; for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { Node* u = fast_out(i); if (u->is_SafePoint() && (!u->is_Call() || u->as_Call()->has_debug_use(this))) { SafePointNode* sfpt = u->as_SafePoint(); Node* in_oop = get_oop(); const Type* oop_type = in_oop->bottom_type(); - assert(Opcode() == Op_ValueTypePtr || TypePtr::NULL_PTR->higher_equal(oop_type), "already heap allocated value type should be linked directly"); - int nb = make_scalar_in_safepoint(sfpt, root, gvn); + assert(Opcode() == Op_ValueTypePtr || !isa_ValueType()->is_allocated(gvn), "already heap allocated value types should be linked directly"); + int nb = make_scalar_in_safepoint(worklist, sfpt, root, gvn); --i; imax -= nb; } } + + for (uint next = 0; next < worklist.size(); ++next) { + Node* vt = worklist.at(next); + vt->as_ValueType()->make_scalar_in_safepoints(root, gvn); + } } -void ValueTypeBaseNode::make(PhaseGVN* gvn, Node* n, ValueTypeBaseNode* vt, ciValueKlass* base_vk, int base_offset, int base_input, bool in) { +void ValueTypeBaseNode::make(PhaseGVN* gvn, Node*& ctl, Node* mem, Node* n, ValueTypeBaseNode* vt, ciValueKlass* base_vk, int base_offset, int base_input, bool in) { assert(base_offset >= 0, "offset in value type always positive"); for (uint i = 0; i < vt->field_count(); i++) { ciType* field_type = vt->field_type(i); int offset = base_offset + vt->field_offset(i); - if (field_type->is_valuetype()) { + if (field_type->is_valuetype() && vt->field_is_flattened(i)) { ciValueKlass* embedded_vk = field_type->as_value_klass(); ValueTypeNode* embedded_vt = ValueTypeNode::make(*gvn, embedded_vk); - ValueTypeBaseNode::make(gvn, n, embedded_vt, base_vk, offset - vt->value_klass()->first_field_offset(), base_input, in); + ValueTypeBaseNode::make(gvn, ctl, mem, n, embedded_vt, base_vk, offset - vt->value_klass()->first_field_offset(), base_input, in); vt->set_field_value(i, 
gvn->transform(embedded_vt)); } else { int j = 0; int extra = 0; @@ -231,6 +256,11 @@ parm = gvn->transform(new ProjNode(n->as_Call(), base_input + j + extra)); } } + if (field_type->is_valuetype()) { + // Non-flattened value type field, check for null + parm = ValueTypeNode::make(*gvn, ctl, mem, parm, /* null_check */ true); + + } vt->set_field_value(i, parm); // Record all these guys for later GVN. gvn->record_for_igvn(parm); @@ -238,16 +268,16 @@ } } -void ValueTypeBaseNode::load(PhaseGVN& gvn, Node* mem, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) { +void ValueTypeBaseNode::load(PhaseGVN& gvn, Node*& ctl, Node* mem, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) { // Initialize the value type by loading its field values from // memory and adding the values as input edges to the node. for (uint i = 0; i < field_count(); ++i) { int offset = holder_offset + field_offset(i); ciType* ftype = field_type(i); Node* value = NULL; - if (ftype->is_valuetype()) { + if (ftype->is_valuetype() && field_is_flattened(i)) { // Recursively load the flattened value type field - value = ValueTypeNode::make(gvn, ftype->as_value_klass(), mem, base, ptr, holder, offset); + value = ValueTypeNode::make(gvn, ftype->as_value_klass(), ctl, mem, base, ptr, holder, offset); } else { const Type* con_type = NULL; if (base->is_Con()) { @@ -261,14 +291,17 @@ } if (con_type != NULL) { // Found a constant field value - value = gvn.makecon(con_type); + value = gvn.transform(gvn.makecon(con_type)); + if (con_type->isa_valuetypeptr()) { + // Constant, non-flattened value type field + value = ValueTypeNode::make(gvn, ctl, mem, value); + } } else { // Load field value from memory const Type* base_type = gvn.type(base); const TypePtr* adr_type = NULL; if (base_type->isa_aryptr()) { - // In the case of a flattened value type array, each field - // has its own slice + // In the case of a flattened value type array, each field has its own slice adr_type = 
base_type->is_aryptr()->with_field_offset(offset)->add_offset(Type::OffsetBot); } else { ciField* field = holder->get_field_by_offset(offset, false); @@ -276,21 +309,30 @@ } Node* adr = gvn.transform(new AddPNode(base, ptr, gvn.MakeConX(offset))); BasicType bt = type2field[ftype->basic_type()]; - value = LoadNode::make(gvn, NULL, mem, adr, adr_type, Type::get_const_type(ftype), bt, MemNode::unordered); + const Type* ft = Type::get_const_type(ftype); + if (bt == T_VALUETYPE) { + ft = ft->is_valuetypeptr()->cast_to_ptr_type(TypePtr::BotPTR); + } + assert(is_java_primitive(bt) || adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent"); + value = gvn.transform(LoadNode::make(gvn, NULL, mem, adr, adr_type, ft, bt, MemNode::unordered)); + if (bt == T_VALUETYPE) { + // Non-flattened value type field, check for null + value = ValueTypeNode::make(gvn, ctl, mem, value, /* null_check */ true); + } } } - set_field_value(i, gvn.transform(value)); + set_field_value(i, value); } } -void ValueTypeBaseNode::store_flattened(PhaseGVN* gvn, Node* ctl, MergeMemNode* mem, Node* base, ciValueKlass* holder, int holder_offset) const { +void ValueTypeBaseNode::store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) const { // The value type is embedded into the object without an oop header. Subtract the // offset of the first field to account for the missing header when storing the values. 
holder_offset -= value_klass()->first_field_offset(); - store(gvn, ctl, mem, base, holder, holder_offset); + store(kit, base, ptr, holder, holder_offset); } -void ValueTypeBaseNode::store(PhaseGVN* gvn, Node* ctl, MergeMemNode* mem, Node* base, ciValueKlass* holder, int holder_offset) const { +void ValueTypeBaseNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) const { if (holder == NULL) { holder = value_klass(); } @@ -298,36 +340,107 @@ for (uint i = 0; i < field_count(); ++i) { int offset = holder_offset + field_offset(i); Node* value = field_value(i); - if (value->is_ValueType()) { + if (value->is_ValueType() && field_is_flattened(i)) { // Recursively store the flattened value type field - value->isa_ValueTypeBase()->store_flattened(gvn, ctl, mem, base, holder, offset); + value->isa_ValueType()->store_flattened(kit, base, ptr, holder, offset); } else { - const Type* base_type = gvn->type(base); + const Type* base_type = kit->gvn().type(base); const TypePtr* adr_type = NULL; if (base_type->isa_aryptr()) { // In the case of a flattened value type array, each field has its own slice adr_type = base_type->is_aryptr()->with_field_offset(offset)->add_offset(Type::OffsetBot); } else { ciField* field = holder->get_field_by_offset(offset, false); - adr_type = gvn->C->alias_type(field)->adr_type(); + adr_type = kit->C->alias_type(field)->adr_type(); } - Node* adr = gvn->transform(new AddPNode(base, base, gvn->MakeConX(offset))); + Node* adr = kit->basic_plus_adr(base, ptr, offset); BasicType bt = type2field[field_type(i)->basic_type()]; - uint alias_idx = gvn->C->get_alias_index(adr_type); - Node* st = StoreNode::make(*gvn, ctl, mem->memory_at(alias_idx), adr, adr_type, value, bt, MemNode::unordered); - mem->set_memory_at(alias_idx, gvn->transform(st)); + if (is_java_primitive(bt)) { + kit->store_to_memory(kit->control(), adr, value, bt, adr_type, MemNode::unordered); + } else { + const TypeOopPtr* ft = 
TypeOopPtr::make_from_klass(field_type(i)->as_klass()); + // Field may be NULL + ft = ft->cast_to_ptr_type(TypePtr::BotPTR)->is_oopptr(); + assert(adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent"); + bool is_array = base_type->isa_aryptr() != NULL; + kit->store_oop(kit->control(), base, adr, adr_type, value, ft, bt, is_array, MemNode::unordered); + } } } } +ValueTypeBaseNode* ValueTypeBaseNode::allocate(GraphKit* kit) { + Node* in_oop = get_oop(); + Node* null_ctl = kit->top(); + // Check if value type is already allocated + Node* not_null_oop = kit->null_check_oop(in_oop, &null_ctl); + if (null_ctl->is_top()) { + // Value type is allocated + return this; + } + // Not able to prove that value type is allocated. + // Emit runtime check that may be folded later. + assert(!is_allocated(&kit->gvn()), "should not be allocated"); + const TypeValueTypePtr* vtptr_type = bottom_type()->isa_valuetypeptr(); + if (vtptr_type == NULL) { + vtptr_type = TypeValueTypePtr::make(bottom_type()->isa_valuetype(), TypePtr::NotNull); + } + RegionNode* region = new RegionNode(3); + PhiNode* oop = new PhiNode(region, vtptr_type); + PhiNode* io = new PhiNode(region, Type::ABIO); + PhiNode* mem = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM); + + // Oop is non-NULL, use it + region->init_req(1, kit->control()); + oop ->init_req(1, not_null_oop); + io ->init_req(1, kit->i_o()); + mem ->init_req(1, kit->merged_memory()); + + // Oop is NULL, allocate value type + kit->set_control(null_ctl); + kit->kill_dead_locals(); + ciValueKlass* vk = value_klass(); + Node* klass_node = kit->makecon(TypeKlassPtr::make(vk)); + Node* alloc_oop = kit->new_instance(klass_node, NULL, NULL, false, this); + // Write field values to memory + store(kit, alloc_oop, alloc_oop, vk); + region->init_req(2, kit->control()); + oop ->init_req(2, alloc_oop); + io ->init_req(2, kit->i_o()); + mem ->init_req(2, kit->merged_memory()); + + // Update GraphKit + 
kit->set_control(kit->gvn().transform(region)); + kit->set_i_o(kit->gvn().transform(io)); + kit->set_all_memory(kit->gvn().transform(mem)); + kit->record_for_igvn(region); + kit->record_for_igvn(oop); + kit->record_for_igvn(io); + kit->record_for_igvn(mem); + + // Use cloned ValueTypeNode to propagate oop from now on + Node* res_oop = kit->gvn().transform(oop); + ValueTypeBaseNode* vt = clone()->as_ValueTypeBase(); + vt->set_oop(res_oop); + vt = kit->gvn().transform(vt)->as_ValueTypeBase(); + kit->replace_in_map(this, vt); + return vt; +} + +bool ValueTypeBaseNode::is_allocated(PhaseGVN* phase) const { + Node* oop = get_oop(); + const Type* oop_type = (phase != NULL) ? phase->type(oop) : oop->bottom_type(); + return oop_type->meet(TypePtr::NULL_PTR) != oop_type; +} + // When a call returns multiple values, it has several result // projections, one per field. Replacing the result of the call by a // value type node (after late inlining) requires that for each result // projection, we find the corresponding value type field. 
-void ValueTypeBaseNode::replace_call_results(Node* call, Compile* C) { +void ValueTypeBaseNode::replace_call_results(GraphKit* kit, Node* call, Compile* C) { ciValueKlass* vk = value_klass(); for (DUIterator_Fast imax, i = call->fast_outs(imax); i < imax; i++) { - ProjNode *pn = call->fast_out(i)->as_Proj(); + ProjNode* pn = call->fast_out(i)->as_Proj(); uint con = pn->_con; if (con >= TypeFunc::Parms+1) { uint field_nb = con - (TypeFunc::Parms+1); @@ -341,7 +454,10 @@ } ciField* f = vk->nonstatic_field_at(field_nb - extra); Node* field = field_value_by_offset(f->offset(), true); - + if (field->is_ValueType()) { + assert(f->is_flattened(), "should be flattened"); + field = field->as_ValueType()->allocate(kit)->get_oop(); + } C->gvn_replace_by(pn, field); C->initial_gvn()->hash_delete(pn); pn->set_req(0, C->top()); @@ -350,79 +466,6 @@ } } -Node* ValueTypeBaseNode::allocate(const Type* type, Node*& ctl, Node*& mem, Node*& io, Node* frameptr, Node*& ex_ctl, Node*& ex_mem, Node*& ex_io, JVMState* jvms, PhaseIterGVN *igvn) { - ciValueKlass* vk = type->is_valuetypeptr()->value_type()->value_klass(); - Node* initial_mem = mem; - uint last = igvn->C->unique(); - MergeMemNode* all_mem = MergeMemNode::make(mem); - jint lhelper = vk->layout_helper(); - assert(lhelper != Klass::_lh_neutral_value, "unsupported"); - - AllocateNode* alloc = new AllocateNode(igvn->C, - AllocateNode::alloc_type(Type::TOP), - ctl, - mem, - io, - igvn->MakeConX(Klass::layout_helper_size_in_bytes(lhelper)), - igvn->makecon(TypeKlassPtr::make(vk)), - igvn->intcon(0), - NULL); - alloc->set_req(TypeFunc::FramePtr, frameptr); - igvn->C->add_safepoint_edges(alloc, jvms); - Node* n = igvn->transform(alloc); - assert(n == alloc, "node shouldn't go away"); - - ctl = igvn->transform(new ProjNode(alloc, TypeFunc::Control)); - mem = igvn->transform(new ProjNode(alloc, TypeFunc::Memory, true)); - all_mem->set_memory_at(Compile::AliasIdxRaw, mem); - - io = igvn->transform(new ProjNode(alloc, TypeFunc::I_O, 
true)); - Node* catc = igvn->transform(new CatchNode(ctl, io, 2)); - Node* norm = igvn->transform(new CatchProjNode(catc, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci)); - Node* excp = igvn->transform(new CatchProjNode(catc, CatchProjNode::catch_all_index, CatchProjNode::no_handler_bci)); - - ex_ctl = excp; - ex_mem = igvn->transform(all_mem); - ex_io = io; - - ctl = norm; - mem = igvn->transform(new ProjNode(alloc, TypeFunc::Memory)); - io = igvn->transform(new ProjNode(alloc, TypeFunc::I_O, false)); - Node* rawoop = igvn->transform(new ProjNode(alloc, TypeFunc::Parms)); - - MemBarNode* membar = MemBarNode::make(igvn->C, Op_Initialize, Compile::AliasIdxRaw, rawoop); - membar->set_req(TypeFunc::Control, ctl); - - InitializeNode* init = membar->as_Initialize(); - - const TypeOopPtr* oop_type = type->is_oopptr(); - MergeMemNode* minit_in = MergeMemNode::make(mem); - init->set_req(InitializeNode::Memory, minit_in); - n = igvn->transform(membar); - assert(n == membar, "node shouldn't go away"); - ctl = igvn->transform(new ProjNode(membar, TypeFunc::Control)); - mem = igvn->transform(new ProjNode(membar, TypeFunc::Memory)); - - MergeMemNode* out_mem_merge = MergeMemNode::make(initial_mem); - for (int i = 0, len = vk->nof_nonstatic_fields(); i < len; i++) { - ciField* field = vk->nonstatic_field_at(i); - if (field->offset() >= TrackedInitializationLimit * HeapWordSize) - continue; - int fieldidx = igvn->C->alias_type(field)->index(); - minit_in->set_memory_at(fieldidx, initial_mem); - out_mem_merge->set_memory_at(fieldidx, mem); - } - - n = igvn->transform(minit_in); - assert(n == minit_in, "node shouldn't go away"); - out_mem_merge->set_memory_at(Compile::AliasIdxRaw, mem); - - Node* javaoop = igvn->transform(new CheckCastPPNode(ctl, rawoop, oop_type)); - mem = igvn->transform(out_mem_merge); - - return javaoop; -} - ValueTypeNode* ValueTypeNode::make(PhaseGVN& gvn, ciValueKlass* klass) { // Create a new ValueTypeNode with uninitialized values and 
NULL oop const TypeValueType* type = TypeValueType::make(klass); @@ -446,38 +489,82 @@ return gvn.transform(vt); } -Node* ValueTypeNode::make(PhaseGVN& gvn, Node* mem, Node* oop) { +Node* ValueTypeNode::make(PhaseGVN& gvn, Node*& ctl, Node* mem, Node* oop, bool null_check) { // Create and initialize a ValueTypeNode by loading all field // values from a heap-allocated version and also save the oop. const TypeValueType* type = gvn.type(oop)->is_valuetypeptr()->value_type(); ValueTypeNode* vt = new ValueTypeNode(type, oop); - vt->load(gvn, mem, oop, oop, type->value_klass()); - assert(vt->is_allocated(&gvn), "value type should be allocated"); - assert(oop->is_Con() || oop->is_CheckCastPP() || oop->Opcode() == Op_ValueTypePtr || vt->is_loaded(&gvn, type) == oop, "value type should be loaded"); - return gvn.transform(vt); + + if (null_check && !vt->is_allocated(&gvn)) { + // Add oop null check + Node* chk = gvn.transform(new CmpPNode(oop, gvn.zerocon(T_VALUETYPE))); + Node* tst = gvn.transform(new BoolNode(chk, BoolTest::ne)); + IfNode* iff = gvn.transform(new IfNode(ctl, tst, PROB_MAX, COUNT_UNKNOWN))->as_If(); + Node* not_null = gvn.transform(new IfTrueNode(iff)); + Node* null = gvn.transform(new IfFalseNode(iff)); + Node* region = new RegionNode(3); + + // Load value type from memory if oop is non-null + oop = new CastPPNode(oop, TypePtr::NOTNULL); + oop->set_req(0, not_null); + oop = gvn.transform(oop); + vt->load(gvn, not_null, mem, oop, oop, type->value_klass()); + region->init_req(1, not_null); + + // Use default value type if oop is null + Node* def = make_default(gvn, type->value_klass()); + region->init_req(2, null); + + // Merge the two value types and update control + vt = vt->clone_with_phis(&gvn, region)->as_ValueType(); + vt->merge_with(&gvn, def->as_ValueType(), 2, true); + ctl = gvn.transform(region); + } else { + Node* init_ctl = ctl; + vt->load(gvn, ctl, mem, oop, oop, type->value_klass()); + vt = gvn.transform(vt)->as_ValueType(); + 
assert(vt->is_allocated(&gvn), "value type should be allocated"); + assert(init_ctl != ctl || oop->is_Con() || oop->is_CheckCastPP() || oop->Opcode() == Op_ValueTypePtr || + vt->is_loaded(&gvn, type) == oop, "value type should be loaded"); + } + return vt; } -Node* ValueTypeNode::make(PhaseGVN& gvn, ciValueKlass* vk, Node* mem, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset) { +Node* ValueTypeNode::make(GraphKit* kit, Node* oop, bool null_check) { + Node* ctl = kit->control(); + Node* vt = make(kit->gvn(), ctl, kit->merged_memory(), oop, null_check); + kit->set_control(ctl); + return vt; +} + +Node* ValueTypeNode::make(PhaseGVN& gvn, ciValueKlass* vk, Node*& ctl, Node* mem, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset) { // Create and initialize a ValueTypeNode by loading all field values from // a flattened value type field at 'holder_offset' or from a value type array. ValueTypeNode* vt = make(gvn, vk); // The value type is flattened into the object without an oop header. Subtract the // offset of the first field to account for the missing header when loading the values. 
holder_offset -= vk->first_field_offset(); - vt->load(gvn, mem, obj, ptr, holder, holder_offset); + vt->load(gvn, ctl, mem, obj, ptr, holder, holder_offset); assert(vt->is_loaded(&gvn, vt->type()->isa_valuetype()) != obj, "holder oop should not be used as flattened value type oop"); return gvn.transform(vt)->as_ValueType(); } -Node* ValueTypeNode::make(PhaseGVN& gvn, Node* n, ciValueKlass* vk, int base_input, bool in) { +Node* ValueTypeNode::make(GraphKit* kit, ciValueKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder, int holder_offset) { + Node* ctl = kit->control(); + Node* vt = make(kit->gvn(), vk, ctl, kit->merged_memory(), obj, ptr, holder, holder_offset); + kit->set_control(ctl); + return vt; +} + +Node* ValueTypeNode::make(PhaseGVN& gvn, Node*& ctl, Node* mem, Node* n, ciValueKlass* vk, int base_input, bool in) { ValueTypeNode* vt = ValueTypeNode::make(gvn, vk); - ValueTypeBaseNode::make(&gvn, n, vt, vk, 0, base_input, in); + ValueTypeBaseNode::make(&gvn, ctl, mem, n, vt, vk, 0, base_input, in); return gvn.transform(vt); } Node* ValueTypeNode::is_loaded(PhaseGVN* phase, const TypeValueType* t, Node* base, int holder_offset) { if (field_count() == 0) { - assert(t->value_klass() == phase->C->env()->___Value_klass(), "unexpected value type klass"); + assert(t->value_klass()->is__Value(), "unexpected value type klass"); assert(is_allocated(phase), "must be allocated"); return get_oop(); } @@ -516,102 +603,23 @@ return base; } -void ValueTypeNode::store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) const { - // The value type is embedded into the object without an oop header. Subtract the - // offset of the first field to account for the missing header when storing the values. 
- holder_offset -= value_klass()->first_field_offset(); - store(kit, base, ptr, holder, holder_offset); -} - -void ValueTypeNode::store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset) const { - // Write field values to memory - for (uint i = 0; i < field_count(); ++i) { - int offset = holder_offset + field_offset(i); +Node* ValueTypeNode::allocate_fields(GraphKit* kit) { + ValueTypeNode* vt = clone()->as_ValueType(); + for (uint i = 0; i < field_count(); i++) { Node* value = field_value(i); if (value->is_ValueType()) { - // Recursively store the flattened value type field - value->isa_ValueType()->store_flattened(kit, base, ptr, holder, offset); - } else { - const Type* base_type = kit->gvn().type(base); - const TypePtr* adr_type = NULL; - if (base_type->isa_aryptr()) { - // In the case of a flattened value type array, each field has its own slice - adr_type = base_type->is_aryptr()->with_field_offset(offset)->add_offset(Type::OffsetBot); + if (field_is_flattened(i)) { + value = value->as_ValueType()->allocate_fields(kit); } else { - ciField* field = holder->get_field_by_offset(offset, false); - adr_type = kit->C->alias_type(field)->adr_type(); - } - Node* adr = kit->basic_plus_adr(base, ptr, offset); - BasicType bt = type2field[field_type(i)->basic_type()]; - if (is_java_primitive(bt)) { - kit->store_to_memory(kit->control(), adr, value, bt, adr_type, MemNode::unordered); - } else { - const TypeOopPtr* ft = TypeOopPtr::make_from_klass(field_type(i)->as_klass()); - assert(adr->bottom_type()->is_ptr_to_narrowoop() == UseCompressedOops, "inconsistent"); - bool is_array = base_type->isa_aryptr() != NULL; - kit->store_oop(kit->control(), base, adr, adr_type, value, ft, bt, is_array, MemNode::unordered); + // Non-flattened value type field + value = value->as_ValueType()->allocate(kit); } + vt->set_field_value(i, value); } } -} - -Node* ValueTypeNode::allocate(GraphKit* kit) { - Node* in_oop = get_oop(); - Node* null_ctl = kit->top(); - 
// Check if value type is already allocated - Node* not_null_oop = kit->null_check_oop(in_oop, &null_ctl); - if (null_ctl->is_top()) { - // Value type is allocated - return not_null_oop; - } - // Not able to prove that value type is allocated. - // Emit runtime check that may be folded later. - assert(!is_allocated(&kit->gvn()), "should not be allocated"); - const TypeValueTypePtr* vtptr_type = TypeValueTypePtr::make(bottom_type()->isa_valuetype(), TypePtr::NotNull); - RegionNode* region = new RegionNode(3); - PhiNode* oop = new PhiNode(region, vtptr_type); - PhiNode* io = new PhiNode(region, Type::ABIO); - PhiNode* mem = new PhiNode(region, Type::MEMORY, TypePtr::BOTTOM); - - // Oop is non-NULL, use it - region->init_req(1, kit->control()); - oop ->init_req(1, not_null_oop); - io ->init_req(1, kit->i_o()); - mem ->init_req(1, kit->merged_memory()); - - // Oop is NULL, allocate value type - kit->set_control(null_ctl); - kit->kill_dead_locals(); - ciValueKlass* vk = value_klass(); - Node* klass_node = kit->makecon(TypeKlassPtr::make(vk)); - Node* alloc_oop = kit->new_instance(klass_node, NULL, NULL, false, this); - // Write field values to memory - store(kit, alloc_oop, alloc_oop, vk); - region->init_req(2, kit->control()); - oop ->init_req(2, alloc_oop); - io ->init_req(2, kit->i_o()); - mem ->init_req(2, kit->merged_memory()); - - // Update GraphKit - kit->set_control(kit->gvn().transform(region)); - kit->set_i_o(kit->gvn().transform(io)); - kit->set_all_memory(kit->gvn().transform(mem)); - kit->record_for_igvn(region); - kit->record_for_igvn(oop); - kit->record_for_igvn(io); - kit->record_for_igvn(mem); - - // Use cloned ValueTypeNode to propagate oop from now on - Node* res_oop = kit->gvn().transform(oop); - ValueTypeNode* vt = clone()->as_ValueType(); - vt->set_oop(res_oop); - kit->replace_in_map(this, kit->gvn().transform(vt)); - return res_oop; -} - -bool ValueTypeNode::is_allocated(PhaseGVN* phase) const { - const Type* oop_type = phase->type(get_oop()); - 
return oop_type->meet(TypePtr::NULL_PTR) != oop_type; + vt = kit->gvn().transform(vt)->as_ValueType(); + kit->replace_in_map(this, vt); + return vt; } Node* ValueTypeNode::tagged_klass(PhaseGVN& gvn) { @@ -626,7 +634,7 @@ n->init_req(pos, tagged_klass(kit.gvn())); } -uint ValueTypeNode::pass_fields(Node* n, int base_input, const GraphKit& kit, ciValueKlass* base_vk, int base_offset) { +uint ValueTypeNode::pass_fields(Node* n, int base_input, GraphKit& kit, bool assert_allocated, ciValueKlass* base_vk, int base_offset) { ciValueKlass* vk = value_klass(); if (base_vk == NULL) { base_vk = vk; @@ -636,9 +644,9 @@ ciType* f_type = field_type(i); int offset = base_offset + field_offset(i) - (base_offset > 0 ? vk->first_field_offset() : 0); Node* arg = field_value(i); - if (f_type->is_valuetype()) { + if (f_type->is_valuetype() && field_is_flattened(i)) { ciValueKlass* embedded_vk = f_type->as_value_klass(); - edges += arg->as_ValueType()->pass_fields(n, base_input, kit, base_vk, offset); + edges += arg->as_ValueType()->pass_fields(n, base_input, kit, assert_allocated, base_vk, offset); } else { int j = 0; int extra = 0; for (; j < base_vk->nof_nonstatic_fields(); j++) { @@ -652,6 +660,12 @@ extra++; } } + if (arg->is_ValueType()) { + // non-flattened value type field + ValueTypeNode* vt = arg->as_ValueType(); + assert(!assert_allocated || vt->is_allocated(&kit.gvn()), "value type field should be allocated"); + arg = vt->allocate(&kit)->get_oop(); + } n->init_req(base_input + j + extra, arg); edges++; BasicType bt = f_type->basic_type(); @@ -700,9 +714,8 @@ Node_List dead_allocations; // Search for allocations of this value type for (DUIterator_Fast imax, i = fast_outs(imax); i < imax; i++) { - Node* out1 = fast_out(i); - if (out1->is_Allocate() && out1->in(AllocateNode::ValueNode) == this) { - AllocateNode* alloc = out1->as_Allocate(); + AllocateNode* alloc = fast_out(i)->isa_Allocate(); + if (alloc != NULL && alloc->result_cast() != NULL && 
alloc->in(AllocateNode::ValueNode) == this) { Node* res_dom = NULL; if (is_allocated(igvn)) { // The value type is already allocated but still connected to an AllocateNode. @@ -723,10 +736,8 @@ } } if (res_dom != NULL) { - // Found a dominating allocation - Node* res = alloc->result_cast(); - assert(res != NULL, "value type allocation should not be dead"); // Move users to dominating allocation + Node* res = alloc->result_cast(); igvn->replace_node(res, res_dom); // The dominated allocation is now dead, remove the // value type node connection and adjust the iterator. @@ -760,7 +771,6 @@ } } - #ifndef PRODUCT void ValueTypeNode::dump_spec(outputStream* st) const { @@ -769,20 +779,19 @@ #endif -ValueTypePtrNode* ValueTypePtrNode::make(PhaseGVN* gvn, CheckCastPPNode* cast) { - ciValueKlass* vk = cast->type()->is_valuetypeptr()->value_type()->value_klass(); - ValueTypePtrNode* vt = new ValueTypePtrNode(vk, gvn->C); - assert(cast->in(1)->is_Proj(), "bad graph shape"); - ValueTypeBaseNode::make(gvn, cast->in(1)->in(0), vt, vk, 0, TypeFunc::Parms+1, false); +ValueTypePtrNode* ValueTypePtrNode::make(GraphKit* kit, ciValueKlass* vk, CallNode* call) { + ValueTypePtrNode* vt = new ValueTypePtrNode(vk, kit->zerocon(T_VALUETYPE), kit->C); + Node* ctl = kit->control(); + ValueTypeBaseNode::make(&kit->gvn(), ctl, kit->merged_memory(), call, vt, vk, 0, TypeFunc::Parms+1, false); + kit->set_control(ctl); return vt; } -ValueTypePtrNode* ValueTypePtrNode::make(PhaseGVN& gvn, Node* mem, Node* oop) { +ValueTypePtrNode* ValueTypePtrNode::make(PhaseGVN& gvn, Node*& ctl, Node* mem, Node* oop) { // Create and initialize a ValueTypePtrNode by loading all field // values from a heap-allocated version and also save the oop. 
ciValueKlass* vk = gvn.type(oop)->is_valuetypeptr()->value_type()->value_klass(); - ValueTypePtrNode* vtptr = new ValueTypePtrNode(vk, gvn.C); - vtptr->set_oop(oop); - vtptr->load(gvn, mem, oop, oop, vk); + ValueTypePtrNode* vtptr = new ValueTypePtrNode(vk, oop, gvn.C); + vtptr->load(gvn, ctl, mem, oop, oop, vk); return vtptr; } --- old/src/share/vm/opto/valuetypenode.hpp 2017-09-21 09:18:05.504998219 +0200 +++ new/src/share/vm/opto/valuetypenode.hpp 2017-09-21 09:18:05.404998221 +0200 @@ -47,13 +47,13 @@ virtual const TypeValueTypePtr* value_type_ptr() const = 0; // Get the klass defining the field layout of the value type virtual ciValueKlass* value_klass() const = 0; - int make_scalar_in_safepoint(SafePointNode* sfpt, Node* root, PhaseGVN* gvn); + int make_scalar_in_safepoint(Unique_Node_List& worklist, SafePointNode* sfpt, Node* root, PhaseGVN* gvn); - static void make(PhaseGVN* gvn, Node* n, ValueTypeBaseNode* vt, ciValueKlass* base_vk, int base_offset, int base_input, bool in); + static void make(PhaseGVN* gvn, Node*& ctl, Node* mem, Node* n, ValueTypeBaseNode* vt, ciValueKlass* base_vk, int base_offset, int base_input, bool in); public: // Support for control flow merges - bool has_phi_inputs(Node* region); + bool has_phi_inputs(Node* region); ValueTypeBaseNode* clone_with_phis(PhaseGVN* gvn, Node* region); ValueTypeBaseNode* merge_with(PhaseGVN* gvn, const ValueTypeBaseNode* other, int pnum, bool transform); @@ -68,23 +68,24 @@ void set_field_value(uint index, Node* value); int field_offset(uint index) const; ciType* field_type(uint index) const; + bool field_is_flattened(uint index) const; // Replace ValueTypeNodes in debug info at safepoints with SafePointScalarObjectNodes void make_scalar_in_safepoints(Node* root, PhaseGVN* gvn); - void store_flattened(PhaseGVN* gvn, Node* ctl, MergeMemNode* mem, Node* base, ciValueKlass* holder, int holder_offset) const; - void store(PhaseGVN* gvn, Node* ctl, MergeMemNode* mem, Node* base, ciValueKlass* holder = NULL, 
int holder_offset = 0) const; + // Store the value type as a flattened (headerless) representation + void store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder = NULL, int holder_offset = 0) const; + // Store the field values to memory + void store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset = 0) const; // Initialize the value type by loading its field values from memory - void load(PhaseGVN& gvn, Node* mem, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset = 0); + void load(PhaseGVN& gvn, Node*& ctl, Node* mem, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset = 0); - void replace_call_results(Node* call, Compile* C); + // Allocates the value type (if not yet allocated) and returns the oop + ValueTypeBaseNode* allocate(GraphKit* kit); + bool is_allocated(PhaseGVN* phase) const; - static Node* allocate(const Type* type, - Node*& ctl, Node*& mem, Node*& io, - Node* frameptr, - Node*& ex_ctl, Node*& ex_mem, Node*& ex_io, - JVMState* jvms, PhaseIterGVN *igvn); + void replace_call_results(GraphKit* kit, Node* call, Compile* C); }; //------------------------------ValueTypeNode------------------------------------- @@ -98,12 +99,6 @@ init_req(Oop, oop); } - ValueTypeNode(const TypeValueType* t, Node* oop, int field_count) - : ValueTypeBaseNode(t, Values + field_count) { - init_class_id(Class_ValueType); - init_req(Oop, oop); - } - // Checks if the value type is loaded from memory and if so returns the oop Node* is_loaded(PhaseGVN* phase, const TypeValueType* t, Node* base = NULL, int holder_offset = 0); @@ -117,24 +112,20 @@ // Create a new ValueTypeNode with default values static Node* make_default(PhaseGVN& gvn, ciValueKlass* vk); // Create a new ValueTypeNode and load its values from an oop - static Node* make(PhaseGVN& gvn, Node* mem, Node* oop); + static Node* make(GraphKit* kit, Node* oop, bool null_check = false); + static Node* make(PhaseGVN& gvn, Node*& ctl, Node* mem, 
Node* oop, bool null_check = false); // Create a new ValueTypeNode and load its values from a flattened value type field or array - static Node* make(PhaseGVN& gvn, ciValueKlass* vk, Node* mem, Node* obj, Node* ptr, ciInstanceKlass* holder = NULL, int holder_offset = 0); + static Node* make(GraphKit* kit, ciValueKlass* vk, Node* obj, Node* ptr, ciInstanceKlass* holder = NULL, int holder_offset = 0); + static Node* make(PhaseGVN& gvn, ciValueKlass* vk, Node*& ctl, Node* mem, Node* obj, Node* ptr, ciInstanceKlass* holder = NULL, int holder_offset = 0); // Create value type node from arguments at method entry and calls - static Node* make(PhaseGVN& gvn, Node* n, ciValueKlass* vk, int base_input, bool in); - - // Store the value type as a flattened (headerless) representation - void store_flattened(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder = NULL, int holder_offset = 0) const; - // Store the field values to memory - void store(GraphKit* kit, Node* base, Node* ptr, ciInstanceKlass* holder, int holder_offset = 0) const; + static Node* make(PhaseGVN& gvn, Node*& ctl, Node* mem, Node* n, ciValueKlass* vk, int base_input, bool in); - // Allocates the value type (if not yet allocated) and returns the oop - Node* allocate(GraphKit* kit); - bool is_allocated(PhaseGVN* phase) const; + // Allocate all non-flattened value type fields + Node* allocate_fields(GraphKit* kit); Node* tagged_klass(PhaseGVN& gvn); void pass_klass(Node* n, uint pos, const GraphKit& kit); - uint pass_fields(Node* call, int base_input, const GraphKit& kit, ciValueKlass* base_vk = NULL, int base_offset = 0); + uint pass_fields(Node* call, int base_input, GraphKit& kit, bool assert_allocated = false, ciValueKlass* base_vk = NULL, int base_offset = 0); // Allocation optimizations void remove_redundant_allocations(PhaseIterGVN* igvn, PhaseIdealLoop* phase); @@ -154,9 +145,10 @@ ciValueKlass* value_klass() const { return type()->is_valuetypeptr()->value_type()->value_klass(); } const 
TypeValueTypePtr* value_type_ptr() const { return bottom_type()->isa_valuetypeptr(); } - ValueTypePtrNode(ciValueKlass* vk, Compile* C) + ValueTypePtrNode(ciValueKlass* vk, Node* oop, Compile* C) : ValueTypeBaseNode(TypeValueTypePtr::make(TypePtr::NotNull, vk), Values + vk->nof_declared_nonstatic_fields()) { init_class_id(Class_ValueTypePtr); + init_req(Oop, oop); C->add_value_type_ptr(this); } public: @@ -171,8 +163,8 @@ C->add_value_type_ptr(this); } - static ValueTypePtrNode* make(PhaseGVN* gvn, CheckCastPPNode* cast); - static ValueTypePtrNode* make(PhaseGVN& gvn, Node* mem, Node* oop); + static ValueTypePtrNode* make(GraphKit* kit, ciValueKlass* vk, CallNode* call); + static ValueTypePtrNode* make(PhaseGVN& gvn, Node*& ctl, Node* mem, Node* oop); virtual int Opcode() const; }; --- old/src/share/vm/runtime/deoptimization.cpp 2017-09-21 09:18:05.840998215 +0200 +++ new/src/share/vm/runtime/deoptimization.cpp 2017-09-21 09:18:05.748998216 +0200 @@ -1027,12 +1027,18 @@ field._offset = fs.offset(); field._type = FieldType::basic_type(fs.signature()); if (field._type == T_VALUETYPE) { - // Resolve klass of flattened value type field - SignatureStream ss(fs.signature(), false); - Klass* vk = ss.as_klass(Handle(THREAD, klass->class_loader()), Handle(THREAD, klass->protection_domain()), SignatureStream::NCDFError, THREAD); - guarantee(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending"); - assert(vk->is_value(), "must be a ValueKlass"); - field._klass = InstanceKlass::cast(vk); + if (fs.is_flatten()) { + // Resolve klass of flattened value type field + SignatureStream ss(fs.signature(), false); + Klass* vk = ss.as_klass(Handle(THREAD, klass->class_loader()), Handle(THREAD, klass->protection_domain()), SignatureStream::NCDFError, THREAD); + guarantee(!HAS_PENDING_EXCEPTION, "Should not have any exceptions pending"); + assert(vk->is_value(), "must be a ValueKlass"); + field._klass = InstanceKlass::cast(vk); + } else { + // Non-flattened value type field + // 
TODO change this when we use T_VALUETYPEPTR + field._type = T_OBJECT; + } } fields->append(field); } --- old/src/share/vm/runtime/fieldDescriptor.cpp 2017-09-21 09:18:06.152998210 +0200 +++ new/src/share/vm/runtime/fieldDescriptor.cpp 2017-09-21 09:18:06.076998211 +0200 @@ -199,7 +199,7 @@ obj->obj_field(offset())->print_value_on(st); break; case T_VALUETYPE: - { + if (is_flatten()) { // Resolve klass of flattened value type field Thread* THREAD = Thread::current(); ResourceMark rm(THREAD); @@ -216,8 +216,12 @@ FieldPrinter print_field(st, obj); vk->do_nonstatic_fields(&print_field); return; // Do not print underlying representation - break; + } else { + st->print(" "); + NOT_LP64(as_int = obj->int_field(offset())); + obj->obj_field(offset())->print_value_on(st); } + break; default: ShouldNotReachHere(); break; --- old/src/share/vm/runtime/globals.hpp 2017-09-21 09:18:06.408998207 +0200 +++ new/src/share/vm/runtime/globals.hpp 2017-09-21 09:18:06.324998208 +0200 @@ -1310,6 +1310,9 @@ product(intx, ValueArrayElemMaxFlatSize, -1, \ "Max size for flattening value array elements, <0 no limit") \ \ + product(intx, ValueFieldMaxFlatSize, -1, \ + "Max size for flattening value type fields, <0 no limit") \ + \ product(intx, ValueArrayElemMaxFlatOops, 4, \ "Max nof embedded object references in a value type to flatten, <0 no limit") \ \ --- old/src/share/vm/runtime/sharedRuntime.cpp 2017-09-21 09:18:06.676998203 +0200 +++ new/src/share/vm/runtime/sharedRuntime.cpp 2017-09-21 09:18:06.600998204 +0200 @@ -2958,14 +2958,17 @@ SignatureStream ss(method->signature()); for (; !ss.at_return_type(); ss.next()) { BasicType bt = ss.type(); - if (bt == T_VALUETYPE) { + if (bt == T_VALUETYPE) { #ifdef ASSERT Thread* THREAD = Thread::current(); - Handle class_loader(THREAD, method->method_holder()->class_loader()); - Handle protection_domain(THREAD, method->method_holder()->protection_domain()); - Klass* k = ss.as_klass(class_loader, protection_domain, SignatureStream::ReturnNull, 
THREAD); - assert(k != NULL && !HAS_PENDING_EXCEPTION, "can't resolve klass"); - assert(k == SystemDictionary::___Value_klass(), "other values not supported"); + // Avoid class loading from compiler thread + if (THREAD->can_call_java()) { + Handle class_loader(THREAD, method->method_holder()->class_loader()); + Handle protection_domain(THREAD, method->method_holder()->protection_domain()); + Klass* k = ss.as_klass(class_loader, protection_domain, SignatureStream::ReturnNull, THREAD); + assert(k != NULL && !HAS_PENDING_EXCEPTION, "can't resolve klass"); + assert(k == SystemDictionary::___Value_klass(), "other values not supported"); + } #endif bt = T_VALUETYPEPTR; } --- old/src/share/vm/runtime/signature.cpp 2017-09-21 09:18:06.940998199 +0200 +++ new/src/share/vm/runtime/signature.cpp 2017-09-21 09:18:06.864998200 +0200 @@ -553,6 +553,7 @@ for (int i = 0; i < sig_extended.length(); i++) { if (!skip_vt) { BasicType bt = sig_extended.at(i)._bt; + // TODO change this when we use T_VALUETYPEPTR if (bt == T_VALUETYPE) { bt = T_VALUETYPEPTR; } --- old/src/share/vm/utilities/globalDefinitions.cpp 2017-09-21 09:18:07.184998196 +0200 +++ new/src/share/vm/utilities/globalDefinitions.cpp 2017-09-21 09:18:07.108998197 +0200 @@ -263,13 +263,13 @@ T_LONG, // T_LONG = 11, T_OBJECT, // T_OBJECT = 12, T_OBJECT, // T_ARRAY = 13, - T_VALUETYPE, // T_VALUETYPE =14 + T_VALUETYPE, // T_VALUETYPE = 14 T_VOID, // T_VOID = 15, T_ADDRESS, // T_ADDRESS = 16, T_NARROWOOP, // T_NARROWOOP = 17, T_METADATA, // T_METADATA = 18, T_NARROWKLASS, // T_NARROWKLASS = 19, - T_VALUETYPEPTR,// T_VALUETYPEPTR =20, + T_VALUETYPEPTR,// T_VALUETYPEPTR = 20, T_CONFLICT // T_CONFLICT = 21, }; @@ -295,7 +295,7 @@ T_NARROWOOP_aelem_bytes, // T_NARROWOOP= 17, T_OBJECT_aelem_bytes, // T_METADATA = 18, T_NARROWKLASS_aelem_bytes, // T_NARROWKLASS= 19, - T_VALUETYPEPTR_aelem_bytes,// T_VALUETYPE = 20 + T_VALUETYPEPTR_aelem_bytes,// T_VALUETYPEPTR = 20 0 // T_CONFLICT = 21, }; --- 
old/src/share/vm/utilities/globalDefinitions.hpp 2017-09-21 09:18:07.440998193 +0200 +++ new/src/share/vm/utilities/globalDefinitions.hpp 2017-09-21 09:18:07.364998194 +0200 @@ -621,7 +621,7 @@ case 'V': return T_VOID; case 'L': return T_OBJECT; case '[': return T_ARRAY; - case 'Q':return T_VALUETYPE; + case 'Q': return T_VALUETYPE; } return T_ILLEGAL; } @@ -686,9 +686,9 @@ T_NARROWKLASS_aelem_bytes = 4, T_VOID_aelem_bytes = 0, #ifdef _LP64 - T_VALUETYPEPTR_aelem_bytes= 4 -#else T_VALUETYPEPTR_aelem_bytes= 8 +#else + T_VALUETYPEPTR_aelem_bytes= 4 #endif }; --- old/test/compiler/valhalla/valuetypes/ValueTypeTestBench.java 2017-09-21 09:18:07.696998189 +0200 +++ new/test/compiler/valhalla/valuetypes/ValueTypeTestBench.java 2017-09-21 09:18:07.620998190 +0200 @@ -35,24 +35,39 @@ * @run main ClassFileInstaller sun.hotspot.WhiteBox * @run main ClassFileInstaller jdk.test.lib.Platform * @run main/othervm/timeout=120 -Xbootclasspath/a:. -ea -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions - * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:-TieredCompilation -XX:+VerifyAdapterSharing -XX:+VerifyStack + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:-TieredCompilation -XX:+AlwaysIncrementalInline * -XX:+EnableValhalla -XX:+EnableMVT -XX:+ValueTypePassFieldsAsArgs -XX:+ValueTypeReturnedAsFields -XX:+ValueArrayFlatten - * -XX:ValueArrayElemMaxFlatSize=-1 -XX:ValueArrayElemMaxFlatOops=-1 - * -Djdk.lang.reflect.DVT=true - * compiler.valhalla.valuetypes.ValueTypeTestBench + * -XX:ValueFieldMaxFlatSize=-1 -XX:ValueArrayElemMaxFlatSize=-1 -XX:ValueArrayElemMaxFlatOops=-1 + * -XX:ValueTypesBufferMaxMemory=0 + * -Djdk.lang.reflect.DVT=true compiler.valhalla.valuetypes.ValueTypeTestBench * @run main/othervm/timeout=120 -Xbootclasspath/a:. 
-ea -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions - * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:-TieredCompilation -XX:+VerifyStack - * -XX:+EnableValhalla -XX:+EnableMVT -XX:-ValueTypePassFieldsAsArgs -XX:-ValueTypeReturnedAsFields -XX:-ValueArrayFlatten - * -Djdk.lang.reflect.DVT=true - * compiler.valhalla.valuetypes.ValueTypeTestBench + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:-TieredCompilation -XX:-UseCompressedOops + * -XX:+EnableValhalla -XX:+EnableMVT -XX:-ValueTypePassFieldsAsArgs -XX:-ValueTypeReturnedAsFields -XX:+ValueArrayFlatten + * -XX:ValueFieldMaxFlatSize=-1 -XX:ValueArrayElemMaxFlatSize=-1 -XX:ValueArrayElemMaxFlatOops=-1 + * -XX:ValueTypesBufferMaxMemory=0 + * -Djdk.lang.reflect.DVT=true compiler.valhalla.valuetypes.ValueTypeTestBench + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -ea -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:-TieredCompilation -XX:-UseCompressedOops + * -XX:+EnableValhalla -XX:+EnableMVT -XX:+ValueTypePassFieldsAsArgs -XX:+ValueTypeReturnedAsFields -XX:-ValueArrayFlatten + * -XX:ValueFieldMaxFlatSize=0 -XX:ValueArrayElemMaxFlatSize=0 -XX:ValueArrayElemMaxFlatOops=0 + * -XX:ValueTypesBufferMaxMemory=0 + * -Djdk.lang.reflect.DVT=true -DVerifyIR=false compiler.valhalla.valuetypes.ValueTypeTestBench * @run main/othervm/timeout=120 -Xbootclasspath/a:. 
-ea -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:-TieredCompilation -XX:+AlwaysIncrementalInline - * -XX:+EnableValhalla -XX:+EnableMVT -XX:+ValueTypePassFieldsAsArgs -XX:+ValueTypeReturnedAsFields -XX:+ValueArrayFlatten - * -XX:ValueArrayElemMaxFlatSize=-1 -XX:ValueArrayElemMaxFlatOops=-1 - * -Djdk.lang.reflect.DVT=true - * compiler.valhalla.valuetypes.ValueTypeTestBench + * -XX:+EnableValhalla -XX:+EnableMVT -XX:-ValueTypePassFieldsAsArgs -XX:-ValueTypeReturnedAsFields -XX:-ValueArrayFlatten + * -XX:ValueFieldMaxFlatSize=0 -XX:ValueArrayElemMaxFlatSize=0 -XX:ValueArrayElemMaxFlatOops=0 + * -XX:ValueTypesBufferMaxMemory=0 + * -Djdk.lang.reflect.DVT=true -DVerifyIR=false compiler.valhalla.valuetypes.ValueTypeTestBench + * @run main/othervm/timeout=120 -Xbootclasspath/a:. -ea -XX:+IgnoreUnrecognizedVMOptions -XX:+UnlockDiagnosticVMOptions + * -XX:+UnlockExperimentalVMOptions -XX:+WhiteBoxAPI -XX:-TieredCompilation + * -XX:+EnableValhalla -XX:+EnableMVT -XX:+ValueTypePassFieldsAsArgs -XX:-ValueTypeReturnedAsFields -XX:+ValueArrayFlatten + * -XX:ValueFieldMaxFlatSize=0 -XX:ValueArrayElemMaxFlatSize=-1 -XX:ValueArrayElemMaxFlatOops=-1 + * -XX:ValueTypesBufferMaxMemory=0 + * -Djdk.lang.reflect.DVT=true -DVerifyIR=false compiler.valhalla.valuetypes.ValueTypeTestBench */ +// TODO remove -XX:ValueTypesBufferMaxMemory=0 when interpreter buffering is fixed + package compiler.valhalla.valuetypes; import compiler.whitebox.CompilerWhiteBoxTest; @@ -81,6 +96,7 @@ import java.util.Map; import java.util.regex.Matcher; import java.util.regex.Pattern; +import java.util.TreeMap; // Test value types __ByValue final class MyValue1 { @@ -132,8 +148,8 @@ v = setO(v, new Integer(x)); int[] oa = {x}; v = setOA(v, oa); - v = setV1(v, MyValue2.createWithFieldsInline(x, x < y)); - v = setV2(v, MyValue2.createWithFieldsInline(x, x > y)); + v = setV1(v, MyValue2.createWithFieldsInline(x, true)); + v = setV2(v, 
MyValue2.createWithFieldsInline(x, false)); v = setC(v, ValueTypeTestBench.rI); return v; } @@ -156,7 +172,7 @@ @ForceInline public void print() { - System.out.print("s=" + s + ", sf=" + sf + ", x=" + x + ", y=" + y + ", z=" + z + ", o=" + (o != null ? (Integer)o : "NULL") + ", v1["); + System.out.print("s=" + s + ", sf=" + sf + ", x=" + x + ", y=" + y + ", z=" + z + ", o=" + (o != null ? (Integer)o : "NULL") + ", oa=" + oa[0] + ", v1["); v1.print(); System.out.print("], v2["); v2.print(); @@ -214,17 +230,50 @@ } } +__ByValue final class MyValue2Inline { + final boolean b; + final long c; + + private MyValue2Inline() { + this.b = false; + this.c = 0; + } + + @ForceInline + __ValueFactory static MyValue2Inline setB(MyValue2Inline v, boolean b) { + v.b = b; + return v; + } + + @ForceInline + __ValueFactory static MyValue2Inline setC(MyValue2Inline v, long c) { + v.c = c; + return v; + } + + @ForceInline + __ValueFactory public static MyValue2Inline createDefault() { + return __MakeDefault MyValue2Inline(); + } + + @ForceInline + public static MyValue2Inline createWithFieldsInline(boolean b, long c) { + MyValue2Inline v = MyValue2Inline.createDefault(); + v = MyValue2Inline.setB(v, b); + v = MyValue2Inline.setC(v, c); + return v; + } +} + __ByValue final class MyValue2 { final int x; final byte y; - final boolean b; - final long c; + final MyValue2Inline v1; private MyValue2() { this.x = 0; this.y = 0; - this.b = false; - this.c = 0; + this.v1 = MyValue2Inline.createDefault(); } @ForceInline @@ -237,24 +286,23 @@ MyValue2 v = createDefaultInline(); v = setX(v, x); v = setY(v, (byte)x); - v = setB(v, b); - v = setC(v, ValueTypeTestBench.rL); + v = setV1(v, MyValue2Inline.createWithFieldsInline(b, ValueTypeTestBench.rL)); return v; } @ForceInline public long hash() { - return x + y + (b ? 0 : 1) + c; + return x + y + (v1.b ? 0 : 1) + v1.c; } @DontInline public long hashInterpreted() { - return x + y + (b ? 0 : 1) + c; + return x + y + (v1.b ? 
0 : 1) + v1.c; } @ForceInline public void print() { - System.out.print("x=" + x + "y=" + y + ", b=" + b + ", c=" + c); + System.out.print("x=" + x + ", y=" + y + ", b=" + v1.b + ", c=" + v1.c); } @ForceInline @@ -270,14 +318,43 @@ } @ForceInline - __ValueFactory static MyValue2 setC(MyValue2 v, long c) { - v.c = c; + __ValueFactory static MyValue2 setV1(MyValue2 v, MyValue2Inline v1) { + v.v1 = v1; return v; } +} + +__ByValue final class MyValue3Inline { + final float f7; + final double f8; + + private MyValue3Inline() { + this.f7 = 0; + this.f8 = 0; + } @ForceInline - __ValueFactory static MyValue2 setB(MyValue2 v, boolean b) { - v.b = b; + __ValueFactory static MyValue3Inline setF7(MyValue3Inline v, float f7) { + v.f7 = f7; + return v; + } + + @ForceInline + __ValueFactory static MyValue3Inline setF8(MyValue3Inline v, double f8) { + v.f8 = f8; + return v; + } + + @ForceInline + __ValueFactory public static MyValue3Inline createDefault() { + return __MakeDefault MyValue3Inline(); + } + + @ForceInline + public static MyValue3Inline createWithFieldsInline(float f7, double f8) { + MyValue3Inline v = createDefault(); + v = setF7(v, f7); + v = setF8(v, f8); return v; } } @@ -297,38 +374,7 @@ final double f4; final float f5; final double f6; - final float f7; - final double f8; - - private MyValue3(char c, - byte bb, - short s, - int i, - long l, - Object o, - float f1, - double f2, - float f3, - double f4, - float f5, - double f6, - float f7, - double f8) { - this.c = c; - this.bb = bb; - this.s = s; - this.i = i; - this.l = l; - this.o = o; - this.f1 = f1; - this.f2 = f2; - this.f3 = f3; - this.f4 = f4; - this.f5 = f5; - this.f6 = f6; - this.f7 = f7; - this.f8 = f8; - } + final MyValue3Inline v1; private MyValue3() { this.c = 0; @@ -343,8 +389,7 @@ this.f4 = 0; this.f5 = 0; this.f6 = 0; - this.f7 = 0; - this.f8 = 0; + this.v1 = MyValue3Inline.createDefault(); } @ForceInline @@ -420,14 +465,8 @@ } @ForceInline - __ValueFactory static MyValue3 setF7(MyValue3 v, float 
f7) { - v.f7 = f7; - return v; - } - - @ForceInline - __ValueFactory static MyValue3 setF8(MyValue3 v, double f8) { - v.f8 = f8; + __ValueFactory static MyValue3 setV1(MyValue3 v, MyValue3Inline v1) { + v.v1 = v1; return v; } @@ -452,8 +491,7 @@ v = setF4(v, r.nextDouble()); v = setF5(v, r.nextFloat()); v = setF6(v, r.nextDouble()); - v = setF7(v, r.nextFloat()); - v = setF8(v, r.nextDouble()); + v = setV1(v, MyValue3Inline.createWithFieldsInline(r.nextFloat(), r.nextDouble())); return v; } @@ -477,8 +515,7 @@ v = setF4(v, other.f4); v = setF5(v, other.f5); v = setF6(v, other.f6); - v = setF7(v, other.f7); - v = setF8(v, other.f8); + v = setV1(v, other.v1); return v; } @@ -496,8 +533,8 @@ Asserts.assertEQ(f4, other.f4); Asserts.assertEQ(f5, other.f5); Asserts.assertEQ(f6, other.f6); - Asserts.assertEQ(f7, other.f7); - Asserts.assertEQ(f8, other.f8); + Asserts.assertEQ(v1.f7, other.v1.f7); + Asserts.assertEQ(v1.f8, other.v1.f8); } } @@ -506,11 +543,6 @@ final MyValue3 v1; final MyValue3 v2; - private MyValue4(MyValue3 v1, MyValue3 v2) { - this.v1 = v1; - this.v2 = v2; - } - private MyValue4() { this.v1 = MyValue3.createDefault(); this.v2 = MyValue3.createDefault(); @@ -1006,6 +1038,7 @@ // Test OSR compilation @Test() + @Slow public long test23() { MyValue1 v = MyValue1.createWithFieldsInline(rI, rL); MyValue1[] va = new MyValue1[Math.abs(rI) % 3]; @@ -1679,6 +1712,7 @@ // Test loop peeling @Test(failOn = ALLOC + LOAD + STORE) + @Slow public void test57() { MyValue1 v = MyValue1.createWithFieldsInline(0, 1); // Trigger OSR compilation and loop peeling @@ -1698,6 +1732,7 @@ // Test loop peeling and unrolling @Test() + @Slow public void test58() { MyValue1 v1 = MyValue1.createWithFieldsInline(0, 0); MyValue1 v2 = MyValue1.createWithFieldsInline(1, 1); @@ -2574,6 +2609,7 @@ // Test correct handling of __Value merges through PhiNodes @Test() + @Slow public long test93() throws Throwable { // Create a new value type final MethodHandle dvt = 
MethodHandleBuilder.loadCode(MethodHandles.lookup(), "createValueType", @@ -3040,6 +3076,7 @@ } @Test() + @Slow public __Value test106() throws Throwable { __Value vt = test106_init(); for (int i = 0; i < 50_000; i++) { @@ -3057,6 +3094,35 @@ // ========== Test infrastructure ========== + // User defined settings + private static final boolean SKIP_SLOW = Boolean.parseBoolean(System.getProperty("SkipSlow", "false")); + private static final boolean PRINT_TIMES = Boolean.parseBoolean(System.getProperty("PrintTimes", "false")); + private static final boolean VERIFY_IR = Boolean.parseBoolean(System.getProperty("VerifyIR", "true")); + private static final boolean VERIFY_VM = Boolean.parseBoolean(System.getProperty("VerifyVM", "false")); + private static final String TESTLIST = System.getProperty("Testlist", ""); + private static final int WARMUP = Integer.parseInt(System.getProperty("Warmup", "251")); + + // Pre defined settings + private static final List defaultFlags = Arrays.asList( + "-XX:-BackgroundCompilation", "-XX:CICompilerCount=1", + "-XX:+PrintCompilation", "-XX:+PrintInlining", "-XX:+PrintIdeal", "-XX:+PrintOptoAssembly", + "-XX:CompileCommand=quiet", + "-XX:CompileCommand=compileonly,java.lang.invoke.*::*", + "-XX:CompileCommand=compileonly,java.lang.Long::sum", + "-XX:CompileCommand=compileonly,java.lang.Object::", + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue1::*", + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue2::*", + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue2Inline::*", + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue3::*", + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue3Inline::*", + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue4::*", + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.ValueCapableClass2_*::*", + "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.ValueTypeTestBench::*", + 
"-XX:CompileCommand=inline,java.lang.__Value::hashCode"); + private static final List verifyFlags = Arrays.asList( + "-XX:+VerifyOops", "-XX:+VerifyStack", "-XX:+VerifyLastFrame", "-XX:+VerifyBeforeGC", "-XX:+VerifyAfterGC", + "-XX:+VerifyDuringGC", "-XX:+VerifyAdapterSharing", "-XX:+StressValueTypeReturnedAsFields"); + private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox(); private static final int ValueTypePassFieldsAsArgsOn = 0x1; private static final int ValueTypePassFieldsAsArgsOff = 0x2; @@ -3071,10 +3137,9 @@ private static final int COMP_LEVEL_ANY = -2; private static final int COMP_LEVEL_FULL_OPTIMIZATION = 4; private static final Hashtable tests = new Hashtable(); - private static final int WARMUP = 251; - private static boolean USE_COMPILER = WHITE_BOX.getBooleanVMFlag("UseCompiler"); - private static boolean PRINT_IDEAL = WHITE_BOX.getBooleanVMFlag("PrintIdeal"); - private static boolean XCOMP = Platform.isComp(); + private static final boolean USE_COMPILER = WHITE_BOX.getBooleanVMFlag("UseCompiler"); + private static final boolean PRINT_IDEAL = WHITE_BOX.getBooleanVMFlag("PrintIdeal"); + private static final boolean XCOMP = Platform.isComp(); // Regular expressions used to match nodes in the PrintIdeal output private static final String START = "(\\d+\\t(.*"; @@ -3098,54 +3163,52 @@ private static final String SCOBJ = "(.*# ScObj.*" + END; static { + List list = null; + if (!TESTLIST.isEmpty()) { + list = Arrays.asList(TESTLIST.split(",")); + } // Gather all test methods and put them in Hashtable for (Method m : ValueTypeTestBench.class.getDeclaredMethods()) { Test[] annos = m.getAnnotationsByType(Test.class); - if (annos.length != 0) { + if (annos.length != 0 && + (list == null || list.contains(m.getName())) && + !(SKIP_SLOW && m.isAnnotationPresent(Slow.class))) { tests.put("ValueTypeTestBench::" + m.getName(), m); } } } - private static void execute_vm(String... 
args) throws Throwable { + private static void execute_vm() throws Throwable { Asserts.assertFalse(tests.isEmpty(), "no tests to execute"); - ArrayList all_args = new ArrayList(List.of(args)); + ArrayList args = new ArrayList(defaultFlags); + if (VERIFY_VM) { + args.addAll(verifyFlags); + } // Run tests in own process and verify output - all_args.add(ValueTypeTestBench.class.getName()); - all_args.add("run"); + args.add(ValueTypeTestBench.class.getName()); + args.add("run"); // Spawn process with default JVM options from the test's run command String[] vmInputArgs = InputArguments.getVmInputArgs(); - String[] cmds = Arrays.copyOf(vmInputArgs, vmInputArgs.length + all_args.size()); - System.arraycopy(all_args.toArray(), 0, cmds, vmInputArgs.length, all_args.size()); + String[] cmds = Arrays.copyOf(vmInputArgs, vmInputArgs.length + args.size()); + System.arraycopy(args.toArray(), 0, cmds, vmInputArgs.length, args.size()); OutputAnalyzer oa = ProcessTools.executeTestJvm(cmds); // If ideal graph printing is enabled/supported, verify output String output = oa.getOutput(); oa.shouldHaveExitValue(0); - boolean verifyIR = output.contains("PrintIdeal enabled") && + boolean verifyIR = VERIFY_IR && output.contains("PrintIdeal enabled") && !output.contains("ValueTypePassFieldsAsArgs is not supported on this platform"); if (verifyIR) { parseOutput(output); } else { + System.out.println(output); System.out.println("WARNING: IR verification disabled! 
Running with -Xint, -Xcomp or release build?"); } } public static void main(String[] args) throws Throwable { - //tests.values().removeIf(p -> !p.getName().equals("test106")); // Run single test if (args.length == 0) { - execute_vm("-XX:+IgnoreUnrecognizedVMOptions", "-XX:-BackgroundCompilation", - "-XX:+PrintCompilation", "-XX:+PrintInlining", "-XX:+PrintIdeal", "-XX:+PrintOptoAssembly", - "-XX:CICompilerCount=1", - "-XX:CompileCommand=quiet", "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.ValueTypeTestBench::*", - "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue1::*", - "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue2::*", - "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue3::*", - "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.MyValue4::*", - "-XX:CompileCommand=compileonly,java.lang.Object::", - "-XX:CompileCommand=inline,java.lang.__Value::hashCode", - "-XX:CompileCommand=compileonly,java.lang.invoke.*::*", - "-XX:CompileCommand=compileonly,compiler.valhalla.valuetypes.ValueCapableClass2_*::*", - "-XX:CompileCommand=compileonly,java.lang.Long::sum"); + // Spawn a new VM instance + execute_vm(); } else { // Execute tests ValueTypeTestBench bench = new ValueTypeTestBench(); @@ -3153,7 +3216,7 @@ } } - public static void parseOutput(String output) throws Exception { + private static void parseOutput(String output) throws Exception { Pattern comp_re = Pattern.compile("\\n\\s+\\d+\\s+\\d+\\s+(%| )(s| )(!| )b(n| )\\s+\\S+\\.(?[^.]+::\\S+)\\s+(?@ \\d+\\s+)?[(]\\d+ bytes[)]\\n"); Matcher m = comp_re.matcher(output); Map compilations = new LinkedHashMap<>(); @@ -3253,8 +3316,8 @@ public void setup(Method[] methods) { if (XCOMP) { - // Don't control compilation if -Xcomp is enabled - return; + // Don't control compilation if -Xcomp is enabled + return; } for (Method m : methods) { if (m.isAnnotationPresent(Test.class)) { @@ -3282,18 +3345,24 @@ 
setup(this.getClass().getDeclaredMethods()); setup(MyValue1.class.getDeclaredMethods()); setup(MyValue2.class.getDeclaredMethods()); + setup(MyValue2Inline.class.getDeclaredMethods()); setup(MyValue3.class.getDeclaredMethods()); + setup(MyValue3Inline.class.getDeclaredMethods()); setup(MyValue4.class.getDeclaredMethods()); // Compile class initializers WHITE_BOX.enqueueInitializerForCompilation(this.getClass(), COMP_LEVEL_FULL_OPTIMIZATION); WHITE_BOX.enqueueInitializerForCompilation(MyValue1.class, COMP_LEVEL_FULL_OPTIMIZATION); WHITE_BOX.enqueueInitializerForCompilation(MyValue2.class, COMP_LEVEL_FULL_OPTIMIZATION); + WHITE_BOX.enqueueInitializerForCompilation(MyValue2Inline.class, COMP_LEVEL_FULL_OPTIMIZATION); WHITE_BOX.enqueueInitializerForCompilation(MyValue3.class, COMP_LEVEL_FULL_OPTIMIZATION); + WHITE_BOX.enqueueInitializerForCompilation(MyValue3Inline.class, COMP_LEVEL_FULL_OPTIMIZATION); WHITE_BOX.enqueueInitializerForCompilation(MyValue4.class, COMP_LEVEL_FULL_OPTIMIZATION); // Execute tests + TreeMap durations = PRINT_TIMES ? 
new TreeMap() : null; for (Method test : tests.values()) { + long startTime = System.nanoTime(); Method verifier = getClass().getDeclaredMethod(test.getName() + "_verifier", boolean.class); // Warmup using verifier method Warmup anno = test.getAnnotation(Warmup.class); @@ -3306,6 +3375,19 @@ Asserts.assertTrue(!USE_COMPILER || WHITE_BOX.isMethodCompiled(test, false), test + " not compiled"); // Check result verifier.invoke(this, false); + if (PRINT_TIMES) { + long endTime = System.nanoTime(); + long duration = (endTime - startTime); + durations.put(duration, test.getName()); + } + } + + // Print execution times + if (PRINT_TIMES) { + System.out.println("\n\nTest execution times:"); + for (Map.Entry entry : durations.entrySet()) { + System.out.format("%-10s%15d ns\n", entry.getValue() + ":", entry.getKey()); + } } } } @@ -3340,7 +3422,12 @@ @Retention(RetentionPolicy.RUNTIME) @interface DontCompile { } +// Number of warmup iterations @Retention(RetentionPolicy.RUNTIME) @interface Warmup { int value(); } + +// Mark test as slow +@Retention(RetentionPolicy.RUNTIME) +@interface Slow { }