--- old/src/hotspot/share/classfile/classFileParser.cpp 2019-03-11 14:25:35.262355582 +0100 +++ new/src/hotspot/share/classfile/classFileParser.cpp 2019-03-11 14:25:35.058355585 +0100 @@ -55,6 +55,7 @@ #include "oops/method.hpp" #include "oops/oop.inline.hpp" #include "oops/symbol.hpp" +#include "oops/valueKlass.hpp" #include "prims/jvmtiExport.hpp" #include "prims/jvmtiThreadState.hpp" #include "runtime/arguments.hpp" @@ -122,6 +123,8 @@ #define JAVA_13_VERSION 57 +#define CONSTANT_CLASS_DESCRIPTORS 57 + void ClassFileParser::set_class_bad_constant_seen(short bad_constant) { assert((bad_constant == 19 || bad_constant == 20) && _major_version >= JAVA_9_VERSION, "Unexpected bad constant pool entry"); @@ -160,7 +163,7 @@ // so we don't need bounds-check for reading tag. const u1 tag = cfs->get_u1_fast(); switch (tag) { - case JVM_CONSTANT_Class : { + case JVM_CONSTANT_Class: { cfs->guarantee_more(3, CHECK); // name_index, tag/access_flags const u2 name_index = cfs->get_u2_fast(); cp->klass_index_at_put(index, name_index); @@ -492,7 +495,14 @@ check_property(valid_symbol_at(class_index), "Invalid constant pool index %u in class file %s", class_index, CHECK); - cp->unresolved_klass_at_put(index, class_index, num_klasses++); + + Symbol* const name = cp->symbol_at(class_index); + const unsigned int name_len = name->utf8_length(); + if (name->is_Q_signature()) { + cp->unresolved_qdescriptor_at_put(index, class_index, num_klasses++); + } else { + cp->unresolved_klass_at_put(index, class_index, num_klasses++); + } break; } case JVM_CONSTANT_StringIndex: { @@ -1461,11 +1471,13 @@ STATIC_SHORT, // shorts STATIC_WORD, // ints STATIC_DOUBLE, // aligned long or double + STATIC_FLATTENABLE, // flattenable field NONSTATIC_OOP, NONSTATIC_BYTE, NONSTATIC_SHORT, NONSTATIC_WORD, NONSTATIC_DOUBLE, + NONSTATIC_FLATTENABLE, MAX_FIELD_ALLOCATION_TYPE, BAD_ALLOCATION_TYPE = -1 }; @@ -1485,12 +1497,13 @@ NONSTATIC_DOUBLE, // T_LONG = 11, NONSTATIC_OOP, // T_OBJECT = 12, NONSTATIC_OOP, // 
T_ARRAY = 13, - BAD_ALLOCATION_TYPE, // T_VOID = 14, - BAD_ALLOCATION_TYPE, // T_ADDRESS = 15, - BAD_ALLOCATION_TYPE, // T_NARROWOOP = 16, - BAD_ALLOCATION_TYPE, // T_METADATA = 17, - BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 18, - BAD_ALLOCATION_TYPE, // T_CONFLICT = 19, + NONSTATIC_OOP, // T_VALUETYPE = 14, + BAD_ALLOCATION_TYPE, // T_VOID = 15, + BAD_ALLOCATION_TYPE, // T_ADDRESS = 16, + BAD_ALLOCATION_TYPE, // T_NARROWOOP = 17, + BAD_ALLOCATION_TYPE, // T_METADATA = 18, + BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 19, + BAD_ALLOCATION_TYPE, // T_CONFLICT = 20, BAD_ALLOCATION_TYPE, // 0 BAD_ALLOCATION_TYPE, // 1 BAD_ALLOCATION_TYPE, // 2 @@ -1505,18 +1518,22 @@ STATIC_DOUBLE, // T_LONG = 11, STATIC_OOP, // T_OBJECT = 12, STATIC_OOP, // T_ARRAY = 13, - BAD_ALLOCATION_TYPE, // T_VOID = 14, - BAD_ALLOCATION_TYPE, // T_ADDRESS = 15, - BAD_ALLOCATION_TYPE, // T_NARROWOOP = 16, - BAD_ALLOCATION_TYPE, // T_METADATA = 17, - BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 18, - BAD_ALLOCATION_TYPE, // T_CONFLICT = 19, + STATIC_OOP, // T_VALUETYPE = 14, + BAD_ALLOCATION_TYPE, // T_VOID = 15, + BAD_ALLOCATION_TYPE, // T_ADDRESS = 16, + BAD_ALLOCATION_TYPE, // T_NARROWOOP = 17, + BAD_ALLOCATION_TYPE, // T_METADATA = 18, + BAD_ALLOCATION_TYPE, // T_NARROWKLASS = 19, + BAD_ALLOCATION_TYPE, // T_CONFLICT = 20 }; -static FieldAllocationType basic_type_to_atype(bool is_static, BasicType type) { +static FieldAllocationType basic_type_to_atype(bool is_static, BasicType type, bool is_flattenable) { assert(type >= T_BOOLEAN && type < T_VOID, "only allowable values"); FieldAllocationType result = _basic_type_to_atype[type + (is_static ? (T_CONFLICT + 1) : 0)]; assert(result != BAD_ALLOCATION_TYPE, "bad type"); + if (is_flattenable) { + result = is_static ? 
STATIC_FLATTENABLE : NONSTATIC_FLATTENABLE; + } return result; } @@ -1530,8 +1547,8 @@ } } - FieldAllocationType update(bool is_static, BasicType type) { - FieldAllocationType atype = basic_type_to_atype(is_static, type); + FieldAllocationType update(bool is_static, BasicType type, bool is_flattenable) { + FieldAllocationType atype = basic_type_to_atype(is_static, type, is_flattenable); if (atype != BAD_ALLOCATION_TYPE) { // Make sure there is no overflow with injected fields. assert(count[atype] < 0xFFFF, "More than 65535 fields"); @@ -1545,6 +1562,7 @@ // _fields_type_annotations fields void ClassFileParser::parse_fields(const ClassFileStream* const cfs, bool is_interface, + bool is_value_type, FieldAllocationCount* const fac, ConstantPool* cp, const int cp_size, @@ -1567,7 +1585,8 @@ int num_injected = 0; const InjectedField* const injected = JavaClasses::get_injected(_class_name, &num_injected); - const int total_fields = length + num_injected; + + const int total_fields = length + num_injected + (is_value_type ? 1 : 0); // The field array starts with tuples of shorts // [access, name index, sig index, initial value index, byte offset]. 
@@ -1601,9 +1620,11 @@ // access_flags, name_index, descriptor_index, attributes_count cfs->guarantee_more(8, CHECK); + jint recognized_modifiers = JVM_RECOGNIZED_FIELD_MODIFIERS; + + const jint flags = cfs->get_u2_fast() & recognized_modifiers; + verify_legal_field_modifiers(flags, is_interface, is_value_type, CHECK); AccessFlags access_flags; - const jint flags = cfs->get_u2_fast() & JVM_RECOGNIZED_FIELD_MODIFIERS; - verify_legal_field_modifiers(flags, is_interface, CHECK); access_flags.set_flags(flags); const u2 name_index = cfs->get_u2_fast(); @@ -1619,6 +1640,22 @@ signature_index, CHECK); const Symbol* const sig = cp->symbol_at(signature_index); verify_legal_field_signature(name, sig, CHECK); + assert(!access_flags.is_flattenable(), "ACC_FLATTENABLE should have been filtered out"); + if (sig->is_Q_signature()) { + // assert(_major_version >= CONSTANT_CLASS_DESCRIPTORS, "Q-descriptors are only supported in recent classfiles"); + access_flags.set_is_flattenable(); + } + if (access_flags.is_flattenable()) { + // Array flattenability cannot be specified. Arrays of value classes are + // are always flattenable. Arrays of other classes are not flattenable. + if (sig->utf8_length() > 1 && sig->char_at(0) == '[') { + classfile_parse_error( + "Field \"%s\" with signature \"%s\" in class file %s is invalid." 
+ " ACC_FLATTENABLE cannot be specified for an array", + name->as_C_string(), sig->as_klass_external_name(), CHECK); + } + _has_flattenable_fields = true; + } u2 constantvalue_index = 0; bool is_synthetic = false; @@ -1678,7 +1715,7 @@ const BasicType type = cp->basic_type_for_signature_at(signature_index); // Remember how many oops we encountered and compute allocation type - const FieldAllocationType atype = fac->update(is_static, type); + const FieldAllocationType atype = fac->update(is_static, type, access_flags.is_flattenable()); field->set_allocation_type(atype); // After field is initialized with type, we can augment it with aux info @@ -1719,12 +1756,25 @@ const BasicType type = FieldType::basic_type(injected[n].signature()); // Remember how many oops we encountered and compute allocation type - const FieldAllocationType atype = fac->update(false, type); + const FieldAllocationType atype = fac->update(false, type, false); field->set_allocation_type(atype); index++; } } + if (is_value_type) { + index = length + num_injected; + FieldInfo* const field = FieldInfo::from_field_array(fa, index); + field->initialize(JVM_ACC_FIELD_INTERNAL | JVM_ACC_STATIC, + vmSymbols::default_value_name_enum, + vmSymbols::java_lang_Object_enum, + 0); + const BasicType type = FieldType::basic_type(vmSymbols::object_signature()); + const FieldAllocationType atype = fac->update(true, type, false); + field->set_allocation_type(atype); + index++; + } + assert(NULL == _fields, "invariant"); _fields = @@ -2348,6 +2398,7 @@ Method* ClassFileParser::parse_method(const ClassFileStream* const cfs, bool is_interface, + bool is_value_type, const ConstantPool* cp, AccessFlags* const promoted_flags, TRAPS) { @@ -2388,11 +2439,17 @@ classfile_parse_error("Method is not static in class file %s", CHECK_NULL); } } else { - verify_legal_method_modifiers(flags, is_interface, name, CHECK_NULL); + verify_legal_method_modifiers(flags, is_interface, is_value_type, name, CHECK_NULL); } - if (name == 
vmSymbols::object_initializer_name() && is_interface) { - classfile_parse_error("Interface cannot have a method named , class file %s", CHECK_NULL); + if (name == vmSymbols::object_initializer_name()) { + if (is_interface) { + classfile_parse_error("Interface cannot have a method named , class file %s", CHECK_NULL); +/* TBD: uncomment when javac stops generating () for value types. + } else if (is_value_type) { + classfile_parse_error("Value Type cannot have a method named , class file %s", CHECK_NULL); +*/ + } } int args_size = -1; // only used when _need_verify is true @@ -2963,6 +3020,7 @@ // Side-effects: populates the _methods field in the parser void ClassFileParser::parse_methods(const ClassFileStream* const cfs, bool is_interface, + bool is_value_type, AccessFlags* promoted_flags, bool* has_final_method, bool* declares_nonstatic_concrete_methods, @@ -2987,6 +3045,7 @@ for (int index = 0; index < length; index++) { Method* method = parse_method(cfs, is_interface, + is_value_type, _cp, promoted_flags, CHECK); @@ -3179,14 +3238,20 @@ guarantee_property(inner_class_info_index != outer_class_info_index, "Class is both outer and inner class in class file %s", CHECK_0); } - // Access flags - jint flags; + + jint recognized_modifiers = RECOGNIZED_INNER_CLASS_MODIFIERS; // JVM_ACC_MODULE is defined in JDK-9 and later. 
if (_major_version >= JAVA_9_VERSION) { - flags = cfs->get_u2_fast() & (RECOGNIZED_INNER_CLASS_MODIFIERS | JVM_ACC_MODULE); - } else { - flags = cfs->get_u2_fast() & RECOGNIZED_INNER_CLASS_MODIFIERS; + recognized_modifiers |= JVM_ACC_MODULE; + } + // JVM_ACC_VALUE is defined for class file version 55 and later + if (supports_value_types()) { + recognized_modifiers |= JVM_ACC_VALUE; } + + // Access flags + jint flags = cfs->get_u2_fast() & recognized_modifiers; + if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) { // Set abstract bit for old class files for backward compatibility flags |= JVM_ACC_ABSTRACT; @@ -3389,6 +3454,8 @@ bool parsed_source_debug_ext_annotations_exist = false; const u1* inner_classes_attribute_start = NULL; u4 inner_classes_attribute_length = 0; + const u1* value_types_attribute_start = NULL; + u4 value_types_attribute_length = 0; u2 enclosing_method_class_index = 0; u2 enclosing_method_method_index = 0; const u1* nest_members_attribute_start = NULL; @@ -3738,7 +3805,8 @@ const InstanceKlass* super_klass = NULL; if (super_class_index == 0) { - check_property(_class_name == vmSymbols::java_lang_Object(), + check_property(_class_name == vmSymbols::java_lang_Object() + || (_access_flags.get_flags() & JVM_ACC_VALUE), "Invalid superclass index %u in class file %s", super_class_index, CHECK_NULL); @@ -3765,39 +3833,6 @@ return super_klass; } -static unsigned int compute_oop_map_count(const InstanceKlass* super, - unsigned int nonstatic_oop_map_count, - int first_nonstatic_oop_offset) { - - unsigned int map_count = - NULL == super ? 
0 : super->nonstatic_oop_map_count(); - if (nonstatic_oop_map_count > 0) { - // We have oops to add to map - if (map_count == 0) { - map_count = nonstatic_oop_map_count; - } - else { - // Check whether we should add a new map block or whether the last one can - // be extended - const OopMapBlock* const first_map = super->start_of_nonstatic_oop_maps(); - const OopMapBlock* const last_map = first_map + map_count - 1; - - const int next_offset = last_map->offset() + last_map->count() * heapOopSize; - if (next_offset == first_nonstatic_oop_offset) { - // There is no gap bettwen superklass's last oop field and first - // local oop field, merge maps. - nonstatic_oop_map_count -= 1; - } - else { - // Superklass didn't end with a oop field, add extra maps - assert(next_offset < first_nonstatic_oop_offset, "just checking"); - } - map_count += nonstatic_oop_map_count; - } - } - return map_count; -} - #ifndef PRODUCT static void print_field_layout(const Symbol* name, Array* fields, @@ -3838,16 +3873,158 @@ // Values needed for oopmap and InstanceKlass creation class ClassFileParser::FieldLayoutInfo : public ResourceObj { public: - int* nonstatic_oop_offsets; - unsigned int* nonstatic_oop_counts; - unsigned int nonstatic_oop_map_count; - unsigned int total_oop_map_count; + OopMapBlocksBuilder* oop_map_blocks; int instance_size; int nonstatic_field_size; int static_field_size; bool has_nonstatic_fields; }; +// Utility to collect and compact oop maps during layout +class ClassFileParser::OopMapBlocksBuilder : public ResourceObj { + public: + OopMapBlock* nonstatic_oop_maps; + unsigned int nonstatic_oop_map_count; + unsigned int max_nonstatic_oop_maps; + + public: + OopMapBlocksBuilder(unsigned int max_blocks, TRAPS) { + max_nonstatic_oop_maps = max_blocks; + nonstatic_oop_map_count = 0; + if (max_blocks == 0) { + nonstatic_oop_maps = NULL; + } else { + nonstatic_oop_maps = NEW_RESOURCE_ARRAY_IN_THREAD( + THREAD, OopMapBlock, max_nonstatic_oop_maps); + memset(nonstatic_oop_maps, 
0, sizeof(OopMapBlock) * max_blocks); + } + } + + OopMapBlock* last_oop_map() const { + assert(nonstatic_oop_map_count > 0, "Has no oop maps"); + return nonstatic_oop_maps + (nonstatic_oop_map_count - 1); + } + + // addition of super oop maps + void initialize_inherited_blocks(OopMapBlock* blocks, unsigned int nof_blocks) { + assert(nof_blocks && nonstatic_oop_map_count == 0 && + nof_blocks <= max_nonstatic_oop_maps, "invariant"); + + memcpy(nonstatic_oop_maps, blocks, sizeof(OopMapBlock) * nof_blocks); + nonstatic_oop_map_count += nof_blocks; + } + + // collection of oops + void add(int offset, int count) { + if (nonstatic_oop_map_count == 0) { + nonstatic_oop_map_count++; + } + OopMapBlock* nonstatic_oop_map = last_oop_map(); + if (nonstatic_oop_map->count() == 0) { // Unused map, set it up + nonstatic_oop_map->set_offset(offset); + nonstatic_oop_map->set_count(count); + } else if (nonstatic_oop_map->is_contiguous(offset)) { // contiguous, add + nonstatic_oop_map->increment_count(count); + } else { // Need a new one... + nonstatic_oop_map_count++; + assert(nonstatic_oop_map_count <= max_nonstatic_oop_maps, "range check"); + nonstatic_oop_map = last_oop_map(); + nonstatic_oop_map->set_offset(offset); + nonstatic_oop_map->set_count(count); + } + } + + // general purpose copy, e.g. into allocated instanceKlass + void copy(OopMapBlock* dst) { + if (nonstatic_oop_map_count != 0) { + memcpy(dst, nonstatic_oop_maps, sizeof(OopMapBlock) * nonstatic_oop_map_count); + } + } + + // Sort and compact adjacent blocks + void compact(TRAPS) { + if (nonstatic_oop_map_count <= 1) { + return; + } + /* + * Since field layout sneaks in oops before values, we will be able to condense + * blocks. There is potential to compact between super, own refs and values + * containing refs. + * + * Currently compaction is slightly limited due to values being 8 byte aligned. + * This may well change: FixMe: if it doesn't, the code below is fairly general purpose + * and maybe it doesn't need to be. 
+ */ + qsort(nonstatic_oop_maps, nonstatic_oop_map_count, sizeof(OopMapBlock), + (_sort_Fn)OopMapBlock::compare_offset); + if (nonstatic_oop_map_count < 2) { + return; + } + + //Make a temp copy, and iterate through and copy back into the orig + ResourceMark rm(THREAD); + OopMapBlock* oop_maps_copy = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, OopMapBlock, + nonstatic_oop_map_count); + OopMapBlock* oop_maps_copy_end = oop_maps_copy + nonstatic_oop_map_count; + copy(oop_maps_copy); + OopMapBlock* nonstatic_oop_map = nonstatic_oop_maps; + unsigned int new_count = 1; + oop_maps_copy++; + while(oop_maps_copy < oop_maps_copy_end) { + assert(nonstatic_oop_map->offset() < oop_maps_copy->offset(), "invariant"); + if (nonstatic_oop_map->is_contiguous(oop_maps_copy->offset())) { + nonstatic_oop_map->increment_count(oop_maps_copy->count()); + } else { + nonstatic_oop_map++; + new_count++; + nonstatic_oop_map->set_offset(oop_maps_copy->offset()); + nonstatic_oop_map->set_count(oop_maps_copy->count()); + } + oop_maps_copy++; + } + assert(new_count <= nonstatic_oop_map_count, "end up with more maps after compact() ?"); + nonstatic_oop_map_count = new_count; + } + + void print_on(outputStream* st) const { + st->print_cr(" OopMapBlocks: %3d /%3d", nonstatic_oop_map_count, max_nonstatic_oop_maps); + if (nonstatic_oop_map_count > 0) { + OopMapBlock* map = nonstatic_oop_maps; + OopMapBlock* last_map = last_oop_map(); + assert(map <= last_map, "Last less than first"); + while (map <= last_map) { + st->print_cr(" Offset: %3d -%3d Count: %3d", map->offset(), + map->offset() + map->offset_span() - heapOopSize, map->count()); + map++; + } + } + } + + void print_value_on(outputStream* st) const { + print_on(st); + } + +}; + +void ClassFileParser::throwValueTypeLimitation(THREAD_AND_LOCATION_DECL, + const char* msg, + const Symbol* name, + const Symbol* sig) const { + + ResourceMark rm(THREAD); + if (name == NULL || sig == NULL) { + Exceptions::fthrow(THREAD_AND_LOCATION_ARGS, + 
vmSymbols::java_lang_ClassFormatError(), + "class: %s - %s", _class_name->as_C_string(), msg); + } + else { + Exceptions::fthrow(THREAD_AND_LOCATION_ARGS, + vmSymbols::java_lang_ClassFormatError(), + "\"%s\" sig: \"%s\" class: %s - %s", name->as_C_string(), sig->as_C_string(), + _class_name->as_C_string(), msg); + } +} + // Layout fields and fill in FieldLayoutInfo. Could use more refactoring! void ClassFileParser::layout_fields(ConstantPool* cp, const FieldAllocationCount* fac, @@ -3860,6 +4037,12 @@ // Field size and offset computation int nonstatic_field_size = _super_klass == NULL ? 0 : _super_klass->nonstatic_field_size(); + int next_nonstatic_valuetype_offset = 0; + int first_nonstatic_valuetype_offset = 0; + + // Fields that are value types are handled differently depending if they are static or not: + // - static fields are oops + // - non-static fields are embedded // Count the contended fields by type. // @@ -3880,8 +4063,9 @@ // Calculate the starting byte offsets int next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields(); + // Value types in static fields are not embedded, they are handled with oops int next_static_double_offset = next_static_oop_offset + - ((fac->count[STATIC_OOP]) * heapOopSize); + ((fac->count[STATIC_OOP] + fac->count[STATIC_FLATTENABLE]) * heapOopSize); if (fac->count[STATIC_DOUBLE]) { next_static_double_offset = align_up(next_static_double_offset, BytesPerLong); } @@ -3896,6 +4080,16 @@ int nonstatic_fields_start = instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size * heapOopSize; + // First field of value types is aligned on a long boundary in order to ease + // in-lining of value types (with header removal) in packed arrays and + // flatten value types + int initial_value_type_padding = 0; + if (is_value_type()) { + int old = nonstatic_fields_start; + nonstatic_fields_start = align_up(nonstatic_fields_start, BytesPerLong); + initial_value_type_padding = nonstatic_fields_start - old; + } + int 
next_nonstatic_field_offset = nonstatic_fields_start; const bool is_contended_class = parsed_annotations->is_contended(); @@ -3905,6 +4099,14 @@ next_nonstatic_field_offset += ContendedPaddingWidth; } + // Temporary value types restrictions + if (is_value_type()) { + if (is_contended_class) { + throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support @Contended annotation yet"); + return; + } + } + // Compute the non-contended fields count. // The packing code below relies on these counts to determine if some field // can be squeezed into the alignment gap. Contended fields are obviously @@ -3915,16 +4117,96 @@ unsigned int nonstatic_byte_count = fac->count[NONSTATIC_BYTE] - fac_contended.count[NONSTATIC_BYTE]; unsigned int nonstatic_oop_count = fac->count[NONSTATIC_OOP] - fac_contended.count[NONSTATIC_OOP]; + int static_value_type_count = 0; + int nonstatic_value_type_count = 0; + int* nonstatic_value_type_indexes = NULL; + Klass** nonstatic_value_type_klasses = NULL; + unsigned int value_type_oop_map_count = 0; + int not_flattened_value_types = 0; + + int max_nonstatic_value_type = fac->count[NONSTATIC_FLATTENABLE] + 1; + + nonstatic_value_type_indexes = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, int, + max_nonstatic_value_type); + for (int i = 0; i < max_nonstatic_value_type; i++) { + nonstatic_value_type_indexes[i] = -1; + } + nonstatic_value_type_klasses = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, Klass*, + max_nonstatic_value_type); + + for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) { + if (fs.allocation_type() == STATIC_FLATTENABLE) { + // Pre-resolve the flattenable field and check for value type circularity + // issues. Note that super-class circularity checks are not needed here + // because flattenable fields can only be in value types and value types + // only have java.lang.Object as their super class. + // Also, note that super-interface circularity checks are not needed + // because interfaces cannot be value types. 
+ ResourceMark rm; + if (!fs.signature()->is_Q_signature()) { + THROW(vmSymbols::java_lang_ClassFormatError()); + } + Klass* klass = + SystemDictionary::resolve_flattenable_field_or_fail(&fs, + Handle(THREAD, _loader_data->class_loader()), + _protection_domain, true, CHECK); + assert(klass != NULL, "Sanity check"); + if (!klass->access_flags().is_value_type()) { + THROW(vmSymbols::java_lang_IncompatibleClassChangeError()); + } + static_value_type_count++; + } else if (fs.allocation_type() == NONSTATIC_FLATTENABLE) { + // Pre-resolve the flattenable field and check for value type circularity issues. + ResourceMark rm; + if (!fs.signature()->is_Q_signature()) { + THROW(vmSymbols::java_lang_ClassFormatError()); + } + Klass* klass = + SystemDictionary::resolve_flattenable_field_or_fail(&fs, + Handle(THREAD, _loader_data->class_loader()), + _protection_domain, true, CHECK); + assert(klass != NULL, "Sanity check"); + if (!klass->access_flags().is_value_type()) { + THROW(vmSymbols::java_lang_IncompatibleClassChangeError()); + } + ValueKlass* vk = ValueKlass::cast(klass); + // Conditions to apply flattening or not should be defined in a single place + if ((ValueFieldMaxFlatSize < 0) || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize) { + nonstatic_value_type_indexes[nonstatic_value_type_count] = fs.index(); + nonstatic_value_type_klasses[nonstatic_value_type_count] = klass; + nonstatic_value_type_count++; + + ValueKlass* vklass = ValueKlass::cast(klass); + if (vklass->contains_oops()) { + value_type_oop_map_count += vklass->nonstatic_oop_map_count(); + } + fs.set_flattened(true); + } else { + not_flattened_value_types++; + fs.set_flattened(false); + } + } + } + + // Adjusting non_static_oop_count to take into account not flattened value types; + nonstatic_oop_count += not_flattened_value_types; + // Total non-static fields count, including every contended field unsigned int nonstatic_fields_count = fac->count[NONSTATIC_DOUBLE] + fac->count[NONSTATIC_WORD] + 
fac->count[NONSTATIC_SHORT] + fac->count[NONSTATIC_BYTE] + - fac->count[NONSTATIC_OOP]; + fac->count[NONSTATIC_OOP] + fac->count[NONSTATIC_FLATTENABLE]; const bool super_has_nonstatic_fields = (_super_klass != NULL && _super_klass->has_nonstatic_fields()); const bool has_nonstatic_fields = super_has_nonstatic_fields || (nonstatic_fields_count != 0); + const bool has_nonstatic_value_fields = nonstatic_value_type_count > 0; + if (is_value_type() && (!has_nonstatic_fields)) { + // There are a number of fixes required throughout the type system and JIT + throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support zero instance size yet"); + return; + } // Prepare list of oops for oop map generation. // @@ -3934,15 +4216,18 @@ // we pessimistically allocate the maps to fit all the oops into the // distinct regions. // - // TODO: We add +1 to always allocate non-zero resource arrays; we need - // to figure out if we still need to do this. - unsigned int nonstatic_oop_map_count = 0; - unsigned int max_nonstatic_oop_maps = fac->count[NONSTATIC_OOP] + 1; - - int* nonstatic_oop_offsets = NEW_RESOURCE_ARRAY_IN_THREAD( - THREAD, int, max_nonstatic_oop_maps); - unsigned int* const nonstatic_oop_counts = NEW_RESOURCE_ARRAY_IN_THREAD( - THREAD, unsigned int, max_nonstatic_oop_maps); + int super_oop_map_count = (_super_klass == NULL) ? 
0 :_super_klass->nonstatic_oop_map_count(); + int max_oop_map_count = + super_oop_map_count + + fac->count[NONSTATIC_OOP] + + value_type_oop_map_count + + not_flattened_value_types; + + OopMapBlocksBuilder* nonstatic_oop_maps = new OopMapBlocksBuilder(max_oop_map_count, THREAD); + if (super_oop_map_count > 0) { + nonstatic_oop_maps->initialize_inherited_blocks(_super_klass->start_of_nonstatic_oop_maps(), + _super_klass->nonstatic_oop_map_count()); + } int first_nonstatic_oop_offset = 0; // will be set for first oop field @@ -3991,13 +4276,8 @@ next_nonstatic_double_offset = next_nonstatic_field_offset; } else if( allocation_style == 2 ) { // Fields allocation: oops fields in super and sub classes are together. - if( nonstatic_field_size > 0 && _super_klass != NULL && - _super_klass->nonstatic_oop_map_size() > 0 ) { - const unsigned int map_count = _super_klass->nonstatic_oop_map_count(); - const OopMapBlock* const first_map = _super_klass->start_of_nonstatic_oop_maps(); - const OopMapBlock* const last_map = first_map + map_count - 1; - const int next_offset = last_map->offset() + (last_map->count() * heapOopSize); - if (next_offset == next_nonstatic_field_offset) { + if( nonstatic_field_size > 0 && super_oop_map_count > 0 ) { + if (next_nonstatic_field_offset == nonstatic_oop_maps->last_oop_map()->end_offset()) { allocation_style = 0; // allocate oops first next_nonstatic_oop_offset = next_nonstatic_field_offset; next_nonstatic_double_offset = next_nonstatic_oop_offset + @@ -4080,6 +4360,16 @@ next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize); } + // Aligning embedded value types + // bug below, the current algorithm to layout embedded value types always put them at the + // end of the layout, which doesn't match the different allocation policies the VM is + // supposed to provide => FixMe + // Note also that the current alignment policy is to make each value type starting on a + // 64 bits boundary. 
This could be optimized later. For instance, it could be nice to + // align value types according to their most constrained internal type. + next_nonstatic_valuetype_offset = align_up(next_nonstatic_padded_offset, BytesPerLong); + int next_value_type_index = 0; + // Iterate over fields again and compute correct offsets. // The field allocation type was temporarily stored in the offset slot. // oop fields are located before non-oop fields (static and non-static). @@ -4096,6 +4386,8 @@ // pack the rest of the fields switch (atype) { + // Value types in static fields are handled with oops + case STATIC_FLATTENABLE: // Fallthrough case STATIC_OOP: real_offset = next_static_oop_offset; next_static_oop_offset += heapOopSize; @@ -4116,6 +4408,31 @@ real_offset = next_static_double_offset; next_static_double_offset += BytesPerLong; break; + case NONSTATIC_FLATTENABLE: + if (fs.is_flattened()) { + Klass* klass = nonstatic_value_type_klasses[next_value_type_index]; + assert(klass != NULL, "Klass should have been loaded and resolved earlier"); + assert(klass->access_flags().is_value_type(),"Must be a value type"); + ValueKlass* vklass = ValueKlass::cast(klass); + real_offset = next_nonstatic_valuetype_offset; + next_nonstatic_valuetype_offset += (vklass->size_helper()) * wordSize - vklass->first_field_offset(); + // aligning next value type on a 64 bits boundary + next_nonstatic_valuetype_offset = align_up(next_nonstatic_valuetype_offset, BytesPerLong); + next_value_type_index += 1; + + if (vklass->contains_oops()) { // add flatten oop maps + int diff = real_offset - vklass->first_field_offset(); + const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps(); + const OopMapBlock* const last_map = map + vklass->nonstatic_oop_map_count(); + while (map < last_map) { + nonstatic_oop_maps->add(map->offset() + diff, map->count()); + map++; + } + } + break; + } else { + // Fall through + } case NONSTATIC_OOP: if( nonstatic_oop_space_count > 0 ) { real_offset = 
nonstatic_oop_space_offset; @@ -4125,26 +4442,7 @@ real_offset = next_nonstatic_oop_offset; next_nonstatic_oop_offset += heapOopSize; } - - // Record this oop in the oop maps - if( nonstatic_oop_map_count > 0 && - nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == - real_offset - - int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) * - heapOopSize ) { - // This oop is adjacent to the previous one, add to current oop map - assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check"); - nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1; - } else { - // This oop is not adjacent to the previous one, create new oop map - assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check"); - nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset; - nonstatic_oop_counts [nonstatic_oop_map_count] = 1; - nonstatic_oop_map_count += 1; - if( first_nonstatic_oop_offset == 0 ) { // Undefined - first_nonstatic_oop_offset = real_offset; - } - } + nonstatic_oop_maps->add(real_offset, 1); break; case NONSTATIC_BYTE: if( nonstatic_byte_space_count > 0 ) { @@ -4253,30 +4551,17 @@ next_nonstatic_padded_offset += BytesPerLong; break; + // Value types in static fields are handled with oops + case NONSTATIC_FLATTENABLE: + throwValueTypeLimitation(THREAD_AND_LOCATION, + "@Contended annotation not supported for value types yet", fs.name(), fs.signature()); + return; + case NONSTATIC_OOP: next_nonstatic_padded_offset = align_up(next_nonstatic_padded_offset, heapOopSize); real_offset = next_nonstatic_padded_offset; next_nonstatic_padded_offset += heapOopSize; - - // Record this oop in the oop maps - if( nonstatic_oop_map_count > 0 && - nonstatic_oop_offsets[nonstatic_oop_map_count - 1] == - real_offset - - int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) * - heapOopSize ) { - // This oop is adjacent to the previous one, add to current oop map - assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check"); - 
nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1; - } else { - // This oop is not adjacent to the previous one, create new oop map - assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check"); - nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset; - nonstatic_oop_counts [nonstatic_oop_map_count] = 1; - nonstatic_oop_map_count += 1; - if( first_nonstatic_oop_offset == 0 ) { // Undefined - first_nonstatic_oop_offset = real_offset; - } - } + nonstatic_oop_maps->add(real_offset, 1); break; default: @@ -4311,12 +4596,24 @@ // This helps to alleviate memory contention effects for subclass fields // and/or adjacent object. if (is_contended_class) { + assert(!is_value_type(), "@Contended not supported for value types yet"); next_nonstatic_padded_offset += ContendedPaddingWidth; } - int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset; + int notaligned_nonstatic_fields_end; + if (nonstatic_value_type_count != 0) { + notaligned_nonstatic_fields_end = next_nonstatic_valuetype_offset; + } else { + notaligned_nonstatic_fields_end = next_nonstatic_padded_offset; + } - int nonstatic_fields_end = align_up(notaligned_nonstatic_fields_end, heapOopSize); + int nonstatic_field_sz_align = heapOopSize; + if (is_value_type()) { + if ((notaligned_nonstatic_fields_end - nonstatic_fields_start) > heapOopSize) { + nonstatic_field_sz_align = BytesPerLong; // value copy of fields only uses jlong copy + } + } + int nonstatic_fields_end = align_up(notaligned_nonstatic_fields_end, nonstatic_field_sz_align); int instance_end = align_up(notaligned_nonstatic_fields_end, wordSize); int static_fields_end = align_up(next_static_byte_offset, wordSize); @@ -4328,8 +4625,9 @@ int instance_size = align_object_size(instance_end / wordSize); assert(instance_size == align_object_size(align_up( - (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize), - wordSize) / wordSize), "consistent layout helper value"); + 
(instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize) + + initial_value_type_padding, wordSize) / wordSize), "consistent layout helper value"); + // Invariant: nonstatic_field end/start should only change if there are // nonstatic fields in the class, or if the class is contended. We compare @@ -4340,12 +4638,11 @@ (nonstatic_fields_count > 0), "double-check nonstatic start/end"); // Number of non-static oop map blocks allocated at end of klass. - const unsigned int total_oop_map_count = - compute_oop_map_count(_super_klass, nonstatic_oop_map_count, - first_nonstatic_oop_offset); + nonstatic_oop_maps->compact(THREAD); #ifndef PRODUCT - if (PrintFieldLayout) { + if ((PrintFieldLayout && !is_value_type()) || + (PrintValueLayout && (is_value_type() || has_nonstatic_value_fields))) { print_field_layout(_class_name, _fields, cp, @@ -4353,63 +4650,20 @@ nonstatic_fields_start, nonstatic_fields_end, static_fields_end); + nonstatic_oop_maps->print_on(tty); + tty->print("\n"); } #endif // Pass back information needed for InstanceKlass creation - info->nonstatic_oop_offsets = nonstatic_oop_offsets; - info->nonstatic_oop_counts = nonstatic_oop_counts; - info->nonstatic_oop_map_count = nonstatic_oop_map_count; - info->total_oop_map_count = total_oop_map_count; + info->oop_map_blocks = nonstatic_oop_maps; info->instance_size = instance_size; info->static_field_size = static_field_size; info->nonstatic_field_size = nonstatic_field_size; info->has_nonstatic_fields = has_nonstatic_fields; } -static void fill_oop_maps(const InstanceKlass* k, - unsigned int nonstatic_oop_map_count, - const int* nonstatic_oop_offsets, - const unsigned int* nonstatic_oop_counts) { - - assert(k != NULL, "invariant"); - - OopMapBlock* this_oop_map = k->start_of_nonstatic_oop_maps(); - const InstanceKlass* const super = k->superklass(); - const unsigned int super_count = super ? 
super->nonstatic_oop_map_count() : 0; - if (super_count > 0) { - // Copy maps from superklass - OopMapBlock* super_oop_map = super->start_of_nonstatic_oop_maps(); - for (unsigned int i = 0; i < super_count; ++i) { - *this_oop_map++ = *super_oop_map++; - } - } - - if (nonstatic_oop_map_count > 0) { - if (super_count + nonstatic_oop_map_count > k->nonstatic_oop_map_count()) { - // The counts differ because there is no gap between superklass's last oop - // field and the first local oop field. Extend the last oop map copied - // from the superklass instead of creating new one. - nonstatic_oop_map_count--; - nonstatic_oop_offsets++; - this_oop_map--; - this_oop_map->set_count(this_oop_map->count() + *nonstatic_oop_counts++); - this_oop_map++; - } - - // Add new map blocks, fill them - while (nonstatic_oop_map_count-- > 0) { - this_oop_map->set_offset(*nonstatic_oop_offsets++); - this_oop_map->set_count(*nonstatic_oop_counts++); - this_oop_map++; - } - assert(k->start_of_nonstatic_oop_maps() + k->nonstatic_oop_map_count() == - this_oop_map, "sanity"); - } -} - - -void ClassFileParser::set_precomputed_flags(InstanceKlass* ik) { +void ClassFileParser::set_precomputed_flags(InstanceKlass* ik, TRAPS) { assert(ik != NULL, "invariant"); const Klass* const super = ik->super(); @@ -4442,6 +4696,10 @@ // Check if this klass supports the java.lang.Cloneable interface if (SystemDictionary::Cloneable_klass_loaded()) { if (ik->is_subtype_of(SystemDictionary::Cloneable_klass())) { + if (ik->is_value()) { + throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support Cloneable"); + return; + } ik->set_is_cloneable(); } } @@ -4482,6 +4740,11 @@ } } +bool ClassFileParser::supports_value_types() const { + // Value types are only supported by class file version 55 and later + return _major_version >= JAVA_11_VERSION; +} + // utility methods for appending an array with check for duplicates static void append_interfaces(GrowableArray* result, @@ -4744,7 +5007,9 @@ void 
ClassFileParser::verify_legal_class_modifiers(jint flags, TRAPS) const { const bool is_module = (flags & JVM_ACC_MODULE) != 0; + const bool is_value_type = (flags & JVM_ACC_VALUE) != 0; assert(_major_version >= JAVA_9_VERSION || !is_module, "JVM_ACC_MODULE should not be set"); + assert(supports_value_types() || !is_value_type, "JVM_ACC_VALUE should not be set"); if (is_module) { ResourceMark rm(THREAD); Exceptions::fthrow( @@ -4755,6 +5020,16 @@ return; } + if (is_value_type && !EnableValhalla) { + ResourceMark rm(THREAD); + Exceptions::fthrow( + THREAD_AND_LOCATION, + vmSymbols::java_lang_ClassFormatError(), + "Class modifier ACC_VALUE in class %s requires option -XX:+EnableValhalla", + _class_name->as_C_string() + ); + } + if (!_need_verify) { return; } const bool is_interface = (flags & JVM_ACC_INTERFACE) != 0; @@ -4768,7 +5043,8 @@ if ((is_abstract && is_final) || (is_interface && !is_abstract) || (is_interface && major_gte_15 && (is_super || is_enum)) || - (!is_interface && major_gte_15 && is_annotation)) { + (!is_interface && major_gte_15 && is_annotation) || + (is_value_type && (is_interface || is_abstract || is_enum || !is_final))) { ResourceMark rm(THREAD); Exceptions::fthrow( THREAD_AND_LOCATION, @@ -4851,6 +5127,7 @@ void ClassFileParser::verify_legal_field_modifiers(jint flags, bool is_interface, + bool is_value_type, TRAPS) const { if (!_need_verify) { return; } @@ -4875,6 +5152,10 @@ } else { // not interface if (has_illegal_visibility(flags) || (is_final && is_volatile)) { is_illegal = true; + } else { + if (is_value_type && !is_static && !is_final) { + is_illegal = true; + } } } @@ -4891,6 +5172,7 @@ void ClassFileParser::verify_legal_method_modifiers(jint flags, bool is_interface, + bool is_value_type, const Symbol* name, TRAPS) const { if (!_need_verify) { return; } @@ -4950,10 +5232,14 @@ is_illegal = true; } } else { // not initializer - if (is_abstract) { - if ((is_final || is_native || is_private || is_static || - (major_gte_15 && 
(is_synchronized || is_strict)))) { - is_illegal = true; + if (is_value_type && is_synchronized && !is_static) { + is_illegal = true; + } else { + if (is_abstract) { + if ((is_final || is_native || is_private || is_static || + (major_gte_15 && (is_synchronized || is_strict)))) { + is_illegal = true; + } } } } @@ -5122,7 +5408,16 @@ case JVM_SIGNATURE_LONG: case JVM_SIGNATURE_DOUBLE: return signature + 1; - case JVM_SIGNATURE_CLASS: { + case JVM_SIGNATURE_VALUETYPE: + // Can't enable this check until JDK upgrades the bytecode generators + // if (_major_version < CONSTANT_CLASS_DESCRIPTORS ) { + // classfile_parse_error("Class name contains illegal Q-signature " + // "in descriptor in class file %s", + // CHECK_0); + // } + // fall through + case JVM_SIGNATURE_CLASS: + { if (_major_version < JAVA_1_5_VERSION) { // Skip over the class name if one is there const char* const p = skip_over_field_name(signature + 1, true, --length); @@ -5133,7 +5428,7 @@ } } else { - // Skip leading 'L' and ignore first appearance of ';' + // Skip leading 'L' or 'Q' and ignore first appearance of ';' signature++; const char* c = (const char*) memchr(signature, ';', length - 1); // Format check signature @@ -5188,6 +5483,9 @@ p = skip_over_field_name(bytes, true, length); legal = (p != NULL) && ((p - bytes) == (int)length); } + } else if (_major_version >= CONSTANT_CLASS_DESCRIPTORS && bytes[length - 1] == ';' ) { + // Support for L...; and Q...; descriptors + legal = verify_unqualified_name(bytes + 1, length - 2, LegalClass); } else { // 4900761: relax the constraints based on JSR202 spec // Class names may be drawn from the entire Unicode character set. 
@@ -5362,7 +5660,7 @@ int ClassFileParser::total_oop_map_count() const { assert(_field_info != NULL, "invariant"); - return _field_info->total_oop_map_count; + return _field_info->oop_map_blocks->nonstatic_oop_map_count; } jint ClassFileParser::layout_size() const { @@ -5492,6 +5790,12 @@ } } + if (ik->is_value()) { + ValueKlass* vk = ValueKlass::cast(ik); + oop val = ik->allocate_instance(CHECK_NULL); + vk->set_default_value(val); + } + return ik; } @@ -5512,7 +5816,7 @@ assert(_field_info != NULL, "invariant"); assert(ik->static_field_size() == _field_info->static_field_size, "sanity"); - assert(ik->nonstatic_oop_map_count() == _field_info->total_oop_map_count, + assert(ik->nonstatic_oop_map_count() == _field_info->oop_map_blocks->nonstatic_oop_map_count, "sanity"); assert(ik->is_instance_klass(), "sanity"); @@ -5525,7 +5829,7 @@ ik->set_nonstatic_field_size(_field_info->nonstatic_field_size); ik->set_has_nonstatic_fields(_field_info->has_nonstatic_fields); assert(_fac != NULL, "invariant"); - ik->set_static_oop_field_count(_fac->count[STATIC_OOP]); + ik->set_static_oop_field_count(_fac->count[STATIC_OOP] + _fac->count[STATIC_FLATTENABLE]); // this transfers ownership of a lot of arrays from // the parser onto the InstanceKlass* @@ -5613,13 +5917,13 @@ // Compute transitive closure of interfaces this class implements // Do final class setup - fill_oop_maps(ik, - _field_info->nonstatic_oop_map_count, - _field_info->nonstatic_oop_offsets, - _field_info->nonstatic_oop_counts); + OopMapBlocksBuilder* oop_map_blocks = _field_info->oop_map_blocks; + if (oop_map_blocks->nonstatic_oop_map_count > 0) { + oop_map_blocks->copy(ik->start_of_nonstatic_oop_maps()); + } // Fill in has_finalizer, has_vanilla_constructor, and layout_helper - set_precomputed_flags(ik); + set_precomputed_flags(ik, CHECK); // check if this class can access its super class check_super_class_access(ik, CHECK); @@ -5669,6 +5973,29 @@ } } + int nfields = ik->java_fields_count(); + if (ik->is_value()) 
nfields++; + for (int i = 0; i < nfields; i++) { + if (ik->field_access_flags(i) & JVM_ACC_FLATTENABLE) { + Symbol* klass_name = ik->field_signature(i)->fundamental_name(CHECK); + // Value classes must have been pre-loaded + Klass* klass = SystemDictionary::find(klass_name, + Handle(THREAD, ik->class_loader()), + Handle(THREAD, ik->protection_domain()), CHECK); + assert(klass != NULL, "Sanity check"); + assert(klass->access_flags().is_value_type(), "Value type expected"); + ik->set_value_field_klass(i, klass); + klass_name->decrement_refcount(); + } else if (is_value_type() && ((ik->field_access_flags(i) & JVM_ACC_FIELD_INTERNAL) != 0) + && ((ik->field_access_flags(i) & JVM_ACC_STATIC) != 0)) { + ValueKlass::cast(ik)->set_default_value_offset(ik->field_offset(i)); + } + } + + if (is_value_type()) { + ValueKlass::cast(ik)->initialize_calling_convention(CHECK); + } + ClassLoadingService::notify_class_loaded(ik, false /* not shared class */); if (!is_internal()) { @@ -5858,6 +6185,7 @@ _has_nonstatic_concrete_methods(false), _declares_nonstatic_concrete_methods(false), _has_final_method(false), + _has_flattenable_fields(false), _has_finalizer(false), _has_empty_finalizer(false), _has_vanilla_constructor(false), @@ -6053,15 +6381,19 @@ // ACCESS FLAGS stream->guarantee_more(8, CHECK); // flags, this_class, super_class, infs_len - // Access flags - jint flags; + jint recognized_modifiers = JVM_RECOGNIZED_CLASS_MODIFIERS; // JVM_ACC_MODULE is defined in JDK-9 and later. 
if (_major_version >= JAVA_9_VERSION) { - flags = stream->get_u2_fast() & (JVM_RECOGNIZED_CLASS_MODIFIERS | JVM_ACC_MODULE); - } else { - flags = stream->get_u2_fast() & JVM_RECOGNIZED_CLASS_MODIFIERS; + recognized_modifiers |= JVM_ACC_MODULE; + } + // JVM_ACC_VALUE is defined for class file version 55 and later + if (supports_value_types()) { + recognized_modifiers |= JVM_ACC_VALUE; } + // Access flags + jint flags = stream->get_u2_fast() & recognized_modifiers; + if ((flags & JVM_ACC_INTERFACE) && _major_version < JAVA_6_VERSION) { // Set abstract bit for old class files for backward compatibility flags |= JVM_ACC_ABSTRACT; @@ -6196,6 +6528,7 @@ _fac = new FieldAllocationCount(); parse_fields(stream, _access_flags.is_interface(), + _access_flags.is_value_type(), _fac, cp, cp_size, @@ -6208,6 +6541,7 @@ AccessFlags promoted_flags; parse_methods(stream, _access_flags.is_interface(), + _access_flags.is_value_type(), &promoted_flags, &_has_final_method, &_declares_nonstatic_concrete_methods, @@ -6289,6 +6623,14 @@ ); return; } + + // For a value class, only java/lang/Object is an acceptable super class + if (_access_flags.get_flags() & JVM_ACC_VALUE) { + guarantee_property(_super_klass->name() == vmSymbols::java_lang_Object(), + "Value type must have java.lang.Object as superclass in class file %s", + CHECK); + } + // Make sure super class is not final if (_super_klass->is_final()) { THROW_MSG(vmSymbols::java_lang_VerifyError(), "Cannot inherit from final class"); @@ -6329,6 +6671,19 @@ assert(_fac != NULL, "invariant"); assert(_parsed_annotations != NULL, "invariant"); + + for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) { + if (fs.is_flattenable()) { + // Pre-load value class + Klass* klass = SystemDictionary::resolve_flattenable_field_or_fail(&fs, + Handle(THREAD, _loader_data->class_loader()), + _protection_domain, true, CHECK); + assert(klass != NULL, "Sanity check"); + assert(klass->access_flags().is_value_type(), "Value type expected"); + 
_has_flattenable_fields = true; + } + } + _field_info = new FieldLayoutInfo(); layout_fields(cp, _fac, _parsed_annotations, _field_info, CHECK); @@ -6366,6 +6721,7 @@ return _stream->clone(); } + // ---------------------------------------------------------------------------- // debugging