
src/hotspot/share/classfile/classFileParser.cpp



Old version:

3886 
3887   // Count the contended fields by type.
3888   //
3889   // We ignore static fields, because @Contended is not supported for them.
3890   // The layout code below will also ignore the static fields.
3891   int nonstatic_contended_count = 0;
3892   FieldAllocationCount fac_contended;
3893   for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
3894     FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
3895     if (fs.is_contended()) {
3896       fac_contended.count[atype]++;
3897       if (!fs.access_flags().is_static()) {
3898         nonstatic_contended_count++;
3899       }
3900     }
3901   }
3902 
3903 
3904   // Calculate the starting byte offsets
3905   int next_static_oop_offset    = InstanceMirrorKlass::offset_of_static_fields();
3906   // Value types in static fields are nor embedded, they are handled with oops
3907   int next_static_double_offset = next_static_oop_offset +
3908                                   ((fac->count[STATIC_OOP] + fac->count[STATIC_VALUETYPE]) * heapOopSize);
3909   if ( fac->count[STATIC_DOUBLE] &&
3910        (Universe::field_type_should_be_aligned(T_DOUBLE) ||
3911         Universe::field_type_should_be_aligned(T_LONG)) ) {
3912     next_static_double_offset = align_up(next_static_double_offset, BytesPerLong);
3913   }
3914 
3915   int next_static_word_offset   = next_static_double_offset +
3916                                     ((fac->count[STATIC_DOUBLE]) * BytesPerLong);
3917   int next_static_short_offset  = next_static_word_offset +
3918                                     ((fac->count[STATIC_WORD]) * BytesPerInt);
3919   int next_static_byte_offset   = next_static_short_offset +
3920                                   ((fac->count[STATIC_SHORT]) * BytesPerShort);
3921 
3922   int nonstatic_fields_start  = instanceOopDesc::base_offset_in_bytes() +
3923                                 nonstatic_field_size * heapOopSize;
3924 
3925   // First field of value types is aligned on a long boundary in order to ease
3926   // in-lining of value types (with header removal) in packed arrays and


3970 
3971   nonstatic_value_type_indexes = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, int,
3972                                                               max_nonstatic_value_type);
3973   for (int i = 0; i < max_nonstatic_value_type; i++) {
3974     nonstatic_value_type_indexes[i] = -1;
3975   }
3976   nonstatic_value_type_klasses = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, Klass*,
3977                                                               max_nonstatic_value_type);
3978 
3979   for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
3980     if (fs.allocation_type() == STATIC_VALUETYPE) {
3981       static_value_type_count++;
3982     } else if (fs.allocation_type() == NONSTATIC_VALUETYPE) {
3983       Symbol* signature = fs.signature();
3984       Klass* klass = SystemDictionary::resolve_or_fail(signature,
3985                                                        Handle(THREAD, _loader_data->class_loader()),
3986                                                        _protection_domain, true, CHECK);
3987       assert(klass != NULL, "Sanity check");
3988       assert(klass->access_flags().is_value_type(), "Value type expected");
3989       ValueKlass* vk = ValueKlass::cast(klass);
3990       // Conditions to apply flattening or not should be defined
3991       //in a single place
3992       if (vk->size_helper() <= ValueArrayElemMaxFlatSize) {
3993         nonstatic_value_type_indexes[nonstatic_value_type_count] = fs.index();
3994         nonstatic_value_type_klasses[nonstatic_value_type_count] = klass;
3995         nonstatic_value_type_count++;
3996 
3997         ValueKlass* vklass = ValueKlass::cast(klass);
3998         if (vklass->contains_oops()) {
3999           value_type_oop_map_count += vklass->nonstatic_oop_map_count();
4000         }
4001         fs.set_flattening(true);
4002       } else {
4003         not_flattened_value_types++;
4004         fs.set_flattening(false);
4005       }
4006     }
4007   }
4008 
4009   // Adjust nonstatic_oop_count to take non-flattened value types into account
4010   nonstatic_oop_count += not_flattened_value_types;
4011 
4012   // Total non-static fields count, including every contended field


New version:

3886 
3887   // Count the contended fields by type.
3888   //
3889   // We ignore static fields, because @Contended is not supported for them.
3890   // The layout code below will also ignore the static fields.
3891   int nonstatic_contended_count = 0;
3892   FieldAllocationCount fac_contended;
3893   for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
3894     FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
3895     if (fs.is_contended()) {
3896       fac_contended.count[atype]++;
3897       if (!fs.access_flags().is_static()) {
3898         nonstatic_contended_count++;
3899       }
3900     }
3901   }
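
The loop above only counts @Contended fields per allocation type and remembers how many non-static ones exist; the padded layout itself is produced further down. A minimal standalone sketch of the same bookkeeping, using simplified stand-ins for the field stream and allocation-type table (FieldDesc and the trimmed enum are illustrative, not HotSpot types):

// Standalone sketch (illustrative types, not HotSpot code): count
// @Contended fields per allocation type, skipping statics, exactly as
// the parser loop above does.
#include <cstdio>

enum FieldAllocationType { STATIC_OOP, NONSTATIC_OOP, NONSTATIC_BYTE,
                           MAX_FIELD_ALLOCATION_TYPE };

struct FieldDesc {            // stand-in for one AllFieldStream entry
  FieldAllocationType type;
  bool is_contended;
  bool is_static;
};

int main() {
  FieldDesc fields[] = {
    { NONSTATIC_OOP,  true,  false },   // @Contended instance field
    { NONSTATIC_BYTE, true,  false },   // @Contended instance field
    { STATIC_OOP,     true,  true  },   // @Contended static: counted per type, but not padded
    { NONSTATIC_OOP,  false, false },   // ordinary field
  };

  int count[MAX_FIELD_ALLOCATION_TYPE] = { 0 };   // per-type counts, like the fac_contended table
  int nonstatic_contended_count = 0;
  for (const FieldDesc& f : fields) {
    if (f.is_contended) {
      count[f.type]++;
      if (!f.is_static) {
        nonstatic_contended_count++;
      }
    }
  }
  printf("contended non-static oops: %d, non-static contended fields: %d\n",
         count[NONSTATIC_OOP], nonstatic_contended_count);   // prints 1 and 2
  return 0;
}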
3902 
3903 
3904   // Calculate the starting byte offsets
3905   int next_static_oop_offset    = InstanceMirrorKlass::offset_of_static_fields();
3906   // Value types in static fields are not embedded, they are handled with oops
3907   int next_static_double_offset = next_static_oop_offset +
3908                                   ((fac->count[STATIC_OOP] + fac->count[STATIC_VALUETYPE]) * heapOopSize);
3909   if ( fac->count[STATIC_DOUBLE] &&
3910        (Universe::field_type_should_be_aligned(T_DOUBLE) ||
3911         Universe::field_type_should_be_aligned(T_LONG)) ) {
3912     next_static_double_offset = align_up(next_static_double_offset, BytesPerLong);
3913   }
3914 
3915   int next_static_word_offset   = next_static_double_offset +
3916                                     ((fac->count[STATIC_DOUBLE]) * BytesPerLong);
3917   int next_static_short_offset  = next_static_word_offset +
3918                                     ((fac->count[STATIC_WORD]) * BytesPerInt);
3919   int next_static_byte_offset   = next_static_short_offset +
3920                                   ((fac->count[STATIC_SHORT]) * BytesPerShort);
3921 
3922   int nonstatic_fields_start  = instanceOopDesc::base_offset_in_bytes() +
3923                                 nonstatic_field_size * heapOopSize;
3924 
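The offsets above pack the static-field blocks in order of decreasing element size: references first (including static value type fields, which are not embedded and are stored as oops), then 8-byte, 4-byte, 2-byte and finally 1-byte primitives, with the 8-byte block aligned up to a long boundary when the platform requires it. A worked sketch of that arithmetic with made-up numbers (the 128-byte start offset, the compressed-oop heapOopSize of 4 and the field counts are illustrative, not values any particular VM configuration is guaranteed to produce):

// Standalone sketch (illustrative values only) of the static-field
// offset packing performed above.
#include <cstdio>

static int align_up(int value, int alignment) {   // power-of-two round-up, like the align_up call above
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const int heapOopSize   = 4;   // example: compressed oops
  const int BytesPerLong  = 8;
  const int BytesPerInt   = 4;
  const int BytesPerShort = 2;

  // Illustrative per-type static field counts for one class.
  int static_oops    = 3;   // oop fields plus static value type fields (stored as oops)
  int static_doubles = 1;   // longs/doubles
  int static_words   = 2;   // ints/floats
  int static_shorts  = 1;   // shorts/chars

  int next_static_oop_offset    = 128;  // stand-in for InstanceMirrorKlass::offset_of_static_fields()
  int next_static_double_offset = next_static_oop_offset + static_oops * heapOopSize;        // 140
  next_static_double_offset     = align_up(next_static_double_offset, BytesPerLong);         // 144
  int next_static_word_offset   = next_static_double_offset + static_doubles * BytesPerLong; // 152
  int next_static_short_offset  = next_static_word_offset   + static_words   * BytesPerInt;  // 160
  int next_static_byte_offset   = next_static_short_offset  + static_shorts  * BytesPerShort;// 162

  printf("oops@%d doubles@%d words@%d shorts@%d bytes@%d\n",
         next_static_oop_offset, next_static_double_offset, next_static_word_offset,
         next_static_short_offset, next_static_byte_offset);
  return 0;
}
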
3925   // First field of value types is aligned on a long boundary in order to ease
3926   // in-lining of value types (with header removal) in packed arrays and


3970 
3971   nonstatic_value_type_indexes = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, int,
3972                                                               max_nonstatic_value_type);
3973   for (int i = 0; i < max_nonstatic_value_type; i++) {
3974     nonstatic_value_type_indexes[i] = -1;
3975   }
3976   nonstatic_value_type_klasses = NEW_RESOURCE_ARRAY_IN_THREAD(THREAD, Klass*,
3977                                                               max_nonstatic_value_type);
3978 
3979   for (AllFieldStream fs(_fields, _cp); !fs.done(); fs.next()) {
3980     if (fs.allocation_type() == STATIC_VALUETYPE) {
3981       static_value_type_count++;
3982     } else if (fs.allocation_type() == NONSTATIC_VALUETYPE) {
3983       Symbol* signature = fs.signature();
3984       Klass* klass = SystemDictionary::resolve_or_fail(signature,
3985                                                        Handle(THREAD, _loader_data->class_loader()),
3986                                                        _protection_domain, true, CHECK);
3987       assert(klass != NULL, "Sanity check");
3988       assert(klass->access_flags().is_value_type(), "Value type expected");
3989       ValueKlass* vk = ValueKlass::cast(klass);
3990       // Conditions to apply flattening or not should be defined in a single place
3991       if ((ValueFieldMaxFlatSize < 0) || vk->size_helper() <= ValueFieldMaxFlatSize) {
3992         nonstatic_value_type_indexes[nonstatic_value_type_count] = fs.index();
3993         nonstatic_value_type_klasses[nonstatic_value_type_count] = klass;
3994         nonstatic_value_type_count++;
3995 
3996         ValueKlass* vklass = ValueKlass::cast(klass);
3997         if (vklass->contains_oops()) {
3998           value_type_oop_map_count += vklass->nonstatic_oop_map_count();
3999         }
4000         fs.set_flattening(true);
4001       } else {
4002         not_flattened_value_types++;
4003         fs.set_flattening(false);
4004       }
4005     }
4006   }
4007 
4008   // Adjust nonstatic_oop_count to take non-flattened value types into account
4009   nonstatic_oop_count += not_flattened_value_types;
4010 
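The value-type loop above is where the two versions differ: the old code gated field flattening on ValueArrayElemMaxFlatSize, while the new code uses ValueFieldMaxFlatSize and treats a negative flag value as "no size limit". Fields that are flattened contribute their embedded oop maps to value_type_oop_map_count; fields left unflattened are laid out as ordinary references, which is why nonstatic_oop_count is incremented afterwards. A minimal sketch of that decision and bookkeeping (the flag value, field sizes and oop-map counts are invented for illustration; this is not the actual layout code):

// Standalone sketch (illustrative only) of the flattening decision and
// the resulting oop bookkeeping performed above.
#include <cstdio>

struct ValueFieldDesc {
  int size_in_words;       // stand-in for ValueKlass::size_helper()
  int embedded_oop_maps;   // stand-in for nonstatic_oop_map_count()
};

// Mirrors the predicate in the new code: a negative limit means "always flatten".
static bool should_flatten(int size_in_words, int ValueFieldMaxFlatSize) {
  return (ValueFieldMaxFlatSize < 0) || size_in_words <= ValueFieldMaxFlatSize;
}

int main() {
  const int ValueFieldMaxFlatSize = 16;   // example flag value (in words)

  ValueFieldDesc fields[] = { { 4, 1 }, { 32, 2 }, { 8, 0 } };

  int nonstatic_oop_count       = 5;  // oops declared directly by the class (example)
  int value_type_oop_map_count  = 0;
  int not_flattened_value_types = 0;

  for (const ValueFieldDesc& f : fields) {
    if (should_flatten(f.size_in_words, ValueFieldMaxFlatSize)) {
      value_type_oop_map_count += f.embedded_oop_maps;  // flattened: embed its oop maps
    } else {
      not_flattened_value_types++;                      // too big: keep it as a reference
    }
  }
  nonstatic_oop_count += not_flattened_value_types;     // unflattened value fields are plain oops

  printf("oops=%d embedded oop maps=%d\n", nonstatic_oop_count, value_type_oop_map_count);
  // With these numbers the 32-word field stays a reference, so oops=6, embedded oop maps=1.
  return 0;
}
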
4011   // Total non-static fields count, including every contended field

