  int nonstatic_contended_count = 0;
  FieldAllocationCount fac_contended;
  for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {
    FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();
    if (fs.is_contended()) {
      fac_contended.count[atype]++;
      if (!fs.access_flags().is_static()) {
        nonstatic_contended_count++;
      }
    }
  }
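  // fac_contended is used to subtract the @Contended fields out of the regular
  // per-type counts; those fields are laid out separately, in the padded
  // region built by the contended-group loop further down.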

  // Calculate the starting byte offsets
  int next_static_oop_offset = InstanceMirrorKlass::offset_of_static_fields();
  int next_static_double_offset = next_static_oop_offset +
                                  ((fac->count[STATIC_OOP]) * heapOopSize);
  if (fac->count[STATIC_DOUBLE] &&
      (Universe::field_type_should_be_aligned(T_DOUBLE) ||
       Universe::field_type_should_be_aligned(T_LONG))) {
    next_static_double_offset = align_size_up(next_static_double_offset, BytesPerLong);
  }

  int next_static_word_offset = next_static_double_offset +
                                ((fac->count[STATIC_DOUBLE]) * BytesPerLong);
  int next_static_short_offset = next_static_word_offset +
                                 ((fac->count[STATIC_WORD]) * BytesPerInt);
  int next_static_byte_offset = next_static_short_offset +
                                ((fac->count[STATIC_SHORT]) * BytesPerShort);
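  // Static fields live in the java.lang.Class mirror and are packed by
  // decreasing size after the oops: oops, longs/doubles, ints, shorts/chars,
  // then bytes.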

  int nonstatic_fields_start = instanceOopDesc::base_offset_in_bytes() +
                               nonstatic_field_size * heapOopSize;

  int next_nonstatic_field_offset = nonstatic_fields_start;

  const bool is_contended_class = parsed_annotations->is_contended();

  // Class is contended, pad before all the fields
  if (is_contended_class) {
    next_nonstatic_field_offset += ContendedPaddingWidth;
  }
      // ... [lines 3738-3838 elided] ...
      allocation_style = 1;  // allocate oops last
      next_nonstatic_double_offset = next_nonstatic_field_offset;
    }
  } else {
    ShouldNotReachHere();
  }
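  // allocation_style (from -XX:FieldsAllocationStyle) picks the field order:
  // 0 = oops first, 1 = oops last (after the primitives), 2 = try to place
  // oops next to the superclass's oops, otherwise fall back to style 1.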

  int nonstatic_oop_space_count   = 0;
  int nonstatic_word_space_count  = 0;
  int nonstatic_short_space_count = 0;
  int nonstatic_byte_space_count  = 0;
  int nonstatic_oop_space_offset  = 0;
  int nonstatic_word_space_offset = 0;
  int nonstatic_short_space_offset = 0;
  int nonstatic_byte_space_offset  = 0;
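  // The *_space_count/*_space_offset variables record the few small fields
  // that get relocated into the 4-byte alignment gap in front of the first
  // long/double field (see the compact_fields block below).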

  // Try to squeeze some of the fields into the gaps due to
  // long/double alignment.
  if (nonstatic_double_count > 0) {
    int offset = next_nonstatic_double_offset;
    next_nonstatic_double_offset = align_size_up(offset, BytesPerLong);
    if (compact_fields && offset != next_nonstatic_double_offset) {
      // Allocate available fields into the gap before double field.
      int length = next_nonstatic_double_offset - offset;
      assert(length == BytesPerInt, "");
      nonstatic_word_space_offset = offset;
      if (nonstatic_word_count > 0) {
        nonstatic_word_count -= 1;
        nonstatic_word_space_count = 1; // Only one will fit
        length -= BytesPerInt;
        offset += BytesPerInt;
      }
      nonstatic_short_space_offset = offset;
      while (length >= BytesPerShort && nonstatic_short_count > 0) {
        nonstatic_short_count -= 1;
        nonstatic_short_space_count += 1;
        length -= BytesPerShort;
        offset += BytesPerShort;
      }
      nonstatic_byte_space_offset = offset;
      while (length > 0 && nonstatic_byte_count > 0) {
        // ... [lines 3880-3888 elided] ...
        nonstatic_oop_space_count = 1; // Only one will fit
        length -= heapOopSize;
        offset += heapOopSize;
      }
    }
  }

  int next_nonstatic_word_offset = next_nonstatic_double_offset +
                                   (nonstatic_double_count * BytesPerLong);
  int next_nonstatic_short_offset = next_nonstatic_word_offset +
                                    (nonstatic_word_count * BytesPerInt);
  int next_nonstatic_byte_offset = next_nonstatic_short_offset +
                                   (nonstatic_short_count * BytesPerShort);
  int next_nonstatic_padded_offset = next_nonstatic_byte_offset +
                                     nonstatic_byte_count;
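  // next_nonstatic_padded_offset marks the end of the regular instance
  // fields; the @Contended fields are appended there later, separated from
  // their neighbors by ContendedPaddingWidth bytes of padding.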

  // let oops jump before padding with this allocation style
  if (allocation_style == 1) {
    next_nonstatic_oop_offset = next_nonstatic_padded_offset;
    if (nonstatic_oop_count > 0) {
      next_nonstatic_oop_offset = align_size_up(next_nonstatic_oop_offset, heapOopSize);
    }
    next_nonstatic_padded_offset = next_nonstatic_oop_offset + (nonstatic_oop_count * heapOopSize);
  }
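  // With allocation_style 1 the oops are laid out after all primitives but
  // before the padded (@Contended) region, so the padding stays at the very
  // end of the regular fields.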

  // Iterate over fields again and compute correct offsets.
  // The field allocation type was temporarily stored in the offset slot.
  // oop fields are located before non-oop fields (static and non-static).
  for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {

    // skip already laid out fields
    if (fs.is_offset_set()) continue;

    // contended instance fields are handled below
    if (fs.is_contended() && !fs.access_flags().is_static()) continue;

    int real_offset = 0;
    const FieldAllocationType atype = (const FieldAllocationType) fs.allocation_type();

    // pack the rest of the fields
    switch (atype) {
    // ... [lines 3930-4043 elided] ...

    int current_group = -1;
    while ((current_group = (int)bm.get_next_one_offset(current_group + 1)) != (int)bm.size()) {

      for (AllFieldStream fs(_fields, cp); !fs.done(); fs.next()) {

        // skip already laid out fields
        if (fs.is_offset_set()) continue;

        // skip non-contended fields and fields from different group
        if (!fs.is_contended() || (fs.contended_group() != current_group)) continue;

        // handle statics below
        if (fs.access_flags().is_static()) continue;

        int real_offset = 0;
        FieldAllocationType atype = (FieldAllocationType) fs.allocation_type();

        switch (atype) {
          case NONSTATIC_BYTE:
            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, 1);
            real_offset = next_nonstatic_padded_offset;
            next_nonstatic_padded_offset += 1;
            break;

          case NONSTATIC_SHORT:
            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerShort);
            real_offset = next_nonstatic_padded_offset;
            next_nonstatic_padded_offset += BytesPerShort;
            break;

          case NONSTATIC_WORD:
            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerInt);
            real_offset = next_nonstatic_padded_offset;
            next_nonstatic_padded_offset += BytesPerInt;
            break;

          case NONSTATIC_DOUBLE:
            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, BytesPerLong);
            real_offset = next_nonstatic_padded_offset;
            next_nonstatic_padded_offset += BytesPerLong;
            break;

          case NONSTATIC_OOP:
            next_nonstatic_padded_offset = align_size_up(next_nonstatic_padded_offset, heapOopSize);
            real_offset = next_nonstatic_padded_offset;
            next_nonstatic_padded_offset += heapOopSize;

            // Record this oop in the oop maps
            if (nonstatic_oop_map_count > 0 &&
                nonstatic_oop_offsets[nonstatic_oop_map_count - 1] ==
                real_offset -
                int(nonstatic_oop_counts[nonstatic_oop_map_count - 1]) *
                heapOopSize) {
              // This oop is adjacent to the previous one, add to current oop map
              assert(nonstatic_oop_map_count - 1 < max_nonstatic_oop_maps, "range check");
              nonstatic_oop_counts[nonstatic_oop_map_count - 1] += 1;
            } else {
              // This oop is not adjacent to the previous one, create new oop map
              assert(nonstatic_oop_map_count < max_nonstatic_oop_maps, "range check");
              nonstatic_oop_offsets[nonstatic_oop_map_count] = real_offset;
              nonstatic_oop_counts [nonstatic_oop_map_count] = 1;
              nonstatic_oop_map_count += 1;
              if (first_nonstatic_oop_offset == 0) { // Undefined
                first_nonstatic_oop_offset = real_offset;
      // ... [lines 4109-4129 elided] ...
      // this is expected to alleviate memory contention effects for
      // subclass fields and/or adjacent object.
      // If this was the default group, the padding is already in place.
      if (current_group != 0) {
        next_nonstatic_padded_offset += ContendedPaddingWidth;
      }
    }

    // handle static fields
  }

  // Entire class is contended, pad in the back.
  // This helps to alleviate memory contention effects for subclass fields
  // and/or adjacent object.
  if (is_contended_class) {
    next_nonstatic_padded_offset += ContendedPaddingWidth;
  }

  int notaligned_nonstatic_fields_end = next_nonstatic_padded_offset;

  int nonstatic_fields_end = align_size_up(notaligned_nonstatic_fields_end, heapOopSize);
  int instance_end = align_size_up(notaligned_nonstatic_fields_end, wordSize);
  int static_fields_end = align_size_up(next_static_byte_offset, wordSize);

  int static_field_size = (static_fields_end -
                           InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
  nonstatic_field_size = nonstatic_field_size +
                         (nonstatic_fields_end - nonstatic_fields_start) / heapOopSize;
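  // nonstatic_field_size is measured in heapOopSize units and already
  // included the superclass's instance fields; only the delta added by this
  // class is accumulated here.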

  int instance_size = align_object_size(instance_end / wordSize);

  assert(instance_size == align_object_size(align_size_up(
         (instanceOopDesc::base_offset_in_bytes() + nonstatic_field_size*heapOopSize),
         wordSize) / wordSize), "consistent layout helper value");

  // Invariant: nonstatic_field end/start should only change if there are
  // nonstatic fields in the class, or if the class is contended. We compare
  // against the non-aligned value, so that end alignment will not fail the
  // assert without actually having the fields.
  assert((notaligned_nonstatic_fields_end == nonstatic_fields_start) ||
         is_contended_class ||
         (nonstatic_fields_count > 0), "double-check nonstatic start/end");

  // Number of non-static oop map blocks allocated at end of klass.
  const unsigned int total_oop_map_count =
    compute_oop_map_count(_super_klass, nonstatic_oop_map_count,
                          first_nonstatic_oop_offset);
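  // total_oop_map_count covers the oop map blocks inherited from the
  // superclass plus the new ones built above; if the first new block abuts
  // the superclass's last block, the two are counted (and later filled) as one.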

#ifndef PRODUCT
  if (PrintFieldLayout) {
    print_field_layout(_class_name,
                       _fields,
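
// ---------------------------------------------------------------------------
// Standalone sketch (not HotSpot code) of the gap-packing arithmetic used in
// the compact_fields block above. The helper name align_to is made up for
// this example; the parser uses align_size_up() and the nonstatic_*_space_*
// bookkeeping variables instead.
#include <cassert>
#include <cstdio>

static int align_to(int value, int alignment) {
  // Round value up to the next multiple of alignment (a power of two).
  return (value + alignment - 1) & ~(alignment - 1);
}

int main() {
  const int BytesPerInt = 4, BytesPerLong = 8;

  // Suppose the next free offset is 20: the first long/double must start at
  // 24, which leaves a 4-byte hole in front of it.
  int offset = 20;
  int next_double_offset = align_to(offset, BytesPerLong);  // 24
  int gap = next_double_offset - offset;                    // 4
  assert(gap == BytesPerInt);

  // One int fills the hole completely; with no ints pending, two shorts
  // (at 20 and 22), up to four bytes, or a single 4-byte compressed oop
  // could be packed there instead, mirroring the loops in the parser.
  int int_in_gap = offset;
  printf("double at %d, int packed into the gap at %d\n",
         next_double_offset, int_in_gap);
  return 0;
}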