/*
 * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classFileParser.hpp"
#include "classfile/fieldLayoutBuilder.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/valueKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"

RawBlock::RawBlock(Kind kind, int size) {
  assert(kind == EMPTY || kind == RESERVED || kind == PADDING || kind == INHERITED,
      "Otherwise, should use the constructor with a field index argument");
  assert(size > 0, "Sanity check");
  _next_field = NULL;
  _prev_field = NULL;
  _next_block = NULL;
  _prev_block = NULL;
  _field_index = -1; // no field
  _kind = kind;
  _size = size;
  _alignment = 1;
  _offset = -1;
  _is_reference = false;
  _value_klass = NULL;
}

RawBlock::RawBlock(int index, Kind kind, int size, int alignment, bool is_reference) {
  assert(kind == REGULAR || kind == FLATTENED || kind == INHERITED,
      "Other kinds do not have a field index");
  assert(size > 0, "Sanity check");
  assert(alignment > 0, "Sanity check");
  _next_field = NULL;
  _prev_field = NULL;
  _next_block = NULL;
  _prev_block = NULL;
  _field_index = index;
  _kind = kind;
  _size = size;
  _alignment = alignment;
  _offset = -1;
  _is_reference = is_reference;
  _value_klass = NULL;
}

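/* Returns true if the block is big enough to hold 'size' bytes with the given
 * alignment, including the padding needed to bring the block's offset up to
 * that alignment. For example, an EMPTY block at offset 6 can receive a 4-byte
 * field with 4-byte alignment only if its size is at least 2 + 4 = 6 bytes.
 */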
bool RawBlock::fit(int size, int alignment) {
  int adjustment = 0;
  int misalignment = _offset % alignment;
  if (misalignment != 0) {
    adjustment = alignment - misalignment;
  }
  return _size >= size + adjustment;
}

FieldGroup::FieldGroup(int contended_group) {
  _next = NULL;
  _primitive_fields = NULL;
  _oop_fields = NULL;
  _flattened_fields = NULL;
  _contended_group = contended_group; // -1 means no contended group, 0 means default contended group
  _oop_count = 0;
}

void FieldGroup::add_primitive_field(AllFieldStream fs, BasicType type) {
  int size = type2aelembytes(type);
  RawBlock* block = new RawBlock(fs.index(), RawBlock::REGULAR, size, size /* alignment == size for primitive types */, false);
  add_block(&_primitive_fields, block);
}

void FieldGroup::add_oop_field(AllFieldStream fs) {
  int size = type2aelembytes(T_OBJECT);
  RawBlock* block = new RawBlock(fs.index(), RawBlock::REGULAR, size, size /* alignment == size for oops */, true);
  add_block(&_oop_fields, block);
  _oop_count++;
}

void FieldGroup::add_flattened_field(AllFieldStream fs, ValueKlass* vk) {
  // _flattened_fields list might be merged with the _primitive_fields list in the future
  RawBlock* block = new RawBlock(fs.index(), RawBlock::FLATTENED, vk->get_exact_size_in_bytes(), vk->get_alignment(), false);
  block->set_value_klass(vk);
  add_block(&_flattened_fields, block);
}

/* Adds a field to a field group. Inside a field group, fields are sorted by
 * decreasing size. Fields with the same size are sorted according to their
 * order of insertion (an easy way to respect field order for classes with
 * hard-coded offsets).
 */
void FieldGroup::add_block(RawBlock** list, RawBlock* block) {
  if (*list == NULL) {
    *list = block;
  } else {
    if (block->size() > (*list)->size()) {  // cannot be >= in order to preserve insertion order (for classes with hard-coded offsets)
      block->set_next_field(*list);
      (*list)->set_prev_field(block);
      *list = block;
    } else {
      RawBlock* b = *list;
      while (b->next_field() != NULL) {
        if (b->next_field()->size() < block->size()) {
          break;
        }
        b = b->next_field();
      }
      block->set_next_field(b->next_field());
      block->set_prev_field(b);
      if (b->next_field() != NULL) {
        b->next_field()->set_prev_field(block);
      }
      b->set_next_field(block);
    }
  }
}

FieldLayout::FieldLayout(Array<u2>* fields, ConstantPool* cp) {
  _fields = fields;
  _cp = cp;
  _blocks = NULL;
  _start = _blocks;
  _last = _blocks;
}

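/* Sets up the layout of the static fields inside the java.lang.Class mirror:
 * a single EMPTY block initially covers the whole mirror, then the space below
 * InstanceMirrorKlass::offset_of_static_fields() is marked as RESERVED so that
 * no static field can be allocated over the mirror's own content.
 */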
void FieldLayout::initialize_static_layout() {
  _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX);
  _blocks->set_offset(0);
  _last = _blocks;
  _start = _blocks;
  // Note: at this stage, InstanceMirrorKlass::offset_of_static_fields() could be zero, because
  // during bootstrapping, the size of java.lang.Class is not yet known when the layout
  // of static fields is computed. Field offsets are fixed later, when the size is known
  // (see java_lang_Class::fixup_mirror()).
  if (InstanceMirrorKlass::offset_of_static_fields() > 0) {
    insert(first_empty_block(), new RawBlock(RawBlock::RESERVED, InstanceMirrorKlass::offset_of_static_fields()));
    _blocks->set_offset(0);
  }
}

void FieldLayout::initialize_instance_layout(const InstanceKlass* super_klass) {
  if (super_klass == NULL) {
    _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX);
    _blocks->set_offset(0);
    _last = _blocks;
    _start = _blocks;
    insert(first_empty_block(), new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
  } else {
    // The JVM could reconstruct the layouts of the super classes, in order to use the
    // empty slots in these layouts to allocate the current class' fields. However, some
    // code in the JVM is not yet ready to find fields allocated this way, so the
    // optimization is not enabled yet.
#if 0
    reconstruct_layout(super_klass);
    fill_holes(super_klass);
    // _start = _last;  // uncomment to fill holes in super classes' layouts
#else
    _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX);
    _blocks->set_offset(0);
    _last = _blocks;
    insert(_last, new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
    if (super_klass->nonstatic_field_size() > 0) {
      // To take into account the space allocated to super classes' fields, this code
      // uses the nonstatic_field_size() value to allocate a single INHERITED RawBlock.
      // The drawback is that nonstatic_field_size() expresses the size of non-static
      // fields in heapOopSize units, which implies that some space could be lost at the
      // end because the real size is rounded up. Using the exact size, with no rounding
      // up, would be possible, but would require modifications to other code in the JVM
      // performing field lookups (as it often expects this rounding to be applied).
      RawBlock* inherited = new RawBlock(RawBlock::INHERITED,
          super_klass->nonstatic_field_size() * heapOopSize);
      insert(_last, inherited);
    }
    _start = _last;
#endif
  }
}

RawBlock* FieldLayout::first_field_block() {
  RawBlock* block = _start;
  // Not sure the condition below will work well when inheriting a layout with contended padding
  while (block->kind() != RawBlock::INHERITED && block->kind() != RawBlock::REGULAR
      && block->kind() != RawBlock::FLATTENED && block->kind() != RawBlock::PADDING) {
    block = block->next_block();
  }
  return block;
}

/* The allocation logic uses a first-fit strategy: each field is allocated in the
 * first empty slot big enough to contain it (including padding to fit alignment
 * constraints).
 */
void FieldLayout::add(RawBlock* blocks, RawBlock* start) {
  if (start == NULL) {
    start = this->_start;
  }
  RawBlock* b = blocks;
  while (b != NULL) {
    RawBlock* candidate = start;
    while (candidate->kind() != RawBlock::EMPTY || !candidate->fit(b->size(), b->alignment())) {
      candidate = candidate->next_block();
    }
    assert(candidate != NULL && candidate->fit(b->size(), b->alignment()), "paranoid check");
    insert_field_block(candidate, b);
    b = b->next_field();
  }
}

/* The allocation logic uses a first-fit strategy: the whole set of fields is
 * allocated in the first empty slot big enough to contain it (including padding
 * to fit alignment constraints).
 */
void FieldLayout::add_contiguously(RawBlock* blocks, RawBlock* start) {
  if (blocks == NULL) return;
  if (start == NULL) {
    start = _start;
  }
  // This code assumes that if the first block is well aligned, the following
  // blocks are naturally well aligned (no need for adjustment)
  int size = 0;
  RawBlock* b = blocks;
  while (b != NULL) {
    size += b->size();
    b = b->next_field();
  }
  RawBlock* candidate = start;
  while (candidate->kind() != RawBlock::EMPTY || !candidate->fit(size, blocks->alignment())) {
    candidate = candidate->next_block();
  }
  b = blocks;
  while (b != NULL) {
    insert_field_block(candidate, b);
    b = b->next_field();
    assert(b == NULL || (candidate->offset() % b->alignment() == 0), "Contiguous blocks must be naturally well aligned");
  }
}

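/* Inserts a field block at the beginning of an EMPTY slot, carving out a
 * smaller EMPTY block first when the slot's offset does not satisfy the
 * field's alignment. For example, inserting an 8-byte-aligned field into an
 * empty slot starting at offset 12 first creates a 4-byte EMPTY block at
 * offset 12, then places the field at offset 16.
 */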
RawBlock* FieldLayout::insert_field_block(RawBlock* slot, RawBlock* block) {
  assert(slot->kind() == RawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  if (slot->offset() % block->alignment() != 0) {
    int adjustment = block->alignment() - (slot->offset() % block->alignment());
    RawBlock* adj = new RawBlock(RawBlock::EMPTY, adjustment);
    insert(slot, adj);
  }
  insert(slot, block);
  if (slot->size() == 0) {
    remove(slot);
  }
  if (UseNewLayout) {
    FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset());
  }
  return block;
}

void FieldLayout::reconstruct_layout(const InstanceKlass* ik) {
  // TODO: it makes no sense to support static fields here, static fields go into
  // the mirror and are not impacted by the static fields of the parent class
  if (ik->super() != NULL) {
    reconstruct_layout(InstanceKlass::cast(ik->super()));
  } else {
    _blocks = new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes());
    _blocks->set_offset(0);
    _last = _blocks;
    _start = _blocks;
  }
  for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) {
    BasicType type = vmSymbols::signature_type(fs.signature());
    if (fs.access_flags().is_static()) continue;
    if (type != T_VALUETYPE) {
      int size = type2aelembytes(type);
      // INHERITED blocks are marked as non-reference because oop maps are handled by their holder class
      RawBlock* block = new RawBlock(fs.index(), RawBlock::INHERITED, size, size, false);
      block->set_offset(fs.offset());
      insert_per_offset(block);
    } else {
      fatal("Not supported yet");
    }
  }
}

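/* Walks the layout reconstructed by reconstruct_layout() and materializes every
 * gap between two consecutive blocks as an EMPTY block, so that the current
 * class' fields could be allocated in these gaps. A terminal EMPTY block is
 * appended, plus a PADDING block if needed to round the super classes' field
 * area up to a multiple of heapOopSize.
 */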
void FieldLayout::fill_holes(const InstanceKlass* super_klass) {
  assert(_blocks != NULL, "Sanity check");
  assert(_blocks->offset() == 0, "first block must be at offset zero");
  RawBlock* b = _blocks;
  while (b->next_block() != NULL) {
    if (b->next_block()->offset() > (b->offset() + b->size())) {
      int size = b->next_block()->offset() - (b->offset() + b->size());
      RawBlock* empty = new RawBlock(RawBlock::EMPTY, size);
      empty->set_offset(b->offset() + b->size());
      empty->set_next_block(b->next_block());
      b->next_block()->set_prev_block(empty);
      b->set_next_block(empty);
      empty->set_prev_block(b);
    }
    b = b->next_block();
  }
  assert(b->next_block() == NULL, "Invariant at this point");
  if (b->kind() != RawBlock::EMPTY) {
    RawBlock* last = new RawBlock(RawBlock::EMPTY, INT_MAX);
    last->set_offset(b->offset() + b->size());
    assert(last->offset() > 0, "Sanity check");
    b->set_next_block(last);
    last->set_prev_block(b);
    _last = last;
  }
  // The padding is still needed so the size of the super classes' fields can be
  // expressed in heapOopSize units
  int super_end = instanceOopDesc::base_offset_in_bytes() + super_klass->nonstatic_field_size() * heapOopSize;
  if (_last->offset() < super_end) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, super_end - _last->offset());
    insert(_last, padding);
  }
}

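/* Inserts a block at the beginning of an EMPTY slot, shrinking the slot by the
 * size of the inserted block and moving its offset forward accordingly. The
 * caller must ensure that the slot's offset satisfies the block's alignment.
 */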
RawBlock* FieldLayout::insert(RawBlock* slot, RawBlock* block) {
  assert(slot->kind() == RawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  assert(slot->offset() % block->alignment() == 0, "Incompatible alignment");
  block->set_offset(slot->offset());
  slot->set_offset(slot->offset() + block->size());
  slot->set_size(slot->size() - block->size());
  block->set_prev_block(slot->prev_block());
  block->set_next_block(slot);
  slot->set_prev_block(block);
  if (block->prev_block() != NULL) {       // NULL when inserting at the head of the list
    block->prev_block()->set_next_block(block);
  }
  if (_blocks == slot) {
    _blocks = block;
  }
  if (_start == slot) {
    _start = block;
  }
  return block;
}

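/* Inserts a block according to its pre-assigned offset, keeping the list sorted
 * by increasing offsets. Only used when reconstructing the layout of super
 * classes, where blocks come with known offsets.
 */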
void FieldLayout::insert_per_offset(RawBlock* block) {
  if (_blocks == NULL) {
    _blocks = block;
  } else if (_blocks->offset() > block->offset()) {
    block->set_next_block(_blocks);
    _blocks->set_prev_block(block);
    _blocks = block;
  } else {
    RawBlock* b = _blocks;
    while (b->next_block() != NULL && b->next_block()->offset() < block->offset()) {
      b = b->next_block();
    }
    if (b->next_block() == NULL) {
      b->set_next_block(block);
      block->set_prev_block(b);
    } else {
      assert(b->next_block()->offset() >= block->offset(), "Sanity check");
      assert(b->next_block()->offset() > block->offset() || b->next_block()->kind() == RawBlock::EMPTY, "Sanity check");
      block->set_next_block(b->next_block());
      b->next_block()->set_prev_block(block);
      block->set_prev_block(b);
      b->set_next_block(block);
    }
  }
}

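/* Unlinks a block (typically an exhausted EMPTY slot) from the layout, fixing
 * up the head and _start pointers if necessary. The terminal block (_last) can
 * never be removed.
 */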
void FieldLayout::remove(RawBlock* block) {
  assert(block != NULL, "Sanity check");
  assert(block != _last, "Sanity check");
  if (_blocks == block) {
    _blocks = block->next_block();
    if (_blocks != NULL) {
      _blocks->set_prev_block(NULL);
    }
  } else {
    assert(block->prev_block() != NULL, "_prev should be set for non-head blocks");
    block->prev_block()->set_next_block(block->next_block());
    block->next_block()->set_prev_block(block->prev_block());
  }
  if (block == _start) {
    _start = block->prev_block();
  }
}

void FieldLayout::print(outputStream* output) {
  ResourceMark rm;
  RawBlock* b = _blocks;
  while (b != _last) {
    switch (b->kind()) {
    case RawBlock::REGULAR: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr("  %d %s %d %d %s %s",
          b->offset(),
          "REGULAR",
          b->size(),
          b->alignment(),
          fi->signature(_cp)->as_C_string(),
          fi->name(_cp)->as_C_string());
      break;
    }
    case RawBlock::FLATTENED: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr("  %d %s %d %d %s %s",
          b->offset(),
          "FLATTENED",
          b->size(),
          b->alignment(),
          fi->signature(_cp)->as_C_string(),
          fi->name(_cp)->as_C_string());
      break;
    }
    case RawBlock::RESERVED:
      output->print_cr("  %d %s %d",
          b->offset(),
          "RESERVED",
          b->size());
      break;
    case RawBlock::INHERITED:
      output->print_cr("  %d %s %d",
          b->offset(),
          "INHERITED",
          b->size());
      break;
    case RawBlock::EMPTY:
      output->print_cr("  %d %s %d",
          b->offset(),
          "EMPTY",
          b->size());
      break;
    case RawBlock::PADDING:
      output->print_cr("  %d %s %d",
          b->offset(),
          "PADDING",
          b->size());
      break;
    }
    b = b->next_block();
  }
}


FieldLayoutBuilder::FieldLayoutBuilder(ClassFileParser* cfp, FieldLayoutInfo* info) {
  _cfp = cfp;
  _info = info;
  _fields = NULL;
  _root_group = NULL;
  _contended_groups = NULL;
  _static_fields = NULL;
  _layout = NULL;
  _static_layout = NULL;
  _nonstatic_oopmap_count = 0;
  // Inline class specific information
  _alignment = -1;
  _first_field_offset = -1;
  _exact_size_in_bytes = -1;
  _has_nonstatic_fields = false;
  _has_flattening_information = _cfp->is_value_type();
}

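/* Returns the FieldGroup of the named contended group 'g', creating and
 * registering it on first use. Anonymous (default) contended groups are not
 * handled here: each one gets its own FieldGroup directly in
 * regular_field_sorting().
 */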
FieldGroup* FieldLayoutBuilder::get_or_create_contended_group(int g) {
  assert(g > 0, "must only be called for named contended groups");
  if (_contended_groups == NULL) {
    _contended_groups = new FieldGroup(g);
    return _contended_groups;
  }
  FieldGroup* group = _contended_groups;
  while (group->next() != NULL) {
    if (group->contended_group() == g) break;
    group = group->next();
  }
  if (group->contended_group() == g) return group;
  group->set_next(new FieldGroup(g));
  return group->next();
}

void FieldLayoutBuilder::prologue() {
  _layout = new FieldLayout(_cfp->_fields, _cfp->_cp);
  const InstanceKlass* super_klass = _cfp->_super_klass;
  _layout->initialize_instance_layout(super_klass);
  if (super_klass != NULL) {
    _has_nonstatic_fields = super_klass->has_nonstatic_fields();
  }
  _static_layout = new FieldLayout(_cfp->_fields, _cfp->_cp);
  _static_layout->initialize_static_layout();
  _static_fields = new FieldGroup();
  _root_group = new FieldGroup();
  _contended_groups = NULL;
}

/* Field sorting for regular (non-inline) classes:
 *   - fields are split into static and non-static fields
 *   - non-static fields are also sorted according to their contention group
 *     (support for the @Contended annotation)
 *   - the @Contended annotation is ignored for static fields
 *   - field flattening decisions are taken in this method
 */
void FieldLayoutBuilder::regular_field_sorting(TRAPS) {
  assert(!_cfp->is_value_type(), "Should only be used for non-inline classes");
  for (AllFieldStream fs(_cfp->_fields, _cfp->_cp); !fs.done(); fs.next()) {
    FieldGroup* group = NULL;
    if (fs.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      if (fs.is_contended()) {
        int g = fs.contended_group();
        if (g == 0) {
          // default group means the field is alone in its contended group
          group = new FieldGroup(0);
          group->set_next(_contended_groups);
          _contended_groups = group;
        } else {
          group = get_or_create_contended_group(g);
        }
      } else {
        group = _root_group;
      }
    }
    assert(group != NULL, "invariant");
    BasicType type = vmSymbols::signature_type(fs.signature());
    switch (type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      group->add_primitive_field(fs, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (group != _static_fields) _nonstatic_oopmap_count++;
      group->add_oop_field(fs);
      break;
    case T_VALUETYPE: {
      if (group == _static_fields) {
        // static fields are never flattened
        group->add_oop_field(fs);
      } else {
        _has_flattening_information = true;
        // Flattening decision to be taken here
        // This code assumes all verifications have been performed earlier
        // (the field is flattenable, and the field's type has been loaded
        // and is an inline klass)
        Klass* klass =
            SystemDictionary::resolve_flattenable_field_or_fail(&fs,
                Handle(THREAD, _cfp->_loader_data->class_loader()),
                _cfp->_protection_domain, true, CHECK);
        assert(klass != NULL, "Sanity check");
        ValueKlass* vk = ValueKlass::cast(klass);
        bool flattened = (ValueFieldMaxFlatSize < 0)
                         || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
        if (flattened) {
          group->add_flattened_field(fs, vk);
          _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
          fs.set_flattened(true);
        } else {
          _nonstatic_oopmap_count++;
          group->add_oop_field(fs);
        }
      }
      break;
    }
    default:
      fatal("Unexpected BasicType");
    }
  }
}

/* Field sorting for inline classes:
 *   - because inline classes are immutable, the @Contended annotation is ignored
 *     when computing their layout (with only read operations, there's no false
 *     sharing issue)
 *   - this method also records the alignment of the field with the most
 *     constraining alignment; this value is then used as the alignment
 *     constraint when flattening this inline type into another container
 *   - field flattening decisions are taken in this method (those decisions are
 *     currently only based on the size of the fields to be flattened, the size
 *     of the resulting instance is not considered)
 */
void FieldLayoutBuilder::inline_class_field_sorting(TRAPS) {
  assert(_cfp->is_value_type(), "Should only be used for inline classes");
  int alignment = 1;
  for (AllFieldStream fs(_cfp->_fields, _cfp->_cp); !fs.done(); fs.next()) {
    FieldGroup* group = NULL;
    int field_alignment = 1;
    if (fs.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      group = _root_group;
    }
    assert(group != NULL, "invariant");
    BasicType type = vmSymbols::signature_type(fs.signature());
    switch (type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      if (group != _static_fields) {
        field_alignment = type2aelembytes(type); // alignment == size for primitive types
      }
      group->add_primitive_field(fs, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (group != _static_fields) {
        _nonstatic_oopmap_count++;
        field_alignment = type2aelembytes(type); // alignment == size for oops
      }
      group->add_oop_field(fs);
      break;
    case T_VALUETYPE: {
      if (group == _static_fields) {
        // static fields are never flattened
        group->add_oop_field(fs);
      } else {
        // Flattening decision to be taken here
        // This code assumes all verifications have been performed earlier
        // (the field is flattenable, and the field's type has been loaded
        // and is an inline klass)
        Klass* klass =
            SystemDictionary::resolve_flattenable_field_or_fail(&fs,
                Handle(THREAD, _cfp->_loader_data->class_loader()),
                _cfp->_protection_domain, true, CHECK);
        assert(klass != NULL, "Sanity check");
        ValueKlass* vk = ValueKlass::cast(klass);
        bool flattened = (ValueFieldMaxFlatSize < 0)
                         || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
        if (flattened) {
          group->add_flattened_field(fs, vk);
          _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
          field_alignment = vk->get_alignment();
          fs.set_flattened(true);
        } else {
          _nonstatic_oopmap_count++;
          field_alignment = type2aelembytes(T_OBJECT);
          group->add_oop_field(fs);
        }
      }
      break;
    }
    default:
      fatal("Unexpected BasicType");
    }
    if (!fs.access_flags().is_static() && field_alignment > alignment) alignment = field_alignment;
  }
  _alignment = alignment;
  if (!_has_nonstatic_fields) {
    // There are a number of fixes required throughout the type system and JIT
    _cfp->throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support zero instance size yet");
    return;
  }
}

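/* Inserts a PADDING block of ContendedPaddingWidth bytes into the given EMPTY
 * slot, to isolate @Contended fields or classes from the rest of the instance.
 */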
void FieldLayoutBuilder::insert_contended_padding(RawBlock* slot) {
  if (ContendedPaddingWidth > 0) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
    _layout->insert(slot, padding);
  }
}

/* The layout computation for regular classes is an evolution of the previous
 * default layout (FieldAllocationStyle 1):
 *   - flattened fields are allocated first (because they have potentially the
 *     least regular shapes, and are more likely to create empty slots between them,
 *     which can then be used to allocate primitive or oop fields). Allocation is
 *     performed from the biggest to the smallest flattened field.
 *   - then primitive fields (from the biggest to the smallest)
 *   - then oop fields are allocated contiguously (to reduce the number of oop maps
 *     and reduce the work of the GC).
 */
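/* A hypothetical example of the strategy above (the exact offsets depend on the
 * VM configuration; an 8-byte header and compressed oops, i.e. 4-byte oop
 * fields, are assumed here): for a class declaring
 *     long l; byte b; Object o;
 * and no flattened fields, l is allocated first at offset 8, then b at offset
 * 16, a 3-byte EMPTY block is left at offsets 17-19, and o is allocated at
 * offset 20.
 */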
void FieldLayoutBuilder::compute_regular_layout(TRAPS) {
  bool need_tail_padding = false;
  prologue();
  regular_field_sorting(CHECK);
  const bool is_contended_class = _cfp->_parsed_annotations->is_contended();
  if (is_contended_class) {
    // Insertion is currently easy because the current strategy doesn't try to fill
    // holes in super classes' layouts => the _start block is consequently the _last block
    insert_contended_padding(_layout->start());
    need_tail_padding = true;
  }
  _layout->add(_root_group->flattened_fields());
  _layout->add(_root_group->primitive_fields());
  _layout->add_contiguously(_root_group->oop_fields());
  FieldGroup* cg = _contended_groups;
  while (cg != NULL) {
    RawBlock* start = _layout->last_block();
    insert_contended_padding(start);
    _layout->add(cg->flattened_fields(), start);
    _layout->add(cg->primitive_fields(), start);
    _layout->add(cg->oop_fields(), start);
    need_tail_padding = true;
    cg = cg->next();
  }
  if (need_tail_padding) {
    insert_contended_padding(_layout->last_block());
  }
  _static_layout->add_contiguously(this->_static_fields->oop_fields());
  _static_layout->add(this->_static_fields->primitive_fields());

  epilogue();
}

/* The layout computation for inline classes uses a slightly different strategy
 * than for regular classes. Regular classes have their oop fields allocated at
 * the end of the layout to improve GC performance. Unfortunately, this strategy
 * increases the number of empty slots inside an instance. Because the purpose
 * of inline classes is to be embedded into other containers, it is critical
 * to keep their size as small as possible. For this reason, the allocation
 * strategy is:
 *   - flattened fields are allocated first (because they have potentially the
 *     least regular shapes, and are more likely to create empty slots between them,
 *     which can then be used to allocate primitive or oop fields). Allocation is
 *     performed from the biggest to the smallest flattened field.
 *   - then oop fields are allocated contiguously (to reduce the number of oop maps
 *     and reduce the work of the GC)
 *   - then primitive fields (from the biggest to the smallest)
 */
void FieldLayoutBuilder::compute_inline_class_layout(TRAPS) {
  prologue();
  inline_class_field_sorting(CHECK);
  if (_layout->start()->offset() % _alignment != 0) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, _alignment - (_layout->start()->offset() % _alignment));
    _layout->insert(_layout->start(), padding);
    _layout->set_start(padding->next_block());
  }
  _first_field_offset = _layout->start()->offset();
  _layout->add(_root_group->flattened_fields());
  _layout->add_contiguously(_root_group->oop_fields());
  _layout->add(_root_group->primitive_fields());
  _exact_size_in_bytes = _layout->last_block()->offset() - _layout->start()->offset();

  _static_layout->add_contiguously(this->_static_fields->oop_fields());
  _static_layout->add(this->_static_fields->primitive_fields());

  epilogue();
}

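/* Copies the oop map entries of a flattened field's ValueKlass into the oop
 * maps of the enclosing class, shifting each entry by the field's position
 * inside its new container.
 */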
void FieldLayoutBuilder::add_flattened_field_oopmap(OopMapBlocksBuilder* nonstatic_oop_maps,
                                                    ValueKlass* vklass, int offset) {
  int diff = offset - vklass->first_field_offset();
  const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps();
  const OopMapBlock* last_map = map + vklass->nonstatic_oop_map_count();
  while (map < last_map) {
    nonstatic_oop_maps->add(map->offset() + diff, map->count());
    map++;
  }
}

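/* Finalizes the layout computation: builds the non-static oop maps (inherited
 * entries first, then entries for the class' own oop fields and for the oops
 * inside its flattened fields), computes instance and static field sizes, and
 * passes the results back through the FieldLayoutInfo structure.
 */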
void FieldLayoutBuilder::epilogue() {
  // Computing oop maps
  int super_oop_map_count = (_cfp->_super_klass == NULL) ? 0 : _cfp->_super_klass->nonstatic_oop_map_count();
  int max_oop_map_count = super_oop_map_count + _nonstatic_oopmap_count;

  OopMapBlocksBuilder* nonstatic_oop_maps =
      new OopMapBlocksBuilder(max_oop_map_count, Thread::current());
  if (super_oop_map_count > 0) {
    nonstatic_oop_maps->initialize_inherited_blocks(_cfp->_super_klass->start_of_nonstatic_oop_maps(),
        _cfp->_super_klass->nonstatic_oop_map_count());
  }
  if (_root_group->oop_fields() != NULL) {
    nonstatic_oop_maps->add(_root_group->oop_fields()->offset(), _root_group->oop_count());
  }
  RawBlock* ff = _root_group->flattened_fields();
  while (ff != NULL) {
    ValueKlass* vklass = ff->value_klass();
    assert(vklass != NULL, "Should have been initialized");
    if (vklass->contains_oops()) {
      add_flattened_field_oopmap(nonstatic_oop_maps, vklass, ff->offset());
    }
    ff = ff->next_field();
  }
  FieldGroup* cg = _contended_groups;
  while (cg != NULL) {
    if (cg->oop_count() > 0) {
      nonstatic_oop_maps->add(cg->oop_fields()->offset(), cg->oop_count());
    }
    RawBlock* ff = cg->flattened_fields();
    while (ff != NULL) {
      ValueKlass* vklass = ff->value_klass();
      assert(vklass != NULL, "Should have been initialized");
      if (vklass->contains_oops()) {
        add_flattened_field_oopmap(nonstatic_oop_maps, vklass, ff->offset());
      }
      ff = ff->next_field();
    }
    cg = cg->next();
  }

  // nonstatic_oop_maps->compact(Thread::current());

  int instance_end = align_up(_layout->last_block()->offset(), wordSize);
  int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
  int static_fields_size = (static_fields_end -
      InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
  int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);

  // Pass back information needed for InstanceKlass creation

  _info->oop_map_blocks = nonstatic_oop_maps;
  _info->instance_size = align_object_size(instance_end / wordSize);
  _info->static_field_size = static_fields_size;
  _info->nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
  _info->has_nonstatic_fields = _has_nonstatic_fields;

  if (PrintNewLayout || (PrintFlattenableLayouts && _has_flattening_information)) {
    ResourceMark rm;
    tty->print_cr("Layout of class %s", _cfp->_class_name->as_C_string());
    tty->print_cr("| offset | kind | size | alignment | signature | name |");
    tty->print_cr("Instance fields:");
    _layout->print(tty);
    tty->print_cr("Static fields:");
    _static_layout->print(tty);
    nonstatic_oop_maps->print_on(tty);
    tty->print_cr("Instance size = %d * heapWordSize", _info->instance_size);
    tty->print_cr("Non-static field size = %d * heapWordSize", _info->nonstatic_field_size);
    tty->print_cr("Static field size = %d * heapWordSize", _info->static_field_size);
    if (_cfp->is_value_type()) {
      tty->print_cr("alignment = %d", _alignment);
      tty->print_cr("exact_size_in_bytes = %d", _exact_size_in_bytes);
      tty->print_cr("first_field_offset = %d", _first_field_offset);
    }
    tty->print_cr("---");
  }
}