/*
 * Copyright (c) 2019, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "jvm.h"
#include "classfile/classFileParser.hpp"
#include "classfile/fieldLayoutBuilder.hpp"
#include "memory/resourceArea.hpp"
#include "oops/array.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/valueKlass.hpp"
#include "runtime/fieldDescriptor.inline.hpp"

RawBlock::RawBlock(Kind kind, int size) {
  assert(kind == EMPTY || kind == RESERVED || kind == PADDING || kind == INHERITED,
      "Otherwise, should use the constructor with a field index argument");
  _next_field = NULL;
  _prev_field = NULL;
  _next_block = NULL;
  _prev_block = NULL;
  _field_index = -1; // no field
  _kind = kind;
  _size = size;
  _alignment = 1;
  _offset = -1;
  _is_reference = false;
  _value_klass = NULL;
}

RawBlock::RawBlock(int index, Kind kind, int size, int alignment, bool is_reference) {
  assert(kind == REGULAR || kind == FLATTENED || kind == INHERITED,
      "Other kinds do not have a field index");
  assert(size > 0, "Sanity check");
  assert(alignment > 0, "Sanity check");
  _next_field = NULL;
  _prev_field = NULL;
  _next_block = NULL;
  _prev_block = NULL;
  _field_index = index;
  _kind = kind;
  _size = size;
  _alignment = alignment;
  _offset = -1;
  _is_reference = is_reference;
  _value_klass = NULL;
}

bool RawBlock::fit(int size, int alignment) {
  // The padding needed to align the new field is the distance from _offset
  // up to the next multiple of alignment, not the remainder itself (this
  // matches the adjustment computed in FieldLayout::insert_field_block()).
  int adjustment = 0;
  int remainder = _offset % alignment;
  if (remainder != 0) {
    adjustment = alignment - remainder;
  }
  return _size >= size + adjustment;
}
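
// Illustrative example (hypothetical values): an EMPTY block with
// _offset == 6 and _size == 8, asked to fit a 4-byte field with 4-byte
// alignment, needs 4 - (6 % 4) == 2 bytes of leading padding. It fits
// because 8 >= 4 + 2, and the field would start at offset 8.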

FieldGroup::FieldGroup(int contended_group) {
  _next = NULL;
  _primitive_fields = NULL;
  _oop_fields = NULL;
  _flattened_fields = NULL;
  _contended_group = contended_group; // -1 means no contended group, 0 means default contended group
  _oop_count = 0;
}

void FieldGroup::add_primitive_field(AllFieldStream fs, BasicType type) {
  int size = type2aelembytes(type);
  RawBlock* block = new RawBlock(fs.index(), RawBlock::REGULAR, size, size /* alignment == size for primitive types */, false);
  add_block(&_primitive_fields, block);
}

void FieldGroup::add_oop_field(AllFieldStream fs) {
  int size = type2aelembytes(T_OBJECT);
  RawBlock* block = new RawBlock(fs.index(), RawBlock::REGULAR, size, size /* alignment == size for oops */, true);
  add_block(&_oop_fields, block);
  _oop_count++;
}

void FieldGroup::add_flattened_field(AllFieldStream fs, ValueKlass* vk) {
  // _flattened_fields list might be merged with the _primitive_fields list in the future
  RawBlock* block = new RawBlock(fs.index(), RawBlock::FLATTENED, vk->get_exact_size_in_bytes(), vk->get_alignment(), false);
  block->set_value_klass(vk);
  add_block(&_flattened_fields, block);
}

/* Adds a field to a field group. Inside a field group, fields are sorted by
 * decreasing size. Fields with the same size are sorted according to their
 * order of insertion (an easy hack to respect field order for classes with
 * hard coded offsets).
 */
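/* Illustrative example (hypothetical sizes): inserting fields of sizes
 * 4, 8, 1, 4 produces the list [8, 4, 4, 1], with the two 4-byte fields
 * kept in their original insertion order.
 */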
void FieldGroup::add_block(RawBlock** list, RawBlock* block) {
  if (*list == NULL) {
    *list = block;
  } else {
    if (block->size() > (*list)->size()) {  // cannot be >= to respect order of fields (for classes with hard coded offsets)
      block->set_next_field(*list);
      (*list)->set_prev_field(block);
      *list = block;
    } else {
      RawBlock* b = *list;
      while (b->next_field() != NULL) {
        if (b->next_field()->size() < block->size()) {
          break;
        }
        b = b->next_field();
      }
      block->set_next_field(b->next_field());
      block->set_prev_field(b);
      b->set_next_field(block);
      // Must test block->next_field() here: after the line above,
      // b->next_field() is block itself, so testing it would wrongly
      // link block's prev pointer back to block.
      if (block->next_field() != NULL) {
        block->next_field()->set_prev_field(block);
      }
    }
  }
}

FieldLayout::FieldLayout(Array<u2>* fields, ConstantPool* cp) {
  _fields = fields;
  _cp = cp;
  _blocks = NULL;
  _start = _blocks;
  _last = _blocks;
}

void FieldLayout::initialize_static_layout() {
  _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX);
  _blocks->set_offset(0);
  _last = _blocks;
  _start = _blocks;
  // Note: at this stage, InstanceMirrorKlass::offset_of_static_fields() could be zero, because
  // during bootstrapping, the size of java.lang.Class is not known yet when the layout
  // of static fields is computed. Field offsets are fixed later, when the size is known
  // (see java_lang_Class::fixup_mirror()).
  if (InstanceMirrorKlass::offset_of_static_fields() > 0) {
    insert(first_empty_block(), new RawBlock(RawBlock::RESERVED, InstanceMirrorKlass::offset_of_static_fields()));
    _blocks->set_offset(0);
  }
}

void FieldLayout::initialize_instance_layout(const InstanceKlass* super_klass) {
  if (super_klass == NULL) {
    _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX);
    _blocks->set_offset(0);
    _last = _blocks;
    _start = _blocks;
    insert(first_empty_block(), new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
  } else {
    // The JVM could reconstruct the layouts of the super classes, in order to use the
    // empty slots in these layouts to allocate the current class' fields. However, some
    // code in the JVM is not ready yet to find fields allocated this way, so the
    // optimization is not enabled yet.
#if 0
    reconstruct_layout(super_klass);
    fill_holes(super_klass);
    // _start = _last;  // uncomment to fill holes in super classes layouts
#else
    _blocks = new RawBlock(RawBlock::EMPTY, INT_MAX);
    _blocks->set_offset(0);
    _last = _blocks;
    insert(_last, new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes()));
    if (super_klass->nonstatic_field_size() > 0) {
      // To take into account the space allocated to super classes' fields, this code
      // uses the nonstatic_field_size() value to allocate a single INHERITED RawBlock.
      // The drawback is that nonstatic_field_size() expresses the size of non-static
      // fields in heapOopSize units, which implies that some space could be lost at the
      // end because the real size is rounded up. Using the exact size, with no rounding
      // up, would be possible, but would require modifications to other code in the JVM
      // performing field lookups (as it often expects this rounding to be applied).
      RawBlock* inherited = new RawBlock(RawBlock::INHERITED,
          super_klass->nonstatic_field_size() * heapOopSize);
      insert(_last, inherited);
    }
    _start = _last;
#endif
  }
}


RawBlock* FieldLayout::first_field_block() {
  RawBlock* block = _start;
  // Not sure the condition below will work well when inheriting a layout with contended padding
  while (block->kind() != RawBlock::INHERITED && block->kind() != RawBlock::REGULAR
      && block->kind() != RawBlock::FLATTENED && block->kind() != RawBlock::PADDING) {
    block = block->next_block();
  }
  return block;
}

/* The allocation logic uses a first fit strategy: the field is allocated in
 * the first empty slot big enough to contain it (including padding to fit
 * alignment constraints).
 */
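/* Illustrative example (hypothetical values): with an EMPTY block at offset
 * 12 of size 16, adding an 8-byte field with 8-byte alignment creates a
 * 4-byte EMPTY adjustment block at offset 12 and places the field at
 * offset 16, leaving a 4-byte EMPTY block at offset 24.
 */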
void FieldLayout::add(RawBlock* blocks, RawBlock* start) {
  if (start == NULL) {
    start = _start;
  }
  RawBlock* b = blocks;
  while (b != NULL) {
    // Scan from start for the first EMPTY block big enough for the field,
    // including the padding required by its alignment constraint.
    RawBlock* candidate = start;
    while (candidate->kind() != RawBlock::EMPTY || !candidate->fit(b->size(), b->alignment())) {
      candidate = candidate->next_block();
    }
    assert(candidate != NULL && candidate->fit(b->size(), b->alignment()), "paranoid check");
    insert_field_block(candidate, b);
    b = b->next_field();
  }
}

/* The allocation logic uses a first fit strategy: the set of fields is
 * allocated in the first empty slot big enough to contain the whole set
 * (including padding to fit alignment constraints).
 */
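/* Illustrative example (hypothetical values): three 4-byte oop fields need
 * 12 contiguous bytes. With an EMPTY block at offset 20 of size 16, the
 * first oop is placed at offset 20 (the 4-byte alignment is already
 * satisfied) and the next two follow at offsets 24 and 28.
 */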
void FieldLayout::add_contiguously(RawBlock* blocks, RawBlock* start) {
  if (blocks == NULL) return;
  if (start == NULL) {
    start = _start;
  }
  // This code assumes that if the first block is well aligned, the following
  // blocks would naturally be well aligned (no need for adjustment)
  int size = 0;
  RawBlock* b = blocks;
  while (b != NULL) {
    size += b->size();
    b = b->next_field();
  }
  RawBlock* candidate = start;
  while (candidate->kind() != RawBlock::EMPTY || !candidate->fit(size, blocks->alignment())) {
    candidate = candidate->next_block();
  }
  b = blocks;
  while (b != NULL) {
    insert_field_block(candidate, b);
    b = b->next_field();
    assert(b == NULL || (candidate->offset() % b->alignment() == 0), "Contiguous blocks must be naturally well aligned");
  }
}

RawBlock* FieldLayout::insert_field_block(RawBlock* slot, RawBlock* block) {
  assert(slot->kind() == RawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  if (slot->offset() % block->alignment() != 0) {
    int adjustment = block->alignment() - (slot->offset() % block->alignment());
    RawBlock* adj = new RawBlock(RawBlock::EMPTY, adjustment);
    insert(slot, adj);
  }
  insert(slot, block);
  if (slot->size() == 0) {
    remove(slot);
  }
  if (UseNewLayout) {
    FieldInfo::from_field_array(_fields, block->field_index())->set_offset(block->offset());
  }
  return block;
}

void FieldLayout::reconstruct_layout(const InstanceKlass* ik) {
  // TODO: reconstructing static layouts makes no sense here: static fields are
  // stored in the mirror and are not impacted by the parent class's static fields
  if (ik->super() != NULL) {
    reconstruct_layout(InstanceKlass::cast(ik->super()));
  } else {
    _blocks = new RawBlock(RawBlock::RESERVED, instanceOopDesc::base_offset_in_bytes());
    _blocks->set_offset(0);
    _last = _blocks;
    _start = _blocks;
  }
  for (AllFieldStream fs(ik->fields(), ik->constants()); !fs.done(); fs.next()) {
    BasicType type = vmSymbols::signature_type(fs.signature());
    // static fields are skipped: they live in the mirror, not in the instance
    if (fs.access_flags().is_static()) continue;
    if (type != T_VALUETYPE) {
      int size = type2aelembytes(type);
      // INHERITED blocks are marked as non-reference because oop_maps are handled by their holder class
      RawBlock* block = new RawBlock(fs.index(), RawBlock::INHERITED, size, size, false);
      block->set_offset(fs.offset());
      insert_per_offset(block);
    } else {
      fatal("Not supported yet");
    }
  }
}

void FieldLayout::fill_holes(const InstanceKlass* super_klass) {
  assert(_blocks != NULL, "Sanity check");
  assert(_blocks->offset() == 0, "first block must be at offset zero");
  RawBlock* b = _blocks;
  while (b->next_block() != NULL) {
    if (b->next_block()->offset() > (b->offset() + b->size())) {
      int size = b->next_block()->offset() - (b->offset() + b->size());
      RawBlock* empty = new RawBlock(RawBlock::EMPTY, size);
      empty->set_offset(b->offset() + b->size());
      empty->set_next_block(b->next_block());
      b->next_block()->set_prev_block(empty);
      b->set_next_block(empty);
      empty->set_prev_block(b);
    }
    b = b->next_block();
  }
  assert(b->next_block() == NULL, "Invariant at this point");
  if (b->kind() != RawBlock::EMPTY) {
    RawBlock* last = new RawBlock(RawBlock::EMPTY, INT_MAX);
    last->set_offset(b->offset() + b->size());
    assert(last->offset() > 0, "Sanity check");
    b->set_next_block(last);
    last->set_prev_block(b);
    _last = last;
  }
  // Padding is still added so that the size can be expressed in heapOopSize units
  int super_end = instanceOopDesc::base_offset_in_bytes() + super_klass->nonstatic_field_size() * heapOopSize;
  if (_last->offset() < super_end) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, super_end - _last->offset());
    insert(_last, padding);
  }
}

RawBlock* FieldLayout::insert(RawBlock* slot, RawBlock* block) {
  assert(slot->kind() == RawBlock::EMPTY, "Blocks can only be inserted in empty blocks");
  assert(slot->offset() % block->alignment() == 0, "Incompatible alignment");
  block->set_offset(slot->offset());
  slot->set_offset(slot->offset() + block->size());
  slot->set_size(slot->size() - block->size());
  block->set_prev_block(slot->prev_block());
  block->set_next_block(slot);
  slot->set_prev_block(block);
  if (block->prev_block() != NULL) {  // NULL when the slot was the head of the list
    block->prev_block()->set_next_block(block);
  }
  if (_blocks == slot) {
    _blocks = block;
  }
  if (_start == slot) {
    _start = block;
  }
  return block;
}

void FieldLayout::insert_per_offset(RawBlock* block) {
  if (_blocks == NULL) {
    _blocks = block;
  } else if (_blocks->offset() > block->offset()) {
    block->set_next_block(_blocks);
    _blocks->set_prev_block(block);
    _blocks = block;
  } else {
    RawBlock* b = _blocks;
    while (b->next_block() != NULL && b->next_block()->offset() < block->offset()) {
      b = b->next_block();
    }
    if (b->next_block() == NULL) {
      b->set_next_block(block);
      block->set_prev_block(b);
    } else {
      assert(b->next_block()->offset() >= block->offset(), "Sanity check");
      assert(b->next_block()->offset() > block->offset() || b->next_block()->kind() == RawBlock::EMPTY, "Sanity check");
      block->set_next_block(b->next_block());
      b->next_block()->set_prev_block(block);
      block->set_prev_block(b);
      b->set_next_block(block);
    }
  }
}

void FieldLayout::remove(RawBlock* block) {
  assert(block != NULL, "Sanity check");
  assert(block != _last, "Sanity check");
  if (_blocks == block) {
    _blocks = block->next_block();
    if (_blocks != NULL) {
      _blocks->set_prev_block(NULL);
    }
  } else {
    assert(block->prev_block() != NULL, "_prev should be set for non-head blocks");
    block->prev_block()->set_next_block(block->next_block());
    block->next_block()->set_prev_block(block->prev_block());
  }
  if (block == _start) {
    _start = block->prev_block();
  }
}

void FieldLayout::print(outputStream* output) {
  ResourceMark rm;
  RawBlock* b = _blocks;
  while (b != _last) {
    switch (b->kind()) {
    case RawBlock::REGULAR: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr("  %d %s %d %d %s %s",
          b->offset(),
          "REGULAR",
          b->size(),
          b->alignment(),
          fi->signature(_cp)->as_C_string(),
          fi->name(_cp)->as_C_string());
      break;
    }
    case RawBlock::FLATTENED: {
      FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
      output->print_cr("  %d %s %d %d %s %s",
          b->offset(),
          "FLATTENED",
          b->size(),
          b->alignment(),
          fi->signature(_cp)->as_C_string(),
          fi->name(_cp)->as_C_string());
      break;
    }
    case RawBlock::RESERVED:
      output->print_cr("  %d %s %d",
          b->offset(),
          "RESERVED",
          b->size());
      break;
    case RawBlock::INHERITED:
      output->print_cr("  %d %s %d",
          b->offset(),
          "INHERITED",
          b->size());
      break;
    case RawBlock::EMPTY:
      output->print_cr("  %d %s %d",
          b->offset(),
          "EMPTY",
          b->size());
      break;
    case RawBlock::PADDING:
      output->print_cr("  %d %s %d",
          b->offset(),
          "PADDING",
          b->size());
      break;
    }
    b = b->next_block();
  }
}


FieldLayoutBuilder::FieldLayoutBuilder(ClassFileParser* cfp, FieldLayoutInfo* info) {
  _cfp = cfp;
  _info = info;
  _fields = NULL;
  _root_group = NULL;
  _contended_groups = NULL;
  _static_fields = NULL;
  _layout = NULL;
  _static_layout = NULL;
  _nonstatic_oopmap_count = 0;
  // Inline class specific information
  _alignment = -1;
  _first_field_offset = -1;
  _exact_size_in_bytes = -1;
  _has_nonstatic_fields = false;
  _has_flattening_information = _cfp->is_value_type();
}

FieldGroup* FieldLayoutBuilder::get_or_create_contended_group(int g) {
  assert(g > 0, "must only be called for named contended groups");
  if (_contended_groups == NULL) {
    _contended_groups = new FieldGroup(g);
    return _contended_groups;
  }
  FieldGroup* group = _contended_groups;
  while (group->next() != NULL) {
    if (group->contended_group() == g) break;
    group = group->next();
  }
  if (group->contended_group() == g) return group;
  group->set_next(new FieldGroup(g));
  return group->next();
}

void FieldLayoutBuilder::prologue() {
  _layout = new FieldLayout(_cfp->_fields, _cfp->_cp);
  const InstanceKlass* super_klass = _cfp->_super_klass;
  _layout->initialize_instance_layout(super_klass);
  if (super_klass != NULL) {
    _has_nonstatic_fields = super_klass->has_nonstatic_fields();
  }
  _static_layout = new FieldLayout(_cfp->_fields, _cfp->_cp);
  _static_layout->initialize_static_layout();
  _static_fields = new FieldGroup();
  _root_group = new FieldGroup();
  _contended_groups = NULL;
}

/* Field sorting for regular (non-inline) classes:
 *   - fields are split into static and non-static fields
 *   - non-static fields are also sorted according to their contention group
 *     (support of the @Contended annotation)
 *   - the @Contended annotation is ignored for static fields
 *   - field flattening decisions are taken in this method
 */
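/* Illustrative example (hypothetical Java class, not from the sources):
 *
 *   class C {
 *     static int a;            // -> _static_fields group
 *     int b;                   // -> _root_group
 *     @Contended int c;        // -> anonymous group, field is alone in it
 *     @Contended("g") int d;   // -> named group, shared with e
 *     @Contended("g") int e;
 *   }
 */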
void FieldLayoutBuilder::regular_field_sorting(TRAPS) {
  assert(!_cfp->is_value_type(), "Should only be used for non-inline classes");
  for (AllFieldStream fs(_cfp->_fields, _cfp->_cp); !fs.done(); fs.next()) {
    FieldGroup* group = NULL;
    if (fs.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      if (fs.is_contended()) {
        int g = fs.contended_group();
        if (g == 0) {
          // default group means the field is alone in its contended group
          group = new FieldGroup(0);
          group->set_next(_contended_groups);
          _contended_groups = group;
        } else {
          group = get_or_create_contended_group(g);
        }
      } else {
        group = _root_group;
      }
    }
    assert(group != NULL, "invariant");
    BasicType type = vmSymbols::signature_type(fs.signature());
    switch (type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      group->add_primitive_field(fs, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (group != _static_fields) _nonstatic_oopmap_count++;
      group->add_oop_field(fs);
      break;
    case T_VALUETYPE: {
      if (group == _static_fields) {
        // static fields are never flattened
        group->add_oop_field(fs);
      } else {
        _has_flattening_information = true;
        // The flattening decision is taken here. This code assumes all
        // verifications have been performed before (the field is a
        // flattenable field, the field's type has been loaded, and it is
        // an inline klass).
        Klass* klass =
            SystemDictionary::resolve_flattenable_field_or_fail(&fs,
                Handle(THREAD, _cfp->_loader_data->class_loader()),
                _cfp->_protection_domain, true, CHECK);
        assert(klass != NULL, "Sanity check");
        ValueKlass* vk = ValueKlass::cast(klass);
        bool flattened = (ValueFieldMaxFlatSize < 0)
                         || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
        if (flattened) {
          group->add_flattened_field(fs, vk);
          _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
          fs.set_flattened(true);
        } else {
          _nonstatic_oopmap_count++;
          group->add_oop_field(fs);
        }
      }
      break;
    }
    default:
      fatal("Unexpected BasicType");
    }
  }
}

/* Field sorting for inline classes:
 *   - because inline classes are immutable, the @Contended annotation is
 *     ignored when computing their layout (with only read operations, there's
 *     no false sharing issue)
 *   - this method also records the alignment of the field with the most
 *     constraining alignment; this value is then used as the alignment
 *     constraint when flattening this inline type into another container
 *   - field flattening decisions are taken in this method (those decisions
 *     are currently based only on the size of the fields to be flattened;
 *     the size of the resulting instance is not considered)
 */
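/* Illustrative example (hypothetical fields): an inline class declaring a
 * long (alignment 8) and a byte (alignment 1) records _alignment == 8, so
 * flattening it into another container starts at an 8-byte aligned offset.
 */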
void FieldLayoutBuilder::inline_class_field_sorting(TRAPS) {
  assert(_cfp->is_value_type(), "Should only be used for inline classes");
  int alignment = 1;
  for (AllFieldStream fs(_cfp->_fields, _cfp->_cp); !fs.done(); fs.next()) {
    FieldGroup* group = NULL;
    int field_alignment = 1;
    if (fs.access_flags().is_static()) {
      group = _static_fields;
    } else {
      _has_nonstatic_fields = true;
      group = _root_group;
    }
    assert(group != NULL, "invariant");
    BasicType type = vmSymbols::signature_type(fs.signature());
    switch (type) {
    case T_BYTE:
    case T_CHAR:
    case T_DOUBLE:
    case T_FLOAT:
    case T_INT:
    case T_LONG:
    case T_SHORT:
    case T_BOOLEAN:
      if (group != _static_fields) {
        field_alignment = type2aelembytes(type); // alignment == size for primitive types
      }
      group->add_primitive_field(fs, type);
      break;
    case T_OBJECT:
    case T_ARRAY:
      if (group != _static_fields) {
        _nonstatic_oopmap_count++;
        field_alignment = type2aelembytes(type); // alignment == size for oops
      }
      group->add_oop_field(fs);
      break;
    case T_VALUETYPE: {
      if (group == _static_fields) {
        // static fields are never flattened
        group->add_oop_field(fs);
      } else {
        // The flattening decision is taken here. This code assumes all
        // verifications have been performed before (the field is a
        // flattenable field, the field's type has been loaded, and it is
        // an inline klass).
        Klass* klass =
            SystemDictionary::resolve_flattenable_field_or_fail(&fs,
                Handle(THREAD, _cfp->_loader_data->class_loader()),
                _cfp->_protection_domain, true, CHECK);
        assert(klass != NULL, "Sanity check");
        ValueKlass* vk = ValueKlass::cast(klass);
        bool flattened = (ValueFieldMaxFlatSize < 0)
                         || (vk->size_helper() * HeapWordSize) <= ValueFieldMaxFlatSize;
        if (flattened) {
          group->add_flattened_field(fs, vk);
          _nonstatic_oopmap_count += vk->nonstatic_oop_map_count();
          field_alignment = vk->get_alignment();
          fs.set_flattened(true);
        } else {
          _nonstatic_oopmap_count++;
          field_alignment = type2aelembytes(T_OBJECT);
          group->add_oop_field(fs);
        }
      }
      break;
    }
    default:
      fatal("Unexpected BasicType");
    }
    if (!fs.access_flags().is_static() && field_alignment > alignment) alignment = field_alignment;
  }
  _alignment = alignment;
  if (!_has_nonstatic_fields) {
    // There are a number of fixes required throughout the type system and JIT
    _cfp->throwValueTypeLimitation(THREAD_AND_LOCATION, "Value Types do not support zero instance size yet");
    return;
  }
}

/* Computation of regular classes' layout is an evolution of the previous
 * default layout (FieldAllocationStyle 1):
 *   - flattened fields are allocated first (because they have potentially the
 *     least regular shapes, and are more likely to create empty slots between
 *     them, which can then be used to allocate primitive or oop fields).
 *     Allocation is performed from the biggest to the smallest flattened field.
 *   - then primitive fields (from the biggest to the smallest)
 *   - then oop fields are allocated contiguously (to reduce the number of
 *     oopmaps and reduce the work of the GC).
 */
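/* Illustrative sketch of a resulting layout (hypothetical field sizes):
 *
 *   | RESERVED header | flattened 12B | flattened 8B | long | int | short |
 *   | oop | oop | oop (contiguous) | ...
 *
 * Empty slots left between the flattened fields can still be filled by the
 * smaller primitive fields allocated afterwards.
 */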
void FieldLayoutBuilder::compute_regular_layout(TRAPS) {
  bool need_tail_padding = false;
  prologue();
  regular_field_sorting(CHECK);
  const bool is_contended_class = _cfp->_parsed_annotations->is_contended();
  if (is_contended_class) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
    // Insertion is currently easy because the current strategy doesn't try to
    // fill holes in super classes' layouts, so the _start block is also the
    // _last block.
    _layout->insert(_layout->start(), padding);
    need_tail_padding = true;
  }
  _layout->add(_root_group->flattened_fields());
  _layout->add(_root_group->primitive_fields());
  _layout->add_contiguously(_root_group->oop_fields());
  FieldGroup* cg = _contended_groups;
  while (cg != NULL) {
    RawBlock* start = _layout->last_block();
    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
    _layout->insert(start, padding);
    _layout->add(cg->flattened_fields(), start);
    _layout->add(cg->primitive_fields(), start);
    _layout->add(cg->oop_fields(), start);
    need_tail_padding = true;
    cg = cg->next();
  }
  if (need_tail_padding) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, ContendedPaddingWidth);
    _layout->insert(_layout->last_block(), padding);
  }
  _static_layout->add_contiguously(_static_fields->oop_fields());
  _static_layout->add(_static_fields->primitive_fields());

  epilogue();
}

/* Computation of inline classes' layout follows a slightly different strategy
 * than for regular classes. Regular classes have their oop fields allocated
 * at the end of the layout to improve GC performance. Unfortunately, this
 * strategy increases the number of empty slots inside an instance. Because
 * the purpose of inline classes is to be embedded into other containers, it
 * is critical to keep their size as small as possible. For this reason, the
 * allocation strategy is:
 *   - flattened fields are allocated first (because they have potentially the
 *     least regular shapes, and are more likely to create empty slots between
 *     them, which can then be used to allocate primitive or oop fields).
 *     Allocation is performed from the biggest to the smallest flattened field.
 *   - then oop fields are allocated contiguously (to reduce the number of
 *     oopmaps and reduce the work of the GC)
 *   - then primitive fields (from the biggest to the smallest)
 */
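/* Illustrative comparison (hypothetical fields: one flattened value, one oop,
 * one long, one byte): a regular class would place the oop at the end of the
 * layout, possibly leaving a gap before it, while an inline class places the
 * oop right after the flattened field, then the long and the byte, keeping
 * the footprint embedded into other containers as small as possible.
 */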
void FieldLayoutBuilder::compute_inline_class_layout(TRAPS) {
  prologue();
  inline_class_field_sorting(CHECK);
  if (_layout->start()->offset() % _alignment != 0) {
    RawBlock* padding = new RawBlock(RawBlock::PADDING, _alignment - (_layout->start()->offset() % _alignment));
    _layout->insert(_layout->start(), padding);
    _layout->set_start(padding->next_block());
  }
  _first_field_offset = _layout->start()->offset();
  _layout->add(_root_group->flattened_fields());
  _layout->add_contiguously(_root_group->oop_fields());
  _layout->add(_root_group->primitive_fields());
  _exact_size_in_bytes = _layout->last_block()->offset() - _layout->start()->offset();

  _static_layout->add_contiguously(_static_fields->oop_fields());
  _static_layout->add(_static_fields->primitive_fields());

  epilogue();
}

void FieldLayoutBuilder::add_flattened_field_oopmap(OopMapBlocksBuilder* nonstatic_oop_maps,
                                                    ValueKlass* vklass, int offset) {
  int diff = offset - vklass->first_field_offset();
  const OopMapBlock* map = vklass->start_of_nonstatic_oop_maps();
  const OopMapBlock* last_map = map + vklass->nonstatic_oop_map_count();
  while (map < last_map) {
    nonstatic_oop_maps->add(map->offset() + diff, map->count());
    map++;
  }
}

void FieldLayoutBuilder::epilogue() {
  // Computing oopmaps
  int super_oop_map_count = (_cfp->_super_klass == NULL) ? 0 : _cfp->_super_klass->nonstatic_oop_map_count();
  int max_oop_map_count = super_oop_map_count + _nonstatic_oopmap_count;

  OopMapBlocksBuilder* nonstatic_oop_maps =
      new OopMapBlocksBuilder(max_oop_map_count, Thread::current());
  if (super_oop_map_count > 0) {
    nonstatic_oop_maps->initialize_inherited_blocks(_cfp->_super_klass->start_of_nonstatic_oop_maps(),
        _cfp->_super_klass->nonstatic_oop_map_count());
  }
  if (_root_group->oop_fields() != NULL) {
    nonstatic_oop_maps->add(_root_group->oop_fields()->offset(), _root_group->oop_count());
  }
  RawBlock* ff = _root_group->flattened_fields();
  while (ff != NULL) {
    ValueKlass* vklass = ff->value_klass();
    assert(vklass != NULL, "Should have been initialized");
    if (vklass->contains_oops()) {
      add_flattened_field_oopmap(nonstatic_oop_maps, vklass, ff->offset());
    }
    ff = ff->next_field();
  }
  FieldGroup* cg = _contended_groups;
  while (cg != NULL) {
    if (cg->oop_count() > 0) {
      nonstatic_oop_maps->add(cg->oop_fields()->offset(), cg->oop_count());
    }
    RawBlock* ff = cg->flattened_fields();
    while (ff != NULL) {
      ValueKlass* vklass = ff->value_klass();
      assert(vklass != NULL, "Should have been initialized");
      if (vklass->contains_oops()) {
        add_flattened_field_oopmap(nonstatic_oop_maps, vklass, ff->offset());
      }
      ff = ff->next_field();
    }
    cg = cg->next();
  }

  // nonstatic_oop_maps->compact(Thread::current());

  int instance_end = align_up(_layout->last_block()->offset(), wordSize);
  int static_fields_end = align_up(_static_layout->last_block()->offset(), wordSize);
  int static_fields_size = (static_fields_end -
      InstanceMirrorKlass::offset_of_static_fields()) / wordSize;
  int nonstatic_field_end = align_up(_layout->last_block()->offset(), heapOopSize);

  // Pass back information needed for InstanceKlass creation

  _info->oop_map_blocks = nonstatic_oop_maps;
  _info->instance_size = align_object_size(instance_end / wordSize);
  _info->static_field_size = static_fields_size;
  _info->nonstatic_field_size = (nonstatic_field_end - instanceOopDesc::base_offset_in_bytes()) / heapOopSize;
  _info->has_nonstatic_fields = _has_nonstatic_fields;

  if (PrintNewLayout || (PrintFlattenableLayouts && _has_flattening_information)) {
    ResourceMark rm;
    tty->print_cr("Layout of class %s", _cfp->_class_name->as_C_string());
    tty->print_cr("| offset | kind | size | alignment | signature | name |");
    tty->print_cr("Instance fields:");
    _layout->print(tty);
    tty->print_cr("Static fields:");
    _static_layout->print(tty);
    nonstatic_oop_maps->print_on(tty);
    tty->print_cr("Instance size = %d * heapWordSize", _info->instance_size);
    tty->print_cr("Non-static field size = %d * heapWordSize", _info->nonstatic_field_size);
    tty->print_cr("Static field size = %d * heapWordSize", _info->static_field_size);
    if (_cfp->is_value_type()) {
      tty->print_cr("alignment = %d", _alignment);
      tty->print_cr("exact_size_in_bytes = %d", _exact_size_in_bytes);
      tty->print_cr("first_field_offset = %d", _first_field_offset);
    }
    tty->print_cr("---");
  }
}