/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsLockVerifier.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/binaryTreeDictionary.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/mutexLocker.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

// Specialize for AdaptiveFreeList which tries to avoid
// splitting a chunk of a size that is under-populated in favor of
// an over-populated size.  The general get_better_list() just returns
// the current list.
template <>
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
  // A candidate chunk has been found.  If its list is already
  // under-populated, get a chunk associated with the hint for
  // this size.

  TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
  if (curTL->surplus() <= 0) {
    /* Use the hint to find a size with a surplus, and reset the hint. */
    TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
    while (hintTL->hint() != 0) {
      assert(hintTL->hint() > hintTL->size(),
        "hint points in the wrong direction");
      hintTL = dictionary->find_list(hintTL->hint());
      assert(curTL != hintTL, "Infinite loop");
      if (hintTL == NULL ||
          hintTL == curTL /* Should not happen but protect against it */ ) {
        // No useful hint.  Set the hint to 0 and go on.
        curTL->set_hint(0);
        break;
      }
      assert(hintTL->size() > curTL->size(), "hint is inconsistent");
      if (hintTL->surplus() > 0) {
        // The hint led to a list that has a surplus.  Use it.
        // Set the hint for the candidate to an over-populated
        // size.
        curTL->set_hint(hintTL->size());
        // Change the candidate.
        curTL = hintTL;
        break;
      }
    }
  }
  return curTL;
}

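// Record a census event for the list of the given size: a split or
// coalesce birth increments the matching birth counter and the surplus;
// a split or coalesce death increments the matching death counter and
// decrements the surplus.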
void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
  if (nd) {
    if (split) {
      if (birth) {
        nd->increment_split_births();
        nd->increment_surplus();
      } else {
        nd->increment_split_deaths();
        nd->decrement_surplus();
      }
    } else {
      if (birth) {
        nd->increment_coal_births();
        nd->increment_surplus();
      } else {
        nd->increment_coal_deaths();
        nd->decrement_surplus();
      }
    }
  }
  // A list for this size may not be found (nd == 0) if
  //   this is a death where the appropriate list is now
  //     empty and has been removed from the dictionary, or
  //   this is a birth associated with a LinAB, whose chunk
  //     is not in the dictionary.
}

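// A list of the requested size is considered over-populated for
// coalescing purposes if it does not exist, if no coalesced chunks of
// this size are desired, or if it already holds more chunks than desired.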
bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
  if (FLSAlwaysCoalesceLarge) return true;

  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
  // No list of the requested size implies it is over-populated.
  return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
         list_of_size->count() > list_of_size->coal_desired();
}

// For each list in the tree, calculate the desired count, desired
// coalesce count, count before sweep, and surplus before sweep.
class BeginSweepClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  double _percentage;
  float _inter_sweep_current;
  float _inter_sweep_estimate;
  float _intra_sweep_estimate;

 public:
  BeginSweepClosure(double p, float inter_sweep_current,
                              float inter_sweep_estimate,
                              float intra_sweep_estimate) :
   _percentage(p),
   _inter_sweep_current(inter_sweep_current),
   _inter_sweep_estimate(inter_sweep_estimate),
   _intra_sweep_estimate(intra_sweep_estimate) { }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    double coalSurplusPercent = _percentage;
    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
    fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
    fl->set_before_sweep(fl->count());
    fl->set_bfr_surp(fl->surplus());
  }
};

void AFLBinaryTreeDictionary::begin_sweep_dict_census(double coalSurplusPercent,
  float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
  BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
                        inter_sweep_estimate,
                        intra_sweep_estimate);
  bsc.do_tree(root());
}

// Calculate surpluses for the lists in the tree.
class setTreeSurplusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  double percentage;
 public:
  setTreeSurplusClosure(double v) { percentage = v; }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    double splitSurplusPercent = percentage;
    fl->set_surplus(fl->count() -
                   (ssize_t)((double)fl->desired() * splitSurplusPercent));
  }
};

void AFLBinaryTreeDictionary::set_tree_surplus(double splitSurplusPercent) {
  setTreeSurplusClosure sts(splitSurplusPercent);
  sts.do_tree(root());
}

// Set hints for the lists in the tree.
class setTreeHintsClosure : public DescendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  size_t hint;
 public:
  setTreeHintsClosure(size_t v) { hint = v; }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    fl->set_hint(hint);
    assert(fl->hint() == 0 || fl->hint() > fl->size(),
      "Current hint is inconsistent");
    if (fl->surplus() > 0) {
      hint = fl->size();
    }
  }
};

void AFLBinaryTreeDictionary::set_tree_hints(void) {
  setTreeHintsClosure sth(0);
  sth.do_tree(root());
}

// Record the current count as the count at the previous sweep and
// clear the split and coalesce counters.
class clearTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    fl->set_prev_sweep(fl->count());
    fl->set_coal_births(0);
    fl->set_coal_deaths(0);
    fl->set_split_births(0);
    fl->set_split_deaths(0);
  }
};

void AFLBinaryTreeDictionary::clear_tree_census(void) {
  clearTreeCensusClosure ctc;
  ctc.do_tree(root());
}

// Do reporting and post sweep clean up.
void AFLBinaryTreeDictionary::end_sweep_dict_census(double splitSurplusPercent) {
  // Does walking the tree 3 times hurt?
  set_tree_surplus(splitSurplusPercent);
  set_tree_hints();
  LogTarget(Trace, gc, freelist, stats) log;
  if (log.is_enabled()) {
    LogStream out(log);
    report_statistics(&out);
  }
  clear_tree_census();
}

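// Census lifecycle (sketch): begin_sweep_dict_census() computes the desired
// counts before a sweep, dict_census_update() records split/coalesce
// births and deaths as chunks are split or coalesced, and
// end_sweep_dict_census() recomputes surpluses and hints, reports
// statistics, and clears the per-sweep counters afterwards.
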
// Print census information - counts, births, deaths, etc.
// for each list in the tree.  Also print some summary
// information.
class PrintTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  int _print_line;
  size_t _total_free;
  AdaptiveFreeList<FreeChunk> _total;

 public:
  PrintTreeCensusClosure() {
    _print_line = 0;
    _total_free = 0;
  }
  AdaptiveFreeList<FreeChunk>* total() { return &_total; }
  size_t total_free() { return _total_free; }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    LogStreamHandle(Debug, gc, freelist, census) out;

    if (++_print_line >= 40) {
      AdaptiveFreeList<FreeChunk>::print_labels_on(&out, "size");
      _print_line = 0;
    }
    fl->print_on(&out);
    _total_free +=            fl->count()             * fl->size();
    total()->set_count(       total()->count()        + fl->count());
    total()->set_bfr_surp(    total()->bfr_surp()     + fl->bfr_surp());
    total()->set_surplus(     total()->surplus()      + fl->surplus());
    total()->set_desired(     total()->desired()      + fl->desired());
    total()->set_prev_sweep(  total()->prev_sweep()   + fl->prev_sweep());
    total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
    total()->set_coal_births( total()->coal_births()  + fl->coal_births());
    total()->set_coal_deaths( total()->coal_deaths()  + fl->coal_deaths());
    total()->set_split_births(total()->split_births() + fl->split_births());
    total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
  }
};

void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const {

  st->print_cr("BinaryTree");
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
  PrintTreeCensusClosure ptc;
  ptc.do_tree(root());

  AdaptiveFreeList<FreeChunk>* total = ptc.total();
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, " ");
  total->print_on(st, "TOTAL\t");
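  // growth: net chunks gained over the last cycle (split + coalesce births
  //   minus deaths), relative to the count at the previous sweep.
  // deficit: fraction of the desired count that is currently missing.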
  st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f  deficit: %8.5f",
               ptc.total_free(),
               (double)(total->split_births() + total->coal_births()
                      - total->split_deaths() - total->coal_deaths())
               /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
               (double)(total->desired() - total->count())
               /(total->desired() != 0 ? (double)total->desired() : 1.0));
}

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// Highest rank used by the free list locks.
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Defaults are 0 so things will break badly if incorrectly initialized.
size_t CompactibleFreeListSpace::IndexSetStart  = 0;
size_t CompactibleFreeListSpace::IndexSetStride = 0;
size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;

size_t MinChunkSize = 0;

void CompactibleFreeListSpace::set_cms_values() {
  // Set CMS global values
  assert(MinChunkSize == 0, "already set");

  // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  // for chunks to contain a FreeChunk.
  _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;

  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  IndexSetStart  = MinChunkSize;
  IndexSetStride = MinObjAlignment;
}

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
                    CMSConcMarkMultiple),
  _bt(bs, mr),
  _collector(NULL),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace_lock", true,
                Monitor::_safepoint_check_never),
  _preconsumptionDirtyCardClosure(NULL),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace_dict_par_lock", true,
                          Monitor::_safepoint_check_never)
{
  assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
         "FreeChunk is larger than expected");
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);

  _dictionary = new AFLBinaryTreeDictionary(mr);

  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                             SmallForLinearAlloc);

  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  check_free_list_consistency();

  // Initialize locks for parallel case.
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                            "a freelist par lock", true, Mutex::_safepoint_check_never);
    DEBUG_ONLY(
      _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
    )
  }
  _dictionary->set_par_lock(&_parDictionaryAllocLock);

  _used_stable = 0;
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
         "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
         "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = CMSHeap::heap()->young_gen();
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this", so adjust the size again).
      // Use the virtual method, which was not used above in order to avoid
      // the virtual dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark_raw();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space can call it only when
  // compact_top exceeds the current threshold, but that does not hold for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block.  The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize the indexed free lists to be empty.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used.  It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

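// Size in heap words of the block starting at addr, adjusted via
// adjustObjectSize() so the result is also usable as a free chunk size.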
size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
  return adjustObjectSize(oop(addr)->size());
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (size_t i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->set_size(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
    }
    coalBirth(mr.word_size());
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  refillLinearAllocBlocksIfNeeded();
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}


#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

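// Total number of free chunks in the space: the indexed free lists,
// the dictionary, plus the linear allocation block if it is non-empty.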
size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->total_count();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

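// A block is free iff its FreeChunk header has the free bit set.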
bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->is_free();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

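// _used_stable is a snapshot of used() refreshed at well-defined points
// via recalculate_used_stable(), so asynchronous readers (e.g. the
// jvmstat perf sampler mentioned in free() below) see a value that does
// not flicker while the free lists are in flux.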
size_t CompactibleFreeListSpace::used_stable() const {
  return _used_stable;
}

void CompactibleFreeListSpace::recalculate_used_stable() {
  _used_stable = used();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->max_chunk_size();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimistically for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
            p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
}

void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
}

void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics(st);
  st->print_cr("Layout of Indexed Freelists");
  st->print_cr("---------------------------");
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(st);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
      st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
                   p2i(fc), p2i((HeapWord*)fc + i),
                   fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}

void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}

void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->report_statistics(st);
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}

class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  size_t do_blk(HeapWord* addr);
};

size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      p2i(addr),
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}

void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
  st->print_cr("=========================");
  st->print_cr("Block layout in CMS Heap:");
  st->print_cr("=========================");
  BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
  blk_iterate(&bpcl);

  st->print_cr("=======================================");
  st->print_cr("Order & Layout of Promotion Info Blocks");
  st->print_cr("=======================================");
  print_promo_info_blocks(st);

  st->print_cr("===========================");
  st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
  print_indexed_free_lists(st);

  st->print_cr("=================================");
  st->print_cr("Order of Free Lists in Dictionary");
  st->print_cr("=================================");
  print_dictionary_free_lists(st);
}


void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
  assert_lock_strong(&_freelistLock);
  Log(gc, freelist, stats) log;
  if (!log.is_debug()) {
    return;
  }
  log.debug("%s", title);

  LogStream out(log.debug());
  _dictionary->report_statistics(&out);

  if (log.is_trace()) {
    LogStream trace_out(log.trace());
    reportIndexedFreeListStatistics(&trace_out);
    size_t total_size = totalSizeInIndexedFreeLists() +
                       _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
    log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
  assert_lock_strong(&_freelistLock);
  st->print_cr("Statistics for IndexedFreeLists:");
  st->print_cr("--------------------------------");
  size_t total_size = totalSizeInIndexedFreeLists();
  size_t free_blocks = numFreeBlocksInIndexedFreeLists();
  st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
  st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
  st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
  if (free_blocks != 0) {
    st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
  }
}

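// Total number of chunks across all indexed free lists; in debug builds
// each list is re-counted and checked against its cached count.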
size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

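// Largest size with a non-empty indexed free list, or 0 if all of the
// indexed lists are empty.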
size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

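// Grow or shrink the space by moving its end. When growing, the newly
// covered words are turned into a free chunk and added to the free lists,
// possibly coalescing with the last free block.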
void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
        "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // Add the block to the free lists, if possible coalescing it
      // with the last free block, and update the BOT and census data.
      addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
    }
  }
}

class FreeListSpaceDCTOC : public FilteringDCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
  bool _parallel;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopIterateClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
                     CMSCollector* collector,
                     OopIterateClosure* cl,
                     CardTable::PrecisionStyle precision,
                     HeapWord* boundary,
                     bool parallel) :
    FilteringDCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector), _parallel(parallel) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.

#define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
                                                 HeapWord* bottom,              \
                                                 HeapWord* top,                 \
                                                 ClosureType* cl) {             \
   if (_parallel) {                                                             \
     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   } else {                                                                     \
     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   }                                                                            \
}                                                                               \
void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
                                                     HeapWord* bottom,          \
                                                     HeapWord* top,             \
                                                     ClosureType* cl) {         \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
                                                       HeapWord* bottom,        \
                                                       HeapWord* top,           \
                                                       ClosureType* cl) {       \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

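// Create a dirty-card-to-oop closure over this space. The returned
// FreeListSpaceDCTOC picks the parallel or non-parallel block walker
// defined above depending on the 'parallel' argument.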
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopIterateClosure* cl,
                                      CardTable::PrecisionStyle precision,
                                      HeapWord* boundary,
                                      bool parallel) {
  return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
}


// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(OopIterateClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized. We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
void CompactibleFreeListSpace::verify_objects_initialized() const {
  if (is_init_completed()) {
    assert_locked_or_safepoint(Heap_lock);
    if (Universe::is_fully_initialized()) {
      guarantee(SafepointSynchronize::is_at_safepoint(),
                "Required for objects to be initialized");
    }
  } // else make a concession at vm start-up
}

// Apply the given closure to each object in the space
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

// Apply the given closure to each live object in the space.
//   The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
// objects in the space with references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object.  See obj_is_alive() for details on how liveness of an
// object is decided.

void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur) && obj_is_alive(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                  UpwardsObjectClosure* cl) {
  assert_locked(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
  ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it may not necessarily
  // be the same as [bottom(),end()); although we could
  // use [used_region().start(),align_up(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
         "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  for (addr = block_start_careful(mr.start()), end = mr.end();
       addr < end; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->is_free()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful_m(oop(addr), mr);
      if (size == 0) {
        // An unparsable object was found; signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}


HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  return _bt.block_start(p);
}

HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
  return _bt.block_start_careful(p);
}

size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();

      // Bugfix for systems with weak memory model (PPC64/IA64). The
      // block's free bit was set and we have read the size of the
      // block. Acquire and check the free bit again. If the block is
      // still free, the read size is correct.
      OrderAccess::acquire();

      // If the object is still a free chunk, return the size, else it
      // has been allocated so try again.
      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    } else {
      // Ensure klass read before size.
      Klass* k = oop(p)->klass_or_null_acquire();
      if (k != NULL) {
        assert(k->is_klass(), "Should really be klass oop.");
        oop o = (oop)p;
        assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");

        size_t res = o->size_given_klass(k);
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    }
  }
}

// TODO: Now that is_parsable is gone, we should combine these two functions.
// A variant of the above that uses the Printezis bits for
// unparsable but allocated objects. This avoids any possible
// stalls waiting for mutators to initialize objects, and is
// thus potentially faster than the variant above. However,
// this variant may return a zero size for a block that is
// under mutation and for which a consistent size cannot be
// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
                                                     const CMSCollector* c)
const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  DEBUG_ONLY(uint loops = 0;)
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();

      // Bugfix for systems with weak memory model (PPC64/IA64). The
      // free bit of the block was set and we have read the size of
      // the block. Acquire and check the free bit again. If the
      // block is still free, the read size is correct.
      OrderAccess::acquire();

      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        assert(loops == 0, "Should be 0");
        return res;
      }
    } else {
      // Ensure klass read before size.
      Klass* k = oop(p)->klass_or_null_acquire();
      if (k != NULL) {
        assert(k->is_klass(), "Should really be klass oop.");
        oop o = (oop)p;
        assert(oopDesc::is_oop(o), "Should be an oop");

        size_t res = o->size_given_klass(k);
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      } else {
        // May return 0 if P-bits not present.
        return c->block_size_if_printezis_bits(p);
      }
    }
    assert(loops == 0, "Can loop at most once");
    DEBUG_ONLY(loops++;)
  }
}

1253 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1254   NOT_PRODUCT(verify_objects_initialized());
1255   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1256   FreeChunk* fc = (FreeChunk*)p;
1257   if (fc->is_free()) {
1258     return fc->size();
1259   } else {
1260     // Ignore mark word because this may be a recently promoted
1261     // object whose mark word is used to chain together grey
1262     // objects (the last one would have a null value).
1263     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1264     return adjustObjectSize(oop(p)->size());
1265   }
1266 }
1267 
1268 // This implementation assumes that the property of "being an object" is
// stable.  But being a free chunk may not be (because of parallel
// promotion).
1271 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1272   FreeChunk* fc = (FreeChunk*)p;
1273   assert(is_in_reserved(p), "Should be in space");
1274   if (FreeChunk::indicatesFreeChunk(p)) return false;
1275   Klass* k = oop(p)->klass_or_null_acquire();
1276   if (k != NULL) {
1277     // Ignore mark word because it may have been used to
1278     // chain together promoted objects (the last one
1279     // would have a null value).
1280     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1281     return true;
1282   } else {
1283     return false;  // Was not an object at the start of collection.
1284   }
1285 }
1286 
1287 // Check if the object is alive. This fact is checked either by consulting
1288 // the main marking bitmap in the sweeping phase or, if it's a permanent
1289 // generation and we're not in the sweeping phase, by checking the
1290 // perm_gen_verify_bit_map where we store the "deadness" information if
1291 // we did not sweep the perm gen in the most recent previous GC cycle.
1292 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1293   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1294          "Else races are possible");
1295   assert(block_is_obj(p), "The address should point to an object");
1296 
1297   // If we're sweeping, we use object liveness information from the main bit map
1298   // for both perm gen and old gen.
1299   // We don't need to lock the bitmap (live_map or dead_map below), because
1300   // EITHER we are in the middle of the sweeping phase, and the
1301   // main marking bit map (live_map below) is locked,
1302   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1303   // is stable, because it's mutated only in the sweeping phase.
1304   // NOTE: This method is also used by jmap where, if class unloading is
// off, it can return "false" for legitimate perm gen objects,
1306   // when we are not in the midst of a sweeping phase, which can result
1307   // in jmap not reporting certain perm gen objects. This will be moot
1308   // if/when the perm gen goes away in the future.
1309   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1310     CMSBitMap* live_map = _collector->markBitMap();
1311     return live_map->par_isMarked((HeapWord*) p);
1312   }
1313   return true;
1314 }
1315 
1316 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1317   FreeChunk* fc = (FreeChunk*)p;
1318   assert(is_in_reserved(p), "Should be in space");
1319   assert(_bt.block_start(p) == p, "Should be a block boundary");
1320   if (!fc->is_free()) {
1321     // Ignore mark word because it may have been used to
1322     // chain together promoted objects (the last one
1323     // would have a null value).
1324     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1325     return true;
1326   }
1327   return false;
1328 }
1329 
1330 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the free list lock when you call this.
1332 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1333   size_t size = 0;
1334   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1335     debug_only(
1336       // We may be calling here without the lock in which case we
1337       // won't do this modest sanity check.
1338       if (freelistLock()->owned_by_self()) {
1339         size_t total_list_size = 0;
1340         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1341           fc = fc->next()) {
1342           total_list_size += i;
1343         }
1344         assert(total_list_size == i * _indexedFreeList[i].count(),
1345                "Count in list is incorrect");
1346       }
1347     )
1348     size += i * _indexedFreeList[i].count();
1349   }
1350   return size;
1351 }
1352 
1353 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1354   MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
1355   return allocate(size);
1356 }
1357 
1358 HeapWord*
1359 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1360   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1361 }
1362 
1363 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1364   assert_lock_strong(freelistLock());
1365   HeapWord* res = NULL;
1366   assert(size == adjustObjectSize(size),
1367          "use adjustObjectSize() before calling into allocate()");
1368 
1369   res = allocate_adaptive_freelists(size);
1370 
1371   if (res != NULL) {
1372     // check that res does lie in this space!
1373     assert(is_in_reserved(res), "Not in this space!");
1374     assert(is_aligned((void*)res), "alignment check");
1375 
1376     FreeChunk* fc = (FreeChunk*)res;
1377     fc->markNotFree();
1378     assert(!fc->is_free(), "shouldn't be marked free");
1379     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1380     // Verify that the block offset table shows this to
1381     // be a single block, but not one which is unallocated.
1382     _bt.verify_single_block(res, size);
1383     _bt.verify_not_unallocated(res, size);
1384     // mangle a just allocated object with a distinct pattern.
1385     debug_only(fc->mangleAllocated(size));
1386   }
1387 
1388   // During GC we do not need to recalculate the stable used value for
1389   // every allocation in old gen. It is done once at the end of GC instead
1390   // for performance reasons.
1391   if (!CMSHeap::heap()->is_gc_active()) {
1392     recalculate_used_stable();
1393   }
1394 
1395   return res;
1396 }
1397 
1398 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1399   assert_lock_strong(freelistLock());
1400   HeapWord* res = NULL;
1401   assert(size == adjustObjectSize(size),
1402          "use adjustObjectSize() before calling into allocate()");
1403 
1404   // Strategy
1405   //   if small
1406   //     exact size from small object indexed list if small
1407   //     small or large linear allocation block (linAB) as appropriate
1408   //     take from lists of greater sized chunks
1409   //   else
1410   //     dictionary
1411   //     small or large linear allocation block if it has the space
1412   // Try allocating exact size from indexTable first
1413   if (size < IndexSetSize) {
1414     res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
1416       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1417         "Not removed from free list");
1418       // no block offset table adjustment is necessary on blocks in
1419       // the indexed lists.
1420 
1421     // Try allocating from the small LinAB
1422     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1423         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1424         // if successful, the above also adjusts block offset table
1425         // Note that this call will refill the LinAB to
        // satisfy the request.  This is different from
1427         // evm.
1428         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1429     } else {
1430       // Raid the exact free lists larger than size, even if they are not
1431       // overpopulated.
1432       res = (HeapWord*) getChunkFromGreater(size);
1433     }
1434   } else {
1435     // Big objects get allocated directly from the dictionary.
1436     res = (HeapWord*) getChunkFromDictionaryExact(size);
1437     if (res == NULL) {
1438       // Try hard not to fail since an allocation failure will likely
1439       // trigger a synchronous GC.  Try to get the space from the
1440       // allocation blocks.
1441       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1442     }
1443   }
1444 
1445   return res;
1446 }
1447 
1448 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1449 // when promoting obj.
1450 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
  // Depending on the object size, expansion may require refilling either a
  // bigLAB or a smallLAB plus refilling a PromotionInfo object.  Twice
  // MinChunkSize is added because the dictionary may over-allocate to avoid
  // fragmentation.
1454   size_t space = obj_size;
1455   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1456   return space;
1457 }
1458 
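// Satisfy a request of numWords by splitting a strictly larger free chunk.
// First scan the indexed free lists for a size of at least
// numWords + MinChunkSize (so that the remainder is itself a valid chunk);
// failing that, fall back to the dictionary, asking for at least
// SmallForDictionary words, and carve the requested size off the front.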
1459 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1460   FreeChunk* ret;
1461 
1462   assert(numWords >= MinChunkSize, "Size is less than minimum");
1463   assert(linearAllocationWouldFail() || bestFitFirst(),
1464     "Should not be here");
1465 
1466   size_t i;
1467   size_t currSize = numWords + MinChunkSize;
1468   assert(is_object_aligned(currSize), "currSize should be aligned");
1469   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1470     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1471     if (fl->head()) {
1472       ret = getFromListGreater(fl, numWords);
1473       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1474       return ret;
1475     }
1476   }
1477 
1478   currSize = MAX2((size_t)SmallForDictionary,
1479                   (size_t)(numWords + MinChunkSize));
1480 
1481   /* Try to get a chunk that satisfies request, while avoiding
1482      fragmentation that can't be handled. */
1483   {
1484     ret =  dictionary()->get_chunk(currSize);
1485     if (ret != NULL) {
1486       assert(ret->size() - numWords >= MinChunkSize,
1487              "Chunk is too small");
1488       _bt.allocated((HeapWord*)ret, ret->size());
1489       /* Carve returned chunk. */
1490       (void) splitChunkAndReturnRemainder(ret, numWords);
1491       /* Label this as no longer a free chunk. */
1492       assert(ret->is_free(), "This chunk should be free");
1493       ret->link_prev(NULL);
1494     }
1495     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1496     return ret;
1497   }
1498   ShouldNotReachHere();
1499 }
1500 
1501 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1502   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1503   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1504 }
1505 
1506 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1507   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1508          (_smallLinearAllocBlock._word_size == fc->size()),
1509          "Linear allocation block shows incorrect size");
1510   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1511           (_smallLinearAllocBlock._word_size == fc->size()));
1512 }
1513 
1514 // Check if the purported free chunk is present either as a linear
1515 // allocation block, the size-indexed table of (smaller) free blocks,
1516 // or the larger free blocks kept in the binary tree dictionary.
1517 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1518   if (verify_chunk_is_linear_alloc_block(fc)) {
1519     return true;
1520   } else if (fc->size() < IndexSetSize) {
1521     return verifyChunkInIndexedFreeLists(fc);
1522   } else {
1523     return dictionary()->verify_chunk_in_free_list(fc);
1524   }
1525 }
1526 
1527 #ifndef PRODUCT
1528 void CompactibleFreeListSpace::assert_locked() const {
1529   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1530 }
1531 
1532 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1533   CMSLockVerifier::assert_locked(lock);
1534 }
1535 #endif
1536 
1537 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1538   // In the parallel case, the main thread holds the free list lock
  // on behalf of the parallel threads.
1540   FreeChunk* fc;
1541   {
1542     // If GC is parallel, this might be called by several threads.
1543     // This should be rare enough that the locking overhead won't affect
1544     // the sequential code.
1545     MutexLocker x(parDictionaryAllocLock(),
1546                   Mutex::_no_safepoint_check_flag);
1547     fc = getChunkFromDictionary(size);
1548   }
1549   if (fc != NULL) {
1550     fc->dontCoalesce();
1551     assert(fc->is_free(), "Should be free, but not coalescable");
1552     // Verify that the block offset table shows this to
1553     // be a single block, but not one which is unallocated.
1554     _bt.verify_single_block((HeapWord*)fc, fc->size());
1555     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1556   }
1557   return fc;
1558 }
1559 
1560 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1561   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1562   assert_locked();
1563 
  // If we are tracking promotions, then first ensure space for
  // promotion (including spooling space for saving the header if necessary),
  // then allocate and copy, and finally track the promoted info if needed.
1567   // When tracking (see PromotionInfo::track()), the mark word may
1568   // be displaced and in this case restoration of the mark word
1569   // occurs in the (oop_since_save_marks_)iterate phase.
1570   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1571     return NULL;
1572   }
1573   // Call the allocate(size_t, bool) form directly to avoid the
1574   // additional call through the allocate(size_t) form.  Having
  // the compiler inline the call is problematic because allocate(size_t)
1576   // is a virtual method.
1577   HeapWord* res = allocate(adjustObjectSize(obj_size));
1578   if (res != NULL) {
1579     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1580     // if we should be tracking promotions, do so.
1581     if (_promoInfo.tracking()) {
1582         _promoInfo.track((PromotedObject*)res);
1583     }
1584   }
1585   return oop(res);
1586 }
1587 
1588 HeapWord*
1589 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1590   assert_locked();
1591   assert(size >= MinChunkSize, "minimum chunk size");
1592   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1593     "maximum from smallLinearAllocBlock");
1594   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1595 }
1596 
1597 HeapWord*
1598 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1599                                                        size_t size) {
1600   assert_locked();
1601   assert(size >= MinChunkSize, "too small");
1602   HeapWord* res = NULL;
  // Try to do linear allocation from blk, after first checking that the
  // block has not already been exhausted.
1604   if (blk->_word_size == 0) {
1605     // We have probably been unable to fill this either in the prologue or
1606     // when it was exhausted at the last linear allocation. Bail out until
1607     // next time.
1608     assert(blk->_ptr == NULL, "consistency check");
1609     return NULL;
1610   }
1611   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1612   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1613   if (res != NULL) return res;
1614 
1615   // about to exhaust this linear allocation block
1616   if (blk->_word_size == size) { // exactly satisfied
1617     res = blk->_ptr;
1618     _bt.allocated(res, blk->_word_size);
1619   } else if (size + MinChunkSize <= blk->_refillSize) {
1620     size_t sz = blk->_word_size;
1621     // Update _unallocated_block if the size is such that chunk would be
1622     // returned to the indexed free list.  All other chunks in the indexed
1623     // free lists are allocated from the dictionary so that _unallocated_block
    // has already been adjusted for them.  Do it here so that the cost
    // is incurred for all chunks added back to the indexed free lists.
1626     if (sz < SmallForDictionary) {
1627       _bt.allocated(blk->_ptr, sz);
1628     }
1629     // Return the chunk that isn't big enough, and then refill below.
1630     addChunkToFreeLists(blk->_ptr, sz);
1631     split_birth(sz);
1632     // Don't keep statistics on adding back chunk from a LinAB.
1633   } else {
1634     // A refilled block would not satisfy the request.
1635     return NULL;
1636   }
1637 
1638   blk->_ptr = NULL; blk->_word_size = 0;
1639   refillLinearAllocBlock(blk);
1640   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1641          "block was replenished");
1642   if (res != NULL) {
1643     split_birth(size);
1644     repairLinearAllocBlock(blk);
1645   } else if (blk->_ptr != NULL) {
1646     res = blk->_ptr;
1647     size_t blk_size = blk->_word_size;
1648     blk->_word_size -= size;
1649     blk->_ptr  += size;
1650     split_birth(size);
1651     repairLinearAllocBlock(blk);
1652     // Update BOT last so that other (parallel) GC threads see a consistent
1653     // view of the BOT and free blocks.
1654     // Above must occur before BOT is updated below.
1655     OrderAccess::storestore();
1656     _bt.split_block(res, blk_size, size);  // adjust block offset table
1657   }
1658   return res;
1659 }
1660 
1661 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1662                                         LinearAllocBlock* blk,
1663                                         size_t size) {
1664   assert_locked();
1665   assert(size >= MinChunkSize, "too small");
1666 
1667   HeapWord* res = NULL;
1668   // This is the common case.  Keep it simple.
1669   if (blk->_word_size >= size + MinChunkSize) {
1670     assert(blk->_ptr != NULL, "consistency check");
1671     res = blk->_ptr;
1672     // Note that the BOT is up-to-date for the linAB before allocation.  It
1673     // indicates the start of the linAB.  The split_block() updates the
1674     // BOT for the linAB after the allocation (indicates the start of the
1675     // next chunk to be allocated).
1676     size_t blk_size = blk->_word_size;
1677     blk->_word_size -= size;
1678     blk->_ptr  += size;
1679     split_birth(size);
1680     repairLinearAllocBlock(blk);
1681     // Update BOT last so that other (parallel) GC threads see a consistent
1682     // view of the BOT and free blocks.
1683     // Above must occur before BOT is updated below.
1684     OrderAccess::storestore();
1685     _bt.split_block(res, blk_size, size);  // adjust block offset table
1686     _bt.allocated(res, size);
1687   }
1688   return res;
1689 }
1690 
1691 FreeChunk*
1692 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1693   assert_locked();
1694   assert(size < SmallForDictionary, "just checking");
1695   FreeChunk* res;
1696   res = _indexedFreeList[size].get_chunk_at_head();
1697   if (res == NULL) {
1698     res = getChunkFromIndexedFreeListHelper(size);
1699   }
1700   _bt.verify_not_unallocated((HeapWord*) res, size);
1701   assert(res == NULL || res->size() == size, "Incorrect block size");
1702   return res;
1703 }
1704 
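// Helper for replenishing the indexed free list for 'size'.  For small sizes
// it first tries a best-fit split from a larger indexed list (when best fit
// is enabled); otherwise it obtains a block of CMSIndexedFreeListReplenish
// times 'size' words (from an over-populated indexed list, a best fit, or
// recursively from the dictionary), carves that block into size-word chunks,
// queues all but the last of them on the indexed list, and returns the last
// chunk.  When 'replenish' is false and CMSReplenishIntermediate is off, the
// block is returned whole.  Sizes at or above SmallForDictionary are taken
// directly from the dictionary.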
1705 FreeChunk*
1706 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1707   bool replenish) {
1708   assert_locked();
1709   FreeChunk* fc = NULL;
1710   if (size < SmallForDictionary) {
1711     assert(_indexedFreeList[size].head() == NULL ||
1712       _indexedFreeList[size].surplus() <= 0,
1713       "List for this size should be empty or under populated");
1714     // Try best fit in exact lists before replenishing the list
1715     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1716       // Replenish list.
1717       //
1718       // Things tried that failed.
1719       //   Tried allocating out of the two LinAB's first before
1720       // replenishing lists.
1721       //   Tried small linAB of size 256 (size in indexed list)
1722       // and replenishing indexed lists from the small linAB.
1723       //
1724       FreeChunk* newFc = NULL;
1725       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1726       if (replenish_size < SmallForDictionary) {
1727         // Do not replenish from an underpopulated size.
1728         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1729             _indexedFreeList[replenish_size].head() != NULL) {
1730           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1731         } else if (bestFitFirst()) {
1732           newFc = bestFitSmall(replenish_size);
1733         }
1734       }
1735       if (newFc == NULL && replenish_size > size) {
1736         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1737         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1738       }
      // Note: The split-death stats for the block obtained above are
      // recorded below, precisely when we know we are actually going to
      // split it into more than one piece.
1742       if (newFc != NULL) {
1743         if  (replenish || CMSReplenishIntermediate) {
1744           // Replenish this list and return one block to caller.
1745           size_t i;
1746           FreeChunk *curFc, *nextFc;
1747           size_t num_blk = newFc->size() / size;
1748           assert(num_blk >= 1, "Smaller than requested?");
1749           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1750           if (num_blk > 1) {
1751             // we are sure we will be splitting the block just obtained
1752             // into multiple pieces; record the split-death of the original
1753             splitDeath(replenish_size);
1754           }
1755           // carve up and link blocks 0, ..., num_blk - 2
1756           // The last chunk is not added to the lists but is returned as the
1757           // free chunk.
1758           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1759                i = 0;
1760                i < (num_blk - 1);
1761                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1762                i++) {
1763             curFc->set_size(size);
1764             // Don't record this as a return in order to try and
1765             // determine the "returns" from a GC.
            _bt.verify_not_unallocated((HeapWord*) curFc, size);
1767             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1768             _bt.mark_block((HeapWord*)curFc, size);
1769             split_birth(size);
1770             // Don't record the initial population of the indexed list
1771             // as a split birth.
1772           }
1773 
1774           // check that the arithmetic was OK above
1775           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1776             "inconsistency in carving newFc");
1777           curFc->set_size(size);
1778           _bt.mark_block((HeapWord*)curFc, size);
1779           split_birth(size);
1780           fc = curFc;
1781         } else {
1782           // Return entire block to caller
1783           fc = newFc;
1784         }
1785       }
1786     }
1787   } else {
1788     // Get a free chunk from the free chunk dictionary to be returned to
1789     // replenish the indexed free list.
1790     fc = getChunkFromDictionaryExact(size);
1791   }
1792   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1793   return fc;
1794 }
1795 
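// Return a chunk of at least 'size' words from the dictionary, splitting off
// and re-queueing any remainder of MinChunkSize or more.  The returned chunk
// therefore has a size in the half-open interval [size, size + MinChunkSize).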
1796 FreeChunk*
1797 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1798   assert_locked();
1799   FreeChunk* fc = _dictionary->get_chunk(size);
1800   if (fc == NULL) {
1801     return NULL;
1802   }
1803   _bt.allocated((HeapWord*)fc, fc->size());
1804   if (fc->size() >= size + MinChunkSize) {
1805     fc = splitChunkAndReturnRemainder(fc, size);
1806   }
1807   assert(fc->size() >= size, "chunk too small");
1808   assert(fc->size() < size + MinChunkSize, "chunk too big");
1809   _bt.verify_single_block((HeapWord*)fc, fc->size());
1810   return fc;
1811 }
1812 
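// Return a chunk of exactly 'size' words.  If the dictionary hands back a
// chunk that is larger than 'size' but too small to split (the remainder
// would be under MinChunkSize), that chunk is returned to the dictionary and
// one of at least size + MinChunkSize words is requested instead, so that
// the excess can always be split off and put back on the free lists.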
1813 FreeChunk*
1814 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1815   assert_locked();
1816   FreeChunk* fc = _dictionary->get_chunk(size);
1817   if (fc == NULL) {
1818     return fc;
1819   }
1820   _bt.allocated((HeapWord*)fc, fc->size());
1821   if (fc->size() == size) {
1822     _bt.verify_single_block((HeapWord*)fc, size);
1823     return fc;
1824   }
1825   assert(fc->size() > size, "get_chunk() guarantee");
1826   if (fc->size() < size + MinChunkSize) {
1827     // Return the chunk to the dictionary and go get a bigger one.
1828     returnChunkToDictionary(fc);
1829     fc = _dictionary->get_chunk(size + MinChunkSize);
1830     if (fc == NULL) {
1831       return NULL;
1832     }
1833     _bt.allocated((HeapWord*)fc, fc->size());
1834   }
1835   assert(fc->size() >= size + MinChunkSize, "tautology");
1836   fc = splitChunkAndReturnRemainder(fc, size);
1837   assert(fc->size() == size, "chunk is wrong size");
1838   _bt.verify_single_block((HeapWord*)fc, size);
1839   return fc;
1840 }
1841 
1842 void
1843 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1844   assert_locked();
1845 
1846   size_t size = chunk->size();
1847   _bt.verify_single_block((HeapWord*)chunk, size);
1848   // adjust _unallocated_block downward, as necessary
1849   _bt.freed((HeapWord*)chunk, size);
1850   _dictionary->return_chunk(chunk);
1851 #ifndef PRODUCT
1852   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1853     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1854     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1855     tl->verify_stats();
1856   }
1857 #endif // PRODUCT
1858 }
1859 
1860 void
1861 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1862   assert_locked();
1863   size_t size = fc->size();
1864   _bt.verify_single_block((HeapWord*) fc, size);
1865   _bt.verify_not_unallocated((HeapWord*) fc, size);
1866   _indexedFreeList[size].return_chunk_at_tail(fc);
1867 #ifndef PRODUCT
1868   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1869      _indexedFreeList[size].verify_stats();
1870   }
1871 #endif // PRODUCT
1872 }
1873 
1874 // Add chunk to end of last block -- if it's the largest
1875 // block -- and update BOT and census data. We would
1876 // of course have preferred to coalesce it with the
1877 // last block, but it's currently less expensive to find the
1878 // largest block than it is to find the last.
1879 void
1880 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1881   HeapWord* chunk, size_t     size) {
1882   // check that the chunk does lie in this space!
1883   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1884   // One of the parallel gc task threads may be here
1885   // whilst others are allocating.
1886   Mutex* lock = &_parDictionaryAllocLock;
1887   FreeChunk* ec;
1888   {
1889     MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
1890     ec = dictionary()->find_largest_dict();  // get largest block
1891     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1892       // It's a coterminal block - we can coalesce.
1893       size_t old_size = ec->size();
1894       coalDeath(old_size);
1895       removeChunkFromDictionary(ec);
1896       size += old_size;
1897     } else {
1898       ec = (FreeChunk*)chunk;
1899     }
1900   }
1901   ec->set_size(size);
1902   debug_only(ec->mangleFreed(size));
1903   if (size < SmallForDictionary) {
1904     lock = _indexedFreeListParLocks[size];
1905   }
1906   MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
1907   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  // Record the birth under the lock, since the recording involves
  // manipulation of the list on which the chunk lives; if the chunk
  // were allocated and were the last one on the list, the list could go away.
1912   coalBirth(size);
1913 }
1914 
1915 void
1916 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1917                                               size_t     size) {
1918   // check that the chunk does lie in this space!
1919   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1920   assert_locked();
1921   _bt.verify_single_block(chunk, size);
1922 
1923   FreeChunk* fc = (FreeChunk*) chunk;
1924   fc->set_size(size);
1925   debug_only(fc->mangleFreed(size));
1926   if (size < SmallForDictionary) {
1927     returnChunkToFreeList(fc);
1928   } else {
1929     returnChunkToDictionary(fc);
1930   }
1931 }
1932 
1933 void
1934 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1935   size_t size, bool coalesced) {
1936   assert_locked();
1937   assert(chunk != NULL, "null chunk");
1938   if (coalesced) {
1939     // repair BOT
1940     _bt.single_block(chunk, size);
1941   }
1942   addChunkToFreeLists(chunk, size);
1943 }
1944 
1945 // We _must_ find the purported chunk on our free lists;
1946 // we assert if we don't.
1947 void
1948 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1949   size_t size = fc->size();
1950   assert_locked();
1951   debug_only(verifyFreeLists());
1952   if (size < SmallForDictionary) {
1953     removeChunkFromIndexedFreeList(fc);
1954   } else {
1955     removeChunkFromDictionary(fc);
1956   }
1957   _bt.verify_single_block((HeapWord*)fc, size);
1958   debug_only(verifyFreeLists());
1959 }
1960 
1961 void
1962 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1963   size_t size = fc->size();
1964   assert_locked();
1965   assert(fc != NULL, "null chunk");
1966   _bt.verify_single_block((HeapWord*)fc, size);
1967   _dictionary->remove_chunk(fc);
1968   // adjust _unallocated_block upward, as necessary
1969   _bt.allocated((HeapWord*)fc, size);
1970 }
1971 
1972 void
1973 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1974   assert_locked();
1975   size_t size = fc->size();
1976   _bt.verify_single_block((HeapWord*)fc, size);
1977   NOT_PRODUCT(
1978     if (FLSVerifyIndexTable) {
1979       verifyIndexedFreeList(size);
1980     }
1981   )
1982   _indexedFreeList[size].remove_chunk(fc);
1983   NOT_PRODUCT(
1984     if (FLSVerifyIndexTable) {
1985       verifyIndexedFreeList(size);
1986     }
1987   )
1988 }
1989 
1990 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1991   /* A hint is the next larger size that has a surplus.
1992      Start search at a size large enough to guarantee that
1993      the excess is >= MIN_CHUNK. */
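  /* For example, if list[start] carries a hint of h and list[h] has a
     surplus, a chunk of h words is removed from list[h] and split by
     getFromListGreater(): the numWords-word front is returned to the caller
     and the (h - numWords)-word remainder goes back onto the free lists. */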
1994   size_t start = align_object_size(numWords + MinChunkSize);
1995   if (start < IndexSetSize) {
1996     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1997     size_t    hint = _indexedFreeList[start].hint();
1998     while (hint < IndexSetSize) {
1999       assert(is_object_aligned(hint), "hint should be aligned");
2000       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
2001       if (fl->surplus() > 0 && fl->head() != NULL) {
2002         // Found a list with surplus, reset original hint
2003         // and split out a free chunk which is returned.
2004         _indexedFreeList[start].set_hint(hint);
2005         FreeChunk* res = getFromListGreater(fl, numWords);
2006         assert(res == NULL || res->is_free(),
2007           "Should be returning a free chunk");
2008         return res;
2009       }
2010       hint = fl->hint(); /* keep looking */
2011     }
2012     /* None found. */
2013     it[start].set_hint(IndexSetSize);
2014   }
2015   return NULL;
2016 }
2017 
2018 /* Requires fl->size >= numWords + MinChunkSize */
2019 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
2020   size_t numWords) {
  FreeChunk *curr = fl->head();
  assert(curr != NULL, "List is empty");
  size_t oldNumWords = curr->size();
  assert(numWords >= MinChunkSize, "Word size is too small");
  assert(oldNumWords >= numWords + MinChunkSize,
        "Size of chunks in the list is too small");
2027 
2028   fl->remove_chunk(curr);
2029   // recorded indirectly by splitChunkAndReturnRemainder -
2030   // smallSplit(oldNumWords, numWords);
2031   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
2032   // Does anything have to be done for the remainder in terms of
2033   // fixing the card table?
2034   assert(new_chunk == NULL || new_chunk->is_free(),
2035     "Should be returning a free chunk");
2036   return new_chunk;
2037 }
2038 
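// Carve a chunk of new_size words off the front of 'chunk'.  The remainder
// (size - new_size words, at least MinChunkSize) is marked free and returned
// to the indexed free lists or to the dictionary as appropriate, and the
// resized front chunk is handed back to the caller.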
2039 FreeChunk*
2040 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
2041   size_t new_size) {
2042   assert_locked();
2043   size_t size = chunk->size();
2044   assert(size > new_size, "Split from a smaller block?");
2045   assert(is_aligned(chunk), "alignment problem");
2046   assert(size == adjustObjectSize(size), "alignment problem");
2047   size_t rem_sz = size - new_size;
2048   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
2049   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
2050   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
2051   assert(is_aligned(ffc), "alignment problem");
2052   ffc->set_size(rem_sz);
2053   ffc->link_next(NULL);
2054   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2055   // Above must occur before BOT is updated below.
2056   // adjust block offset table
2057   OrderAccess::storestore();
2058   assert(chunk->is_free() && ffc->is_free(), "Error");
2059   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
2060   if (rem_sz < SmallForDictionary) {
2061     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
2062     bool is_par = Thread::current()->is_GC_task_thread();
2063     if (is_par) _indexedFreeListParLocks[rem_sz]->lock_without_safepoint_check();
2064     returnChunkToFreeList(ffc);
2065     split(size, rem_sz);
2066     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
2067   } else {
2068     returnChunkToDictionary(ffc);
2069     split(size, rem_sz);
2070   }
2071   chunk->set_size(new_size);
2072   return chunk;
2073 }
2074 
2075 void
2076 CompactibleFreeListSpace::sweep_completed() {
2077   // Now that space is probably plentiful, refill linear
2078   // allocation blocks as needed.
2079   refillLinearAllocBlocksIfNeeded();
2080 }
2081 
2082 void
2083 CompactibleFreeListSpace::gc_prologue() {
2084   assert_locked();
2085   reportFreeListStatistics("Before GC:");
2086   refillLinearAllocBlocksIfNeeded();
2087 }
2088 
2089 void
2090 CompactibleFreeListSpace::gc_epilogue() {
2091   assert_locked();
2092   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2093   _promoInfo.stopTrackingPromotions();
2094   repairLinearAllocationBlocks();
2095   reportFreeListStatistics("After GC:");
2096 }
2097 
2098 // Iteration support, mostly delegated from a CMS generation
2099 
2100 void CompactibleFreeListSpace::save_marks() {
2101   assert(Thread::current()->is_VM_thread(),
2102          "Global variable should only be set when single-threaded");
2103   // Mark the "end" of the used space at the time of this call;
2104   // note, however, that promoted objects from this point
2105   // on are tracked in the _promoInfo below.
2106   set_saved_mark_word(unallocated_block());
2107 #ifdef ASSERT
2108   // Check the sanity of save_marks() etc.
2109   MemRegion ur    = used_region();
2110   MemRegion urasm = used_region_at_save_marks();
2111   assert(ur.contains(urasm),
2112          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
2113          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
2114          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
2115 #endif
2116   // inform allocator that promotions should be tracked.
2117   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2118   _promoInfo.startTrackingPromotions();
2119 }
2120 
2121 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2122   assert(_promoInfo.tracking(), "No preceding save_marks?");
2123   return _promoInfo.noPromotions();
2124 }
2125 
2126 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2127   return _smallLinearAllocBlock._word_size == 0;
2128 }
2129 
2130 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2131   // Fix up linear allocation blocks to look like free blocks
2132   repairLinearAllocBlock(&_smallLinearAllocBlock);
2133 }
2134 
2135 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2136   assert_locked();
2137   if (blk->_ptr != NULL) {
2138     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2139            "Minimum block size requirement");
2140     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2141     fc->set_size(blk->_word_size);
2142     fc->link_prev(NULL);   // mark as free
2143     fc->dontCoalesce();
2144     assert(fc->is_free(), "just marked it free");
2145     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2146   }
2147 }
2148 
2149 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2150   assert_locked();
2151   if (_smallLinearAllocBlock._ptr == NULL) {
2152     assert(_smallLinearAllocBlock._word_size == 0,
2153       "Size of linAB should be zero if the ptr is NULL");
2154     // Reset the linAB refill and allocation size limit.
2155     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2156   }
2157   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2158 }
2159 
2160 void
2161 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2162   assert_locked();
2163   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2164          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2165          "blk invariant");
2166   if (blk->_ptr == NULL) {
2167     refillLinearAllocBlock(blk);
2168   }
2169 }
2170 
2171 void
2172 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2173   assert_locked();
2174   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2175          "linear allocation block should be empty");
2176   FreeChunk* fc;
2177   if (blk->_refillSize < SmallForDictionary &&
2178       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2179     // A linAB's strategy might be to use small sizes to reduce
2180     // fragmentation but still get the benefits of allocation from a
2181     // linAB.
2182   } else {
2183     fc = getChunkFromDictionary(blk->_refillSize);
2184   }
2185   if (fc != NULL) {
2186     blk->_ptr  = (HeapWord*)fc;
2187     blk->_word_size = fc->size();
2188     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2189   }
2190 }
2191 
2192 // Support for compaction
2193 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2194   scan_and_forward(this, cp);
  // Prepare_for_compaction() uses the space between live objects
  // so that later phases can skip dead space quickly.  As a result,
  // verification of the free lists does not work afterwards.
2198 }
2199 
2200 void CompactibleFreeListSpace::adjust_pointers() {
2201   // In other versions of adjust_pointers(), a bail out
2202   // based on the amount of live data in the generation
2203   // (i.e., if 0, bail out) may be used.
2204   // Cannot test used() == 0 here because the free lists have already
2205   // been mangled by the compaction.
2206 
2207   scan_and_adjust_pointers(this);
2208   // See note about verification in prepare_for_compaction().
2209 }
2210 
2211 void CompactibleFreeListSpace::compact() {
2212   scan_and_compact(this);
2213 }
2214 
2215 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2216 // where fbs is free block sizes
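// Illustrative arithmetic: a single free block of 128 words gives
// 1 - 128^2/128^2 = 0 (no fragmentation), whereas 64 free blocks of 2 words
// each give 1 - (64*2^2)/128^2 = 1 - 256/16384, i.e. roughly 0.98.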
2217 double CompactibleFreeListSpace::flsFrag() const {
2218   size_t itabFree = totalSizeInIndexedFreeLists();
2219   double frag = 0.0;
2220   size_t i;
2221 
2222   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2223     double sz  = i;
2224     frag      += _indexedFreeList[i].count() * (sz * sz);
2225   }
2226 
2227   double totFree = itabFree +
2228                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2229   if (totFree > 0) {
2230     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2231             (totFree * totFree));
2232     frag = (double)1.0  - frag;
2233   } else {
2234     assert(frag == 0.0, "Follows from totFree == 0");
2235   }
2236   return frag;
2237 }
2238 
2239 void CompactibleFreeListSpace::beginSweepFLCensus(
2240   float inter_sweep_current,
2241   float inter_sweep_estimate,
2242   float intra_sweep_estimate) {
2243   assert_locked();
2244   size_t i;
2245   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2246     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2247     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2248     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2249     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2250     fl->set_before_sweep(fl->count());
2251     fl->set_bfr_surp(fl->surplus());
2252   }
2253   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2254                                     inter_sweep_current,
2255                                     inter_sweep_estimate,
2256                                     intra_sweep_estimate);
2257 }
2258 
2259 void CompactibleFreeListSpace::setFLSurplus() {
2260   assert_locked();
2261   size_t i;
2262   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2263     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2264     fl->set_surplus(fl->count() -
2265                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2266   }
2267 }
2268 
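// Set each indexed free list's hint to the nearest larger size that
// currently has a surplus; lists with no such larger size get IndexSetSize
// as a sentinel value.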
2269 void CompactibleFreeListSpace::setFLHints() {
2270   assert_locked();
2271   size_t i;
2272   size_t h = IndexSetSize;
2273   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2274     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2275     fl->set_hint(h);
2276     if (fl->surplus() > 0) {
2277       h = i;
2278     }
2279   }
2280 }
2281 
2282 void CompactibleFreeListSpace::clearFLCensus() {
2283   assert_locked();
2284   size_t i;
2285   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2286     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2287     fl->set_prev_sweep(fl->count());
2288     fl->set_coal_births(0);
2289     fl->set_coal_deaths(0);
2290     fl->set_split_births(0);
2291     fl->set_split_deaths(0);
2292   }
2293 }
2294 
2295 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2296   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2297   setFLSurplus();
2298   setFLHints();
2299   printFLCensus(sweep_count);
2300   clearFLCensus();
2301   assert_locked();
2302   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2303 }
2304 
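// A size is considered over-populated for coalescing purposes when its
// indexed free list already holds more chunks than its desired coalescing
// count (or when that count is negative); sizes handled by the dictionary
// defer to the dictionary's own census.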
2305 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2306   if (size < SmallForDictionary) {
2307     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2308     return (fl->coal_desired() < 0) ||
2309            ((int)fl->count() > fl->coal_desired());
2310   } else {
2311     return dictionary()->coal_dict_over_populated(size);
2312   }
2313 }
2314 
2315 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2316   assert(size < SmallForDictionary, "Size too large for indexed list");
2317   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2318   fl->increment_coal_births();
2319   fl->increment_surplus();
2320 }
2321 
2322 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2323   assert(size < SmallForDictionary, "Size too large for indexed list");
2324   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2325   fl->increment_coal_deaths();
2326   fl->decrement_surplus();
2327 }
2328 
2329 void CompactibleFreeListSpace::coalBirth(size_t size) {
2330   if (size  < SmallForDictionary) {
2331     smallCoalBirth(size);
2332   } else {
2333     dictionary()->dict_census_update(size,
2334                                    false /* split */,
2335                                    true /* birth */);
2336   }
2337 }
2338 
2339 void CompactibleFreeListSpace::coalDeath(size_t size) {
  if (size < SmallForDictionary) {
2341     smallCoalDeath(size);
2342   } else {
2343     dictionary()->dict_census_update(size,
2344                                    false /* split */,
2345                                    false /* birth */);
2346   }
2347 }
2348 
2349 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2350   assert(size < SmallForDictionary, "Size too large for indexed list");
2351   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2352   fl->increment_split_births();
2353   fl->increment_surplus();
2354 }
2355 
2356 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2357   assert(size < SmallForDictionary, "Size too large for indexed list");
2358   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2359   fl->increment_split_deaths();
2360   fl->decrement_surplus();
2361 }
2362 
2363 void CompactibleFreeListSpace::split_birth(size_t size) {
2364   if (size  < SmallForDictionary) {
2365     smallSplitBirth(size);
2366   } else {
2367     dictionary()->dict_census_update(size,
2368                                    true /* split */,
2369                                    true /* birth */);
2370   }
2371 }
2372 
2373 void CompactibleFreeListSpace::splitDeath(size_t size) {
2374   if (size  < SmallForDictionary) {
2375     smallSplitDeath(size);
2376   } else {
2377     dictionary()->dict_census_update(size,
2378                                    true /* split */,
2379                                    false /* birth */);
2380   }
2381 }
2382 
2383 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2384   size_t to2 = from - to1;
2385   splitDeath(from);
2386   split_birth(to1);
2387   split_birth(to2);
2388 }
2389 
2390 void CompactibleFreeListSpace::print() const {
2391   print_on(tty);
2392 }
2393 
2394 void CompactibleFreeListSpace::prepare_for_verify() {
2395   assert_locked();
2396   repairLinearAllocationBlocks();
2397   // Verify that the SpoolBlocks look like free blocks of
2398   // appropriate sizes... To be done ...
2399 }
2400 
2401 class VerifyAllBlksClosure: public BlkClosure {
2402  private:
2403   const CompactibleFreeListSpace* _sp;
2404   const MemRegion                 _span;
2405   HeapWord*                       _last_addr;
2406   size_t                          _last_size;
2407   bool                            _last_was_obj;
2408   bool                            _last_was_live;
2409 
2410  public:
2411   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2412     MemRegion span) :  _sp(sp), _span(span),
2413                        _last_addr(NULL), _last_size(0),
2414                        _last_was_obj(false), _last_was_live(false) { }
2415 
2416   virtual size_t do_blk(HeapWord* addr) {
2417     size_t res;
2418     bool   was_obj  = false;
2419     bool   was_live = false;
2420     if (_sp->block_is_obj(addr)) {
2421       was_obj = true;
2422       oop p = oop(addr);
2423       guarantee(oopDesc::is_oop(p), "Should be an oop");
2424       res = _sp->adjustObjectSize(p->size());
2425       if (_sp->obj_is_alive(addr)) {
2426         was_live = true;
2427         oopDesc::verify(p);
2428       }
2429     } else {
2430       FreeChunk* fc = (FreeChunk*)addr;
2431       res = fc->size();
2432       if (FLSVerifyLists && !fc->cantCoalesce()) {
2433         guarantee(_sp->verify_chunk_in_free_list(fc),
2434                   "Chunk should be on a free list");
2435       }
2436     }
2437     if (res == 0) {
2438       Log(gc, verify) log;
2439       log.error("Livelock: no rank reduction!");
2440       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2441                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2442         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2443         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2444       LogStream ls(log.error());
2445       _sp->print_on(&ls);
2446       guarantee(false, "Verification failed.");
2447     }
2448     _last_addr = addr;
2449     _last_size = res;
2450     _last_was_obj  = was_obj;
2451     _last_was_live = was_live;
2452     return res;
2453   }
2454 };
2455 
2456 class VerifyAllOopsClosure: public BasicOopIterateClosure {
2457  private:
2458   const CMSCollector*             _collector;
2459   const CompactibleFreeListSpace* _sp;
2460   const MemRegion                 _span;
2461   const bool                      _past_remark;
2462   const CMSBitMap*                _bit_map;
2463 
2464  protected:
2465   void do_oop(void* p, oop obj) {
2466     if (_span.contains(obj)) { // the interior oop points into CMS heap
2467       if (!_span.contains(p)) { // reference from outside CMS heap
2468         // Should be a valid object; the first disjunct below allows
2469         // us to sidestep an assertion in block_is_obj() that insists
2470         // that p be in _sp. Note that several generations (and spaces)
2471         // are spanned by _span (CMS heap) above.
2472         guarantee(!_sp->is_in_reserved(obj) ||
2473                   _sp->block_is_obj((HeapWord*)obj),
2474                   "Should be an object");
2475         guarantee(oopDesc::is_oop(obj), "Should be an oop");
2476         oopDesc::verify(obj);
2477         if (_past_remark) {
2478           // Remark has been completed, the object should be marked
2479           _bit_map->isMarked((HeapWord*)obj);
2480         }
2481       } else { // reference within CMS heap
2482         if (_past_remark) {
2483           // Remark has been completed -- so the referent should have
2484           // been marked, if referring object is.
2485           if (_bit_map->isMarked(_collector->block_start(p))) {
2486             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2487           }
2488         }
2489       }
2490     } else if (_sp->is_in_reserved(p)) {
2491       // the reference is from FLS, and points out of FLS
2492       guarantee(oopDesc::is_oop(obj), "Should be an oop");
2493       oopDesc::verify(obj);
2494     }
2495   }
2496 
2497   template <class T> void do_oop_work(T* p) {
2498     T heap_oop = RawAccess<>::oop_load(p);
2499     if (!CompressedOops::is_null(heap_oop)) {
2500       oop obj = CompressedOops::decode_not_null(heap_oop);
2501       do_oop(p, obj);
2502     }
2503   }
2504 
2505  public:
2506   VerifyAllOopsClosure(const CMSCollector* collector,
2507     const CompactibleFreeListSpace* sp, MemRegion span,
2508     bool past_remark, CMSBitMap* bit_map) :
2509     _collector(collector), _sp(sp), _span(span),
2510     _past_remark(past_remark), _bit_map(bit_map) { }
2511 
2512   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2513   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2514 };
2515 
2516 void CompactibleFreeListSpace::verify() const {
2517   assert_lock_strong(&_freelistLock);
2518   verify_objects_initialized();
2519   MemRegion span = _collector->_span;
2520   bool past_remark = (_collector->abstract_state() ==
2521                       CMSCollector::Sweeping);
2522 
2523   ResourceMark rm;
2524   HandleMark  hm;
2525 
2526   // Check integrity of CFL data structures
2527   _promoInfo.verify();
2528   _dictionary->verify();
2529   if (FLSVerifyIndexTable) {
2530     verifyIndexedFreeLists();
2531   }
2532   // Check integrity of all objects and free blocks in space
2533   {
2534     VerifyAllBlksClosure cl(this, span);
2535     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2536   }
2537   // Check that all references in the heap to FLS
2538   // are to valid objects in FLS or that references in
2539   // FLS are to valid objects elsewhere in the heap
2540   if (FLSVerifyAllHeapReferences)
2541   {
2542     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2543       _collector->markBitMap());
2544 
2545     // Iterate over all oops in the heap.
2546     CMSHeap::heap()->oop_iterate(&cl);
2547   }
2548 
2549   if (VerifyObjectStartArray) {
2550     // Verify the block offset table
2551     _bt.verify();
2552   }
2553 }
2554 
2555 #ifndef PRODUCT
2556 void CompactibleFreeListSpace::verifyFreeLists() const {
2557   if (FLSVerifyLists) {
2558     _dictionary->verify();
2559     verifyIndexedFreeLists();
2560   } else {
2561     if (FLSVerifyDictionary) {
2562       _dictionary->verify();
2563     }
2564     if (FLSVerifyIndexTable) {
2565       verifyIndexedFreeLists();
2566     }
2567   }
2568 }
2569 #endif
2570 
2571 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2572   size_t i = 0;
2573   for (; i < IndexSetStart; i++) {
2574     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2575   }
2576   for (; i < IndexSetSize; i++) {
2577     verifyIndexedFreeList(i);
2578   }
2579 }
2580 
2581 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2582   FreeChunk* fc   =  _indexedFreeList[size].head();
2583   FreeChunk* tail =  _indexedFreeList[size].tail();
2584   size_t    num = _indexedFreeList[size].count();
2585   size_t      n = 0;
2586   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2587             "Slot should have been empty");
2588   for (; fc != NULL; fc = fc->next(), n++) {
2589     guarantee(fc->size() == size, "Size inconsistency");
2590     guarantee(fc->is_free(), "!free?");
2591     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2592     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2593   }
2594   guarantee(n == num, "Incorrect count");
2595 }
2596 
2597 #ifndef PRODUCT
2598 void CompactibleFreeListSpace::check_free_list_consistency() const {
2599   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2600     "Some sizes can't be allocated without recourse to"
2601     " linear allocation buffers");
2602   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2603     "else MIN_TREE_CHUNK_SIZE is wrong");
2604   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2605   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2606 }
2607 #endif
2608 
2609 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2610   assert_lock_strong(&_freelistLock);
2611   LogTarget(Debug, gc, freelist, census) log;
2612   if (!log.is_enabled()) {
2613     return;
2614   }
2615   AdaptiveFreeList<FreeChunk> total;
2616   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2617   ResourceMark rm;
2618   LogStream ls(log);
2619   outputStream* out = &ls;
2620   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2621   size_t total_free = 0;
2622   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2623     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2624     total_free += fl->count() * fl->size();
2625     if (i % (40*IndexSetStride) == 0) {
2626       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2627     }
2628     fl->print_on(out);
2629     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2630     total.set_surplus(    total.surplus()     + fl->surplus()    );
2631     total.set_desired(    total.desired()     + fl->desired()    );
2632     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2633     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2634     total.set_count(      total.count()       + fl->count()      );
2635     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2636     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2637     total.set_split_births(total.split_births() + fl->split_births());
2638     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2639   }
2640   total.print_on(out, "TOTAL");
2641   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
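  // "growth" below is the net number of blocks produced since the previous
  // sweep (split and coalesce births minus deaths), normalized by the
  // population recorded at that sweep; "deficit" is the fraction of the
  // desired population that is currently missing from the lists.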
2642   log.print("growth: %8.5f  deficit: %8.5f",
2643             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2644                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2645             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2646   _dictionary->print_dict_census(out);
2647 }
2648 
2649 ///////////////////////////////////////////////////////////////////////////
2650 // CompactibleFreeListSpaceLAB
2651 ///////////////////////////////////////////////////////////////////////////
2652 
2653 #define VECTOR_257(x)                                                                                  \
2654   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2655   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2656      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2657      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2658      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2659      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2660      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2661      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2662      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2663      x }
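// The macro above expands to 8 rows of 32 initializers plus one more,
// i.e. 257 entries in total, matching IndexSetSize (see the assert in the
// CompactibleFreeListSpaceLAB constructor below).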
2664 
2665 // Initialize with default setting for CMS, _not_
2666 // generic OldPLABSize, whose static default is different; if overridden at the
2667 // command-line, this will get reinitialized via a call to
2668 // modify_initialization() below.
2669 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2670   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2671 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2672 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2673 
2674 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2675   _cfls(cfls)
2676 {
2677   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2678   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2679        i < CompactibleFreeListSpace::IndexSetSize;
2680        i += CompactibleFreeListSpace::IndexSetStride) {
2681     _indexedFreeList[i].set_size(i);
2682     _num_blocks[i] = 0;
2683   }
2684 }
2685 
2686 static bool _CFLS_LAB_modified = false;
2687 
2688 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2689   assert(!_CFLS_LAB_modified, "Call only once");
2690   _CFLS_LAB_modified = true;
2691   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2692        i < CompactibleFreeListSpace::IndexSetSize;
2693        i += CompactibleFreeListSpace::IndexSetStride) {
2694     _blocks_to_claim[i].modify(n, wt, true /* force */);
2695   }
2696 }
2697 
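// Allocate word_sz heap words for a promoted object.  Small requests
// (below IndexSetSize words) are served from this thread's local indexed
// free lists, which are refilled in bulk from the shared space when empty;
// larger requests go straight to the shared dictionary under its lock.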
2698 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2699   FreeChunk* res;
2700   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2701   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2702     // This locking manages sync with other large object allocations.
2703     MutexLocker x(_cfls->parDictionaryAllocLock(),
2704                   Mutex::_no_safepoint_check_flag);
2705     res = _cfls->getChunkFromDictionaryExact(word_sz);
2706     if (res == NULL) return NULL;
2707   } else {
2708     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2709     if (fl->count() == 0) {
2710       // Attempt to refill this local free list.
2711       get_from_global_pool(word_sz, fl);
2712       // If it didn't work, give up.
2713       if (fl->count() == 0) return NULL;
2714     }
2715     res = fl->get_chunk_at_head();
2716     assert(res != NULL, "Why was count non-zero?");
2717   }
2718   res->markNotFree();
2719   assert(!res->is_free(), "shouldn't be marked free");
2720   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2721   // mangle a just allocated object with a distinct pattern.
2722   debug_only(res->mangleAllocated(word_sz));
2723   return (HeapWord*)res;
2724 }
2725 
2726 // Get a chunk of blocks of the right size and update related
2727 // book-keeping stats
2728 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2729   // Get the #blocks we want to claim
2730   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2731   assert(n_blks > 0, "Error");
2732   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2733   // In some cases, when the application has a phase change,
2734   // there may be a sudden and sharp shift in the object survival
2735   // profile, and updating the counts at the end of a scavenge
2736   // may not be quick enough, giving rise to large scavenge pauses
2737   // during these phase changes. It is beneficial to detect such
2738   // changes on-the-fly during a scavenge and avoid such a phase-change
2739   // pothole. The following code is a heuristic attempt to do that.
2740   // It is protected by a product flag until we have gained
2741   // enough experience with this heuristic and fine-tuned its behavior.
2742   // WARNING: This might increase fragmentation if we overreact to
2743   // small spikes, so some form of historical smoothing, informed by
2744   // experience with the increased reactivity, might be useful.
2745   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2746   // default.
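  // Illustration (numbers are made up): if this thread has already consumed
  // ten "tolerance units" worth of word_sz blocks this scavenge, i.e.
  // _num_blocks[word_sz] is ten times
  // CMSOldPLABToleranceFactor * CMSOldPLABNumRefills * n_blks, then
  // multiple == 10 below and the claim grows by
  // CMSOldPLABReactivityFactor * 10 * n_blks, capped at CMSOldPLABMax.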
2747   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2748     //
2749     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2750     // which is why there is a cast to double.
2751     //
2752     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2753     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2754     n_blks = MIN2(n_blks, CMSOldPLABMax);
2755   }
2756   assert(n_blks > 0, "Error");
2757   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2758   // Update stats table entry for this block size
2759   _num_blocks[word_sz] += fl->count();
2760 }
2761 
2762 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2763   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2764        i < CompactibleFreeListSpace::IndexSetSize;
2765        i += CompactibleFreeListSpace::IndexSetStride) {
2766     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2767            "Counter inconsistency");
2768     if (_global_num_workers[i] > 0) {
2769       // Need to smooth wrt historical average
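      // The new sample is the average number of blocks of this size that
      // each worker consumed per PLAB refill during the last scavenge,
      // clamped to the [CMSOldPLABMin, CMSOldPLABMax] range.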
2770       if (ResizeOldPLAB) {
2771         _blocks_to_claim[i].sample(
2772           MAX2(CMSOldPLABMin,
2773           MIN2(CMSOldPLABMax,
2774                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2775       }
2776       // Reset counters for next round
2777       _global_num_workers[i] = 0;
2778       _global_num_blocks[i] = 0;
2779       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2780     }
2781   }
2782 }
2783 
2784 // If this is changed in the future to allow parallel
2785 // access, one would need to take the FL locks and,
2786 // depending on how it is used, stagger access from
2787 // parallel threads to reduce contention.
2788 void CompactibleFreeListSpaceLAB::retire(int tid) {
2789   // We run this single-threaded with the world stopped,
2790   // so there is no need for locks and such.
2791   NOT_PRODUCT(Thread* t = Thread::current();)
2792   assert(Thread::current()->is_VM_thread(), "Error");
2793   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2794        i < CompactibleFreeListSpace::IndexSetSize;
2795        i += CompactibleFreeListSpace::IndexSetStride) {
2796     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2797            "Can't retire more than what we obtained");
2798     if (_num_blocks[i] > 0) {
2799       size_t num_retire =  _indexedFreeList[i].count();
2800       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2801       {
2802         // MutexLocker x(_cfls->_indexedFreeListParLocks[i],
2803         //               Mutex::_no_safepoint_check_flag);
2804 
2805         // Update globals stats for num_blocks used
2806         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2807         _global_num_workers[i]++;
2808         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2809         if (num_retire > 0) {
2810           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2811           // Reset this list.
2812           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2813           _indexedFreeList[i].set_size(i);
2814         }
2815       }
2816       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2817                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2818       // Reset stats for next round
2819       _num_blocks[i]         = 0;
2820     }
2821   }
2822 }
2823 
2824 // Used by par_get_chunk_of_blocks() to get chunks from the
2825 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2826 // of "word_sz" and, if one is found, splits it into blocks of "word_sz"
2827 // words each and adds them to the free list "fl".  "n" is the maximum
2828 // number of chunks to be added to "fl".
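// Illustration (sizes are made up): with word_sz == 8, the loop below first
// tries the 8-word list (k == 1) and, if CMSSplitIndexedFreeListBlocks, the
// 16-word list (k == 2), the 24-word list (k == 3), and so on; a chunk taken
// from the k-th list is split into k blocks of word_sz words each.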
2829 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2830 
2831   // We'll try all multiples of word_sz in the indexed set, starting with
2832   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2833   // then try getting a big chunk and splitting it.
2834   {
2835     bool found;
2836     int  k;
2837     size_t cur_sz;
2838     for (k = 1, cur_sz = k * word_sz, found = false;
2839          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2840          (CMSSplitIndexedFreeListBlocks || k <= 1);
2841          k++, cur_sz = k * word_sz) {
2842       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2843       fl_for_cur_sz.set_size(cur_sz);
2844       {
2845         MutexLocker x(_indexedFreeListParLocks[cur_sz],
2846                       Mutex::_no_safepoint_check_flag);
2847         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2848         if (gfl->count() != 0) {
2849           // nn is the number of chunks of size cur_sz that
2850           // we'd need to split k-ways each, in order to create
2851           // "n" chunks of size word_sz each.
2852           const size_t nn = MAX2(n/k, (size_t)1);
2853           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2854           found = true;
2855           if (k > 1) {
2856             // Update split death stats for the cur_sz-size blocks list:
2857             // we increment the split death count by the number of blocks
2858             // we just took from the cur_sz-size blocks list and which
2859             // we will be splitting below.
2860             ssize_t deaths = gfl->split_deaths() +
2861                              fl_for_cur_sz.count();
2862             gfl->set_split_deaths(deaths);
2863           }
2864         }
2865       }
2866       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2867       if (found) {
2868         if (k == 1) {
2869           fl->prepend(&fl_for_cur_sz);
2870         } else {
2871           // Divide each block on fl_for_cur_sz up k ways.
2872           FreeChunk* fc;
2873           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2874             // Must do this in reverse order, so that anybody attempting to
2875             // access the main chunk sees it as a single free block until we
2876             // change it.
2877             size_t fc_size = fc->size();
2878             assert(fc->is_free(), "Error");
2879             for (int i = k-1; i >= 0; i--) {
2880               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2881               assert((i != 0) ||
2882                         ((fc == ffc) && ffc->is_free() &&
2883                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2884                         "Counting error");
2885               ffc->set_size(word_sz);
2886               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2887               ffc->link_next(NULL);
2888               // Above must occur before BOT is updated below.
2889               OrderAccess::storestore();
2890               // splitting from the right; after the decrement below, fc_size == i * word_sz
2891               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2892               fc_size -= word_sz;
2893               assert(fc_size == i*word_sz, "Error");
2894               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2895               _bt.verify_single_block((HeapWord*)fc, fc_size);
2896               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2897               // Push this on "fl".
2898               fl->return_chunk_at_head(ffc);
2899             }
2900             // TRAP
2901             assert(fl->tail()->next() == NULL, "List invariant.");
2902           }
2903         }
2904         // Update birth stats for this block size.
2905         size_t num = fl->count();
2906         MutexLocker x(_indexedFreeListParLocks[word_sz],
2907                       Mutex::_no_safepoint_check_flag);
2908         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2909         _indexedFreeList[word_sz].set_split_births(births);
2910         return true;
2911       }
2912     }
2913     return found;
2914   }
2915 }
2916 
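// Find a chunk in the dictionary that can be carved into (up to) "n" blocks
// of "word_sz" words each.  Any remainder is split off and returned to the
// dictionary or to the indexed free lists, and the prefix, trimmed to exactly
// n * word_sz words (n may have been reduced), is handed back to the caller.
// Returns NULL if no suitable chunk is found or nothing viable can be carved.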
2917 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2918 
2919   FreeChunk* fc = NULL;
2920   FreeChunk* rem_fc = NULL;
2921   size_t rem;
2922   {
2923     MutexLocker x(parDictionaryAllocLock(),
2924                   Mutex::_no_safepoint_check_flag);
2925     while (n > 0) {
2926       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
2927       if (fc != NULL) {
2928         break;
2929       } else {
2930         n--;
2931       }
2932     }
2933     if (fc == NULL) return NULL;
2934     // Otherwise, split up that block.
2935     assert((ssize_t)n >= 1, "Control point invariant");
2936     assert(fc->is_free(), "Error: should be a free block");
2937     _bt.verify_single_block((HeapWord*)fc, fc->size());
2938     const size_t nn = fc->size() / word_sz;
2939     n = MIN2(nn, n);
2940     assert((ssize_t)n >= 1, "Control point invariant");
2941     rem = fc->size() - n * word_sz;
2942     // If there is a remainder, and it's too small, allocate one fewer.
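    // For example (made-up numbers): carving 5 blocks of 100 words out of a
    // 520-word chunk would leave a 20-word remainder; if that is below
    // MinChunkSize we take only 4 blocks, leaving a viable 120-word remainder.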
2943     if (rem > 0 && rem < MinChunkSize) {
2944       n--; rem += word_sz;
2945     }
2946     // Note that at this point we may have n == 0.
2947     assert((ssize_t)n >= 0, "Control point invariant");
2948 
2949     // If n is 0, the chunk fc that was found is not large
2950     // enough to leave a viable remainder.  We are unable to
2951     // allocate even one block.  Return fc to the
2952     // dictionary and return, leaving "fl" empty.
2953     if (n == 0) {
2954       returnChunkToDictionary(fc);
2955       return NULL;
2956     }
2957 
2958     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2959     dictionary()->dict_census_update(fc->size(),
2960                                      true /*split*/,
2961                                      false /*birth*/);
2962 
2963     // First return the remainder, if any.
2964     // Note that we hold the lock until we decide if we're going to give
2965     // back the remainder to the dictionary, since a concurrent allocation
2966     // may otherwise see the heap as empty.  (We're willing to take that
2967     // hit if the block is a small block.)
2968     if (rem > 0) {
2969       size_t prefix_size = n * word_sz;
2970       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2971       rem_fc->set_size(rem);
2972       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2973       rem_fc->link_next(NULL);
2974       // Above must occur before BOT is updated below.
2975       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2976       OrderAccess::storestore();
2977       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2978       assert(fc->is_free(), "Error");
2979       fc->set_size(prefix_size);
2980       if (rem >= IndexSetSize) {
2981         returnChunkToDictionary(rem_fc);
2982         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2983         rem_fc = NULL;
2984       }
2985       // Otherwise, return it to the small list below.
2986     }
2987   }
2988   if (rem_fc != NULL) {
2989     MutexLocker x(_indexedFreeListParLocks[rem],
2990                   Mutex::_no_safepoint_check_flag);
2991     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2992     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2993     smallSplitBirth(rem);
2994   }
2995   assert(n * word_sz == fc->size(),
2996          "Chunk size " SIZE_FORMAT " is not exactly splittable by "
2997          SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
2998          fc->size(), n, word_sz);
2999   return fc;
3000 }
3001 
3002 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
3003 
3004   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
3005 
3006   if (fc == NULL) {
3007     return;
3008   }
3009 
3010   size_t n = fc->size() / word_sz;
3011 
3012   assert((ssize_t)n > 0, "Consistency");
3013   // Now do the splitting up.
3014   // Must do this in reverse order, so that anybody attempting to
3015   // access the main chunk sees it as a single free block until we
3016   // change it.
3017   size_t fc_size = n * word_sz;
3018   // All but first chunk in this loop
3019   for (ssize_t i = n-1; i > 0; i--) {
3020     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
3021     ffc->set_size(word_sz);
3022     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
3023     ffc->link_next(NULL);
3024     // Above must occur before BOT is updated below.
3025     OrderAccess::storestore();
3026     // splitting from the right; after the decrement below, fc_size == i * word_sz
3027     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
3028     fc_size -= word_sz;
3029     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
3030     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
3031     _bt.verify_single_block((HeapWord*)fc, fc_size);
3032     // Push this on "fl".
3033     fl->return_chunk_at_head(ffc);
3034   }
3035   // First chunk
3036   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
3037   // The blocks above should show their new sizes before the first block below
3038   fc->set_size(word_sz);
3039   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
3040   fc->link_next(NULL);
3041   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
3042   _bt.verify_single_block((HeapWord*)fc, fc->size());
3043   fl->return_chunk_at_head(fc);
3044 
3045   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
3046   {
3047     // Update the stats for this block size.
3048     MutexLocker x(_indexedFreeListParLocks[word_sz],
3049                   Mutex::_no_safepoint_check_flag);
3050     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
3051     _indexedFreeList[word_sz].set_split_births(births);
3052     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
3053     // _indexedFreeList[word_sz].set_surplus(new_surplus);
3054   }
3055 
3056   // TRAP
3057   assert(fl->tail()->next() == NULL, "List invariant.");
3058 }
3059 
3060 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
3061   assert(fl->count() == 0, "Precondition.");
3062   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
3063          "Precondition");
3064 
3065   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
3066     // Got it
3067     return;
3068   }
3069 
3070   // Otherwise, we'll split a block from the dictionary.
3071   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
3072 }
3073 
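// Ergonomic cap on the per-thread task-size multiples: the old generation's
// reserved size measured in cards, divided by BitsPerWord.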
3074 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
3075   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
3076   return ergo_max;
3077 }
3078 
3079 // Set up the space's par_seq_tasks structure for work claiming
3080 // for parallel rescan. See CMSParRemarkTask where this is currently used.
3081 // XXX Need to suitably abstract and generalize this and the next
3082 // method into one.
3083 void
3084 CompactibleFreeListSpace::
3085 initialize_sequential_subtasks_for_rescan(int n_threads) {
3086   // The "size" of each task is fixed according to rescan_task_size.
3087   assert(n_threads > 0, "Unexpected n_threads argument");
3088   const size_t task_size = rescan_task_size();
3089   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
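  // That is, n_tasks == ceil(used words / task_size); the asserts below
  // check exactly this covering property.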
3090   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
3091   assert(n_tasks == 0 ||
3092          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
3093           (used_region().start() + n_tasks*task_size >= used_region().end())),
3094          "n_tasks calculation incorrect");
3095   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3096   assert(!pst->valid(), "Clobbering existing data?");
3097   // Sets the condition for completion of the subtask (how many threads
3098   // need to finish in order to be done).
3099   pst->set_n_threads(n_threads);
3100   pst->set_n_tasks((int)n_tasks);
3101 }
3102 
3103 // Set up the space's par_seq_tasks structure for work claiming
3104 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
3105 void
3106 CompactibleFreeListSpace::
3107 initialize_sequential_subtasks_for_marking(int n_threads,
3108                                            HeapWord* low) {
3109   // The "size" of each task is fixed according to rescan_task_size.
3110   assert(n_threads > 0, "Unexpected n_threads argument");
3111   const size_t task_size = marking_task_size();
3112   assert(task_size > CardTable::card_size_in_words &&
3113          (task_size %  CardTable::card_size_in_words == 0),
3114          "Otherwise arithmetic below would be incorrect");
3115   MemRegion span = _old_gen->reserved();
3116   if (low != NULL) {
3117     if (span.contains(low)) {
3118       // Align low down to a card boundary so that
3119       // we can use block_offset_careful() on span boundaries.
3120       HeapWord* aligned_low = align_down(low, CardTable::card_size);
3121       // Clip span prefix at aligned_low
3122       span = span.intersection(MemRegion(aligned_low, span.end()));
3123     } else if (low > span.end()) {
3124       span = MemRegion(low, low);  // Null region
3125     } // else use entire span
3126   }
3127   assert(span.is_empty() ||
3128          ((uintptr_t)span.start() %  CardTable::card_size == 0),
3129         "span should start at a card boundary");
3130   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3131   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3132   assert(n_tasks == 0 ||
3133          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3134           (span.start() + n_tasks*task_size >= span.end())),
3135          "n_tasks calculation incorrect");
3136   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3137   assert(!pst->valid(), "Clobbering existing data?");
3138   // Sets the condition for completion of the subtask (how many threads
3139   // need to finish in order to be done).
3140   pst->set_n_threads(n_threads);
3141   pst->set_n_tasks((int)n_tasks);
3142 }