/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsLockVerifier.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.inline.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/binaryTreeDictionary.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

// Specialize for AdaptiveFreeList, which tries to avoid splitting a chunk
// of an under-populated size in favor of an over-populated size.  The
// general get_better_list() just returns the current list.
template <>
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
  // A candidate chunk has been found.  If it is already under
  // populated, get a chunk associated with the hint for this
  // chunk.

  TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
  if (curTL->surplus() <= 0) {
    /* Use the hint to find a size with a surplus, and reset the hint. */
    TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
    while (hintTL->hint() != 0) {
      assert(hintTL->hint() > hintTL->size(),
        "hint points in the wrong direction");
      hintTL = dictionary->find_list(hintTL->hint());
      assert(curTL != hintTL, "Infinite loop");
      if (hintTL == NULL ||
          hintTL == curTL /* Should not happen but protect against it */ ) {
        // No useful hint.  Clear the hint and go on.
        curTL->set_hint(0);
        break;
      }
      assert(hintTL->size() > curTL->size(), "hint is inconsistent");
      if (hintTL->surplus() > 0) {
        // The hint led to a list that has a surplus.  Use it.
        // Set the hint for the candidate to an overpopulated
        // size.
        curTL->set_hint(hintTL->size());
        // Change the candidate.
        curTL = hintTL;
        break;
      }
    }
  }
  return curTL;
}

void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
  if (nd) {
    if (split) {
      if (birth) {
        nd->increment_split_births();
        nd->increment_surplus();
      } else {
        nd->increment_split_deaths();
        nd->decrement_surplus();
      }
    } else {
      if (birth) {
        nd->increment_coal_births();
        nd->increment_surplus();
      } else {
        nd->increment_coal_deaths();
        nd->decrement_surplus();
      }
    }
  }
  // A list for this size may not be found (nd == 0) if
  //   this is a death and the appropriate list is now empty
  //     and has been removed from the tree, or
  //   this is a birth associated with a LinAB.  The chunk
  //     for the LinAB is not in the dictionary.
}

bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
  if (FLSAlwaysCoalesceLarge) return true;

  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
  // Absence of a list of the requested size implies it is over-populated.
  return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
         list_of_size->count() > list_of_size->coal_desired();
}

// For each list in the tree, calculate the desired, desired
// coalesce, count before sweep, and surplus before sweep.
class BeginSweepClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  double _percentage;
  float _inter_sweep_current;
  float _inter_sweep_estimate;
  float _intra_sweep_estimate;

 public:
  BeginSweepClosure(double p, float inter_sweep_current,
                              float inter_sweep_estimate,
                              float intra_sweep_estimate) :
   _percentage(p),
   _inter_sweep_current(inter_sweep_current),
   _inter_sweep_estimate(inter_sweep_estimate),
   _intra_sweep_estimate(intra_sweep_estimate) { }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    double coalSurplusPercent = _percentage;
    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
    fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
    fl->set_before_sweep(fl->count());
    fl->set_bfr_surp(fl->surplus());
  }
};

void AFLBinaryTreeDictionary::begin_sweep_dict_census(double coalSurplusPercent,
  float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
  BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
                        inter_sweep_estimate,
                        intra_sweep_estimate);
  bsc.do_tree(root());
}

// Calculate surpluses for the lists in the tree.
class setTreeSurplusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  double percentage;
 public:
  setTreeSurplusClosure(double v) { percentage = v; }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    double splitSurplusPercent = percentage;
    fl->set_surplus(fl->count() -
                   (ssize_t)((double)fl->desired() * splitSurplusPercent));
  }
};

void AFLBinaryTreeDictionary::set_tree_surplus(double splitSurplusPercent) {
  setTreeSurplusClosure sts(splitSurplusPercent);
  sts.do_tree(root());
}

// Set hints for the lists in the tree.
class setTreeHintsClosure : public DescendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  size_t hint;
 public:
  setTreeHintsClosure(size_t v) { hint = v; }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    fl->set_hint(hint);
    assert(fl->hint() == 0 || fl->hint() > fl->size(),
      "Current hint is inconsistent");
    if (fl->surplus() > 0) {
      hint = fl->size();
    }
  }
};

void AFLBinaryTreeDictionary::set_tree_hints(void) {
  setTreeHintsClosure sth(0);
  sth.do_tree(root());
}

// Record the current count as the count as of the previous sweep, and clear
// the split and coalesce birth/death counters.
class clearTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    fl->set_prev_sweep(fl->count());
    fl->set_coal_births(0);
    fl->set_coal_deaths(0);
    fl->set_split_births(0);
    fl->set_split_deaths(0);
  }
};

void AFLBinaryTreeDictionary::clear_tree_census(void) {
  clearTreeCensusClosure ctc;
  ctc.do_tree(root());
}

// Do reporting and post sweep clean up.
void AFLBinaryTreeDictionary::end_sweep_dict_census(double splitSurplusPercent) {
  // Does walking the tree 3 times hurt?
  set_tree_surplus(splitSurplusPercent);
  set_tree_hints();
  LogTarget(Trace, gc, freelist, stats) log;
  if (log.is_enabled()) {
    LogStream out(log);
    report_statistics(&out);
  }
  clear_tree_census();
}

// Print census information - counts, births, deaths, etc.
// for each list in the tree.  Also print some summary
// information.
class PrintTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  int _print_line;
  size_t _total_free;
  AdaptiveFreeList<FreeChunk> _total;

 public:
  PrintTreeCensusClosure() {
    _print_line = 0;
    _total_free = 0;
  }
  AdaptiveFreeList<FreeChunk>* total() { return &_total; }
  size_t total_free() { return _total_free; }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    LogStreamHandle(Debug, gc, freelist, census) out;

    if (++_print_line >= 40) {
      AdaptiveFreeList<FreeChunk>::print_labels_on(&out, "size");
      _print_line = 0;
    }
    fl->print_on(&out);
    _total_free +=            fl->count()             * fl->size();
    total()->set_count(       total()->count()        + fl->count()       );
    total()->set_bfr_surp(    total()->bfr_surp()     + fl->bfr_surp()    );
    total()->set_surplus(     total()->surplus()      + fl->surplus()     );
    total()->set_desired(     total()->desired()      + fl->desired()     );
    total()->set_prev_sweep(  total()->prev_sweep()   + fl->prev_sweep()  );
    total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
    total()->set_coal_births( total()->coal_births()  + fl->coal_births() );
    total()->set_coal_deaths( total()->coal_deaths()  + fl->coal_deaths() );
    total()->set_split_births(total()->split_births() + fl->split_births());
    total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
  }
};

void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const {

  st->print_cr("BinaryTree");
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
  PrintTreeCensusClosure ptc;
  ptc.do_tree(root());

  AdaptiveFreeList<FreeChunk>* total = ptc.total();
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, " ");
  total->print_on(st, "TOTAL\t");
  st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f  deficit: %8.5f",
               ptc.total_free(),
               (double)(total->split_births() + total->coal_births()
                      - total->split_deaths() - total->coal_deaths())
               /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
              (double)(total->desired() - total->count())
              /(total->desired() != 0 ? (double)total->desired() : 1.0));
}

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Defaults are 0 so things will break badly if incorrectly initialized.
size_t CompactibleFreeListSpace::IndexSetStart  = 0;
size_t CompactibleFreeListSpace::IndexSetStride = 0;
size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;

size_t MinChunkSize = 0;

void CompactibleFreeListSpace::set_cms_values() {
  // Set CMS global values
  assert(MinChunkSize == 0, "already set");

  // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  // for chunks to contain a FreeChunk.
  _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;

  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  IndexSetStart  = MinChunkSize;
  IndexSetStride = MinObjAlignment;
}

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
                    CMSConcMarkMultiple),
  _bt(bs, mr),
  _collector(NULL),
  // Free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace_lock", true,
                Monitor::_safepoint_check_never),
  _preconsumptionDirtyCardClosure(NULL),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace_dict_par_lock", true,
                          Monitor::_safepoint_check_never)
{
  assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
         "FreeChunk is larger than expected");
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);

  _dictionary = new AFLBinaryTreeDictionary(mr);

  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                             SmallForLinearAlloc);

  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  check_free_list_consistency();

  // Initialize locks for parallel case.
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                            "a freelist par lock", true, Mutex::_safepoint_check_never);
    DEBUG_ONLY(
      _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
    )
  }
  _dictionary->set_par_lock(&_parDictionaryAllocLock);

  _used_stable = 0;
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
         "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
         "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = CMSHeap::heap()->young_gen();
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method, which was not used above in order to save the
      // virtual dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark_raw();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space would call it only when
  // compact_top crosses the current threshold; that restriction does not apply
  // to a non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block.  The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize the indexed free lists to their empty state.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used.  It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

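// The amount of space occupied by the object at "addr": its size adjusted
// up to the space's minimum block size and alignment.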
size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
  return adjustObjectSize(oop(addr)->size());
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (size_t i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->set_size(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
    }
    coalBirth(mr.word_size());
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  refillLinearAllocBlocksIfNeeded();
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}


#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

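// Total number of free chunks in the space: those in the indexed free lists,
// those in the dictionary, and the small linear allocation block if it
// currently holds any space.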
size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->total_count();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

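// A block is considered free if its header identifies it as a free chunk.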
bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->is_free();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

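// used_stable() reports the value of used() cached by the last call to
// recalculate_used_stable(), avoiding a recomputation of the free space.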
size_t CompactibleFreeListSpace::used_stable() const {
  return _used_stable;
}

void CompactibleFreeListSpace::recalculate_used_stable() {
  _used_stable = used();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and,
  // further, the computation of totalSizeInIndexedFreeLists()
  // is itself non-atomic). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->max_chunk_size();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimistically for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
            p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
}

void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
}

void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics(st);
  st->print_cr("Layout of Indexed Freelists");
  st->print_cr("---------------------------");
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(st);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
      st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
                   p2i(fc), p2i((HeapWord*)fc + i),
                   fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}

void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}

void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->report_statistics(st);
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}

class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  size_t do_blk(HeapWord* addr);
};

size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      p2i(addr),
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}

void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
  st->print_cr("=========================");
  st->print_cr("Block layout in CMS Heap:");
  st->print_cr("=========================");
  BlkPrintingClosure bpcl(c, this, c->markBitMap(), st);
  blk_iterate(&bpcl);

  st->print_cr("=======================================");
  st->print_cr("Order & Layout of Promotion Info Blocks");
  st->print_cr("=======================================");
  print_promo_info_blocks(st);

  st->print_cr("===========================");
  st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
  print_indexed_free_lists(st);

  st->print_cr("=================================");
  st->print_cr("Order of Free Lists in Dictionary");
  st->print_cr("=================================");
  print_dictionary_free_lists(st);
}


void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
  assert_lock_strong(&_freelistLock);
  Log(gc, freelist, stats) log;
  if (!log.is_debug()) {
    return;
  }
  log.debug("%s", title);

  LogStream out(log.debug());
  _dictionary->report_statistics(&out);

  if (log.is_trace()) {
    LogStream trace_out(log.trace());
    reportIndexedFreeListStatistics(&trace_out);
    size_t total_size = totalSizeInIndexedFreeLists() +
                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
    log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
  assert_lock_strong(&_freelistLock);
  st->print_cr("Statistics for IndexedFreeLists:");
  st->print_cr("--------------------------------");
  size_t total_size = totalSizeInIndexedFreeLists();
  size_t free_blocks = numFreeBlocksInIndexedFreeLists();
  st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
  st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
  st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
  if (free_blocks != 0) {
    st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
  }
}

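// Count the free chunks held in the indexed free lists; debug builds also
// walk each list and verify the recount against the list's own count.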
size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

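// The size of the largest non-empty indexed free list, or 0 if all of them
// are empty.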
size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

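// Move the end of the space, resizing the block offset table to match; if the
// space grows, the newly exposed words are added to the free lists.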
void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
        "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // Add the block to the free lists, if possible coalescing it
      // with the last free block, and update the BOT and census data.
      addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
    }
  }
}

class FreeListSpaceDCTOC : public FilteringDCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
  bool _parallel;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopIterateClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
                     CMSCollector* collector,
                     OopIterateClosure* cl,
                     CardTable::PrecisionStyle precision,
                     HeapWord* boundary,
                     bool parallel) :
    FilteringDCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector), _parallel(parallel) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.

#define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
                                                 HeapWord* bottom,              \
                                                 HeapWord* top,                 \
                                                 ClosureType* cl) {             \
   if (_parallel) {                                                             \
     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   } else {                                                                     \
     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   }                                                                            \
}                                                                               \
void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
                                                     HeapWord* bottom,          \
                                                     HeapWord* top,             \
                                                     ClosureType* cl) {         \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
                                                       HeapWord* bottom,        \
                                                       HeapWord* top,           \
                                                       ClosureType* cl) {       \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

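// Create the DirtyCardToOopClosure used when scanning the dirty cards that
// cover parts of this space.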
DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopIterateClosure* cl,
                                      CardTable::PrecisionStyle precision,
                                      HeapWord* boundary,
                                      bool parallel) {
  return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
}


// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(OopIterateClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized. We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
void CompactibleFreeListSpace::verify_objects_initialized() const {
  if (is_init_completed()) {
    assert_locked_or_safepoint(Heap_lock);
    if (Universe::is_fully_initialized()) {
      guarantee(SafepointSynchronize::is_at_safepoint(),
                "Required for objects to be initialized");
    }
  } // else make a concession at vm start-up
}

// Apply the given closure to each object in the space
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

// Apply the given closure to each live object in the space
//   The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
// objects in the space with references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object.  See obj_is_alive() for details on how liveness of an
// object is decided.

void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur) && obj_is_alive(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                  UpwardsObjectClosure* cl) {
  assert_locked(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use the same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
  ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it may not necessarily
  // be the same as [bottom(),end()); although we could
  // use [used_region().start(),align_up(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
         "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  for (addr = block_start_careful(mr.start()), end = mr.end();
       addr < end; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->is_free()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful_m(oop(addr), mr);
      if (size == 0) {
        // An unparsable object found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}


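// Block-start queries are answered by the block offset table.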
1148 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
1149   NOT_PRODUCT(verify_objects_initialized());
1150   return _bt.block_start(p);
1151 }
1152 
1153 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
1154   return _bt.block_start_careful(p);
1155 }
1156 
1157 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
1158   NOT_PRODUCT(verify_objects_initialized());
1159   // This must be volatile, or else there is a danger that the compiler
1160   // will compile the code below into a sometimes-infinite loop, by keeping
1161   // the value read the first time in a register.
1162   while (true) {
1163     // We must do this until we get a consistent view of the object.
1164     if (FreeChunk::indicatesFreeChunk(p)) {
1165       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1166       size_t res = fc->size();
1167 
1168       // Bugfix for systems with weak memory model (PPC64/IA64). The
1169       // block's free bit was set and we have read the size of the
1170       // block. Acquire and check the free bit again. If the block is
1171       // still free, the read size is correct.
1172       OrderAccess::acquire();
1173 
1174       // If the object is still a free chunk, return the size, else it
1175       // has been allocated so try again.
1176       if (FreeChunk::indicatesFreeChunk(p)) {
1177         assert(res != 0, "Block size should not be 0");
1178         return res;
1179       }
1180     } else {
1181       // Ensure klass read before size.
1182       Klass* k = oop(p)->klass_or_null_acquire();
1183       if (k != NULL) {
1184         assert(k->is_klass(), "Should really be klass oop.");
1185         oop o = (oop)p;
1186         assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
1187 
1188         size_t res = o->size_given_klass(k);
1189         res = adjustObjectSize(res);
1190         assert(res != 0, "Block size should not be 0");
1191         return res;
1192       }
1193     }
1194   }
1195 }
1196 
1197 // TODO: Now that is_parsable is gone, we should combine these two functions.
1198 // A variant of the above that uses the Printezis bits for
1199 // unparsable but allocated objects. This avoids any possible
1200 // stalls waiting for mutators to initialize objects, and is
1201 // thus potentially faster than the variant above. However,
1202 // this variant may return a zero size for a block that is
1203 // under mutation and for which a consistent size cannot be
1204 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1205 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1206                                                      const CMSCollector* c)
1207 const {
1208   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1209   // This must be volatile, or else there is a danger that the compiler
1210   // will compile the code below into a sometimes-infinite loop, by keeping
1211   // the value read the first time in a register.
1212   DEBUG_ONLY(uint loops = 0;)
1213   while (true) {
1214     // We must do this until we get a consistent view of the object.
1215     if (FreeChunk::indicatesFreeChunk(p)) {
1216       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1217       size_t res = fc->size();
1218 
1219       // Bugfix for systems with weak memory model (PPC64/IA64). The
1220       // free bit of the block was set and we have read the size of
1221       // the block. Acquire and check the free bit again. If the
1222       // block is still free, the read size is correct.
1223       OrderAccess::acquire();
1224 
1225       if (FreeChunk::indicatesFreeChunk(p)) {
1226         assert(res != 0, "Block size should not be 0");
1227         assert(loops == 0, "Should be 0");
1228         return res;
1229       }
1230     } else {
1231       // Ensure klass read before size.
1232       Klass* k = oop(p)->klass_or_null_acquire();
1233       if (k != NULL) {
1234         assert(k->is_klass(), "Should really be klass oop.");
1235         oop o = (oop)p;
1236         assert(oopDesc::is_oop(o), "Should be an oop");
1237 
1238         size_t res = o->size_given_klass(k);
1239         res = adjustObjectSize(res);
1240         assert(res != 0, "Block size should not be 0");
1241         return res;
1242       } else {
1243         // May return 0 if P-bits not present.
1244         return c->block_size_if_printezis_bits(p);
1245       }
1246     }
1247     assert(loops == 0, "Can loop at most once");
1248     DEBUG_ONLY(loops++;)
1249   }
1250 }
1251 
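     // A simpler variant of block_size(): used where the block cannot be mutated
     // concurrently (hence "_nopar"), so a plain read of the free bit and the
     // size suffices, without the acquire-and-recheck dance above.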
1252 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1253   NOT_PRODUCT(verify_objects_initialized());
1254   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1255   FreeChunk* fc = (FreeChunk*)p;
1256   if (fc->is_free()) {
1257     return fc->size();
1258   } else {
1259     // Ignore mark word because this may be a recently promoted
1260     // object whose mark word is used to chain together grey
1261     // objects (the last one would have a null value).
1262     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1263     return adjustObjectSize(oop(p)->size());
1264   }
1265 }
1266 
1267 // This implementation assumes that the property of "being an object" is
1268 // stable.  But being a free chunk may not be (because of parallel
1269 // promotion.)
1270 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1271   FreeChunk* fc = (FreeChunk*)p;
1272   assert(is_in_reserved(p), "Should be in space");
1273   if (FreeChunk::indicatesFreeChunk(p)) return false;
1274   Klass* k = oop(p)->klass_or_null_acquire();
1275   if (k != NULL) {
1276     // Ignore mark word because it may have been used to
1277     // chain together promoted objects (the last one
1278     // would have a null value).
1279     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1280     return true;
1281   } else {
1282     return false;  // Was not an object at the start of collection.
1283   }
1284 }
1285 
1286 // Check if the object is alive. This fact is checked either by consulting
1287 // the main marking bitmap in the sweeping phase or, if it's a permanent
1288 // generation and we're not in the sweeping phase, by checking the
1289 // perm_gen_verify_bit_map where we store the "deadness" information if
1290 // we did not sweep the perm gen in the most recent previous GC cycle.
1291 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1292   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1293          "Else races are possible");
1294   assert(block_is_obj(p), "The address should point to an object");
1295 
1296   // If we're sweeping, we use object liveness information from the main bit map
1297   // for both perm gen and old gen.
1298   // We don't need to lock the bitmap (live_map or dead_map below), because
1299   // EITHER we are in the middle of the sweeping phase, and the
1300   // main marking bit map (live_map below) is locked,
1301   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1302   // is stable, because it's mutated only in the sweeping phase.
1303   // NOTE: This method is also used by jmap where, if class unloading is
1304   // off, the results can return "false" for legitimate perm objects,
1305   // when we are not in the midst of a sweeping phase, which can result
1306   // in jmap not reporting certain perm gen objects. This will be moot
1307   // if/when the perm gen goes away in the future.
1308   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1309     CMSBitMap* live_map = _collector->markBitMap();
1310     return live_map->par_isMarked((HeapWord*) p);
1311   }
1312   return true;
1313 }
1314 
1315 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1316   FreeChunk* fc = (FreeChunk*)p;
1317   assert(is_in_reserved(p), "Should be in space");
1318   assert(_bt.block_start(p) == p, "Should be a block boundary");
1319   if (!fc->is_free()) {
1320     // Ignore mark word because it may have been used to
1321     // chain together promoted objects (the last one
1322     // would have a null value).
1323     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1324     return true;
1325   }
1326   return false;
1327 }
1328 
1329 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1330 // approximate answer if you don't hold the freelistLock() when you call this.
1331 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1332   size_t size = 0;
1333   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1334     debug_only(
1335       // We may be calling here without the lock in which case we
1336       // won't do this modest sanity check.
1337       if (freelistLock()->owned_by_self()) {
1338         size_t total_list_size = 0;
1339         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1340           fc = fc->next()) {
1341           total_list_size += i;
1342         }
1343         assert(total_list_size == i * _indexedFreeList[i].count(),
1344                "Count in list is incorrect");
1345       }
1346     )
1347     size += i * _indexedFreeList[i].count();
1348   }
1349   return size;
1350 }
1351 
1352 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1353   MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
1354   return allocate(size);
1355 }
1356 
1357 HeapWord*
1358 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1359   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1360 }
1361 
1362 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1363   assert_lock_strong(freelistLock());
1364   HeapWord* res = NULL;
1365   assert(size == adjustObjectSize(size),
1366          "use adjustObjectSize() before calling into allocate()");
1367 
1368   res = allocate_adaptive_freelists(size);
1369 
1370   if (res != NULL) {
1371     // check that res does lie in this space!
1372     assert(is_in_reserved(res), "Not in this space!");
1373     assert(is_aligned((void*)res), "alignment check");
1374 
1375     FreeChunk* fc = (FreeChunk*)res;
1376     fc->markNotFree();
1377     assert(!fc->is_free(), "shouldn't be marked free");
1378     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1379     // Verify that the block offset table shows this to
1380     // be a single block, but not one which is unallocated.
1381     _bt.verify_single_block(res, size);
1382     _bt.verify_not_unallocated(res, size);
1383     // mangle a just allocated object with a distinct pattern.
1384     debug_only(fc->mangleAllocated(size));
1385   }
1386 
1387   // During GC we do not need to recalculate the stable used value for
1388   // every allocation in old gen. It is done once at the end of GC instead
1389   // for performance reasons.
1390   if (!CMSHeap::heap()->is_gc_active()) {
1391     recalculate_used_stable();
1392   }
1393 
1394   return res;
1395 }
1396 
1397 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1398   assert_lock_strong(freelistLock());
1399   HeapWord* res = NULL;
1400   assert(size == adjustObjectSize(size),
1401          "use adjustObjectSize() before calling into allocate()");
1402 
1403   // Strategy
1404   //   if small
1405   //     exact size from small object indexed list if small
1406   //     small or large linear allocation block (linAB) as appropriate
1407   //     take from lists of greater sized chunks
1408   //   else
1409   //     dictionary
1410   //     small or large linear allocation block if it has the space
1411   // Try allocating exact size from indexTable first
1412   if (size < IndexSetSize) {
1413     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1414     if (res != NULL) {
1415       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1416         "Not removed from free list");
1417       // no block offset table adjustment is necessary on blocks in
1418       // the indexed lists.
1419 
1420     // Try allocating from the small LinAB
1421     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1422         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1423         // If successful, the above also adjusts the block offset table.
1424         // Note that this call will refill the LinAB to
1425         // satisfy the request.  This is different than
1426         // evm.
1427         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1428     } else {
1429       // Raid the exact free lists larger than size, even if they are not
1430       // overpopulated.
1431       res = (HeapWord*) getChunkFromGreater(size);
1432     }
1433   } else {
1434     // Big objects get allocated directly from the dictionary.
1435     res = (HeapWord*) getChunkFromDictionaryExact(size);
1436     if (res == NULL) {
1437       // Try hard not to fail since an allocation failure will likely
1438       // trigger a synchronous GC.  Try to get the space from the
1439       // allocation blocks.
1440       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1441     }
1442   }
1443 
1444   return res;
1445 }
1446 
1447 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1448 // when promoting obj.
1449 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1450   // Depending on the object size, expansion may require refilling either a
1451   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2 * MinChunkSize
1452   // is added because the dictionary may over-allocate to avoid fragmentation.
1453   size_t space = obj_size;
1454   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1455   return space;
1456 }
1457 
1458 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1459   FreeChunk* ret;
1460 
1461   assert(numWords >= MinChunkSize, "Size is less than minimum");
1462   assert(linearAllocationWouldFail() || bestFitFirst(),
1463     "Should not be here");
1464 
1465   size_t i;
1466   size_t currSize = numWords + MinChunkSize;
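       // Search only indexed lists at least MinChunkSize larger than the request,
       // so that after carving out numWords the remainder is itself a valid chunk.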
1467   assert(is_object_aligned(currSize), "currSize should be aligned");
1468   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1469     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1470     if (fl->head()) {
1471       ret = getFromListGreater(fl, numWords);
1472       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1473       return ret;
1474     }
1475   }
1476 
1477   currSize = MAX2((size_t)SmallForDictionary,
1478                   (size_t)(numWords + MinChunkSize));
1479 
1480   /* Try to get a chunk that satisfies request, while avoiding
1481      fragmentation that can't be handled. */
1482   {
1483     ret =  dictionary()->get_chunk(currSize);
1484     if (ret != NULL) {
1485       assert(ret->size() - numWords >= MinChunkSize,
1486              "Chunk is too small");
1487       _bt.allocated((HeapWord*)ret, ret->size());
1488       /* Carve returned chunk. */
1489       (void) splitChunkAndReturnRemainder(ret, numWords);
1490       /* Label this as no longer a free chunk. */
1491       assert(ret->is_free(), "This chunk should be free");
1492       ret->link_prev(NULL);
1493     }
1494     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1495     return ret;
1496   }
1497   ShouldNotReachHere();
1498 }
1499 
1500 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1501   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1502   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1503 }
1504 
1505 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1506   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1507          (_smallLinearAllocBlock._word_size == fc->size()),
1508          "Linear allocation block shows incorrect size");
1509   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1510           (_smallLinearAllocBlock._word_size == fc->size()));
1511 }
1512 
1513 // Check if the purported free chunk is present either as a linear
1514 // allocation block, the size-indexed table of (smaller) free blocks,
1515 // or the larger free blocks kept in the binary tree dictionary.
1516 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1517   if (verify_chunk_is_linear_alloc_block(fc)) {
1518     return true;
1519   } else if (fc->size() < IndexSetSize) {
1520     return verifyChunkInIndexedFreeLists(fc);
1521   } else {
1522     return dictionary()->verify_chunk_in_free_list(fc);
1523   }
1524 }
1525 
1526 #ifndef PRODUCT
1527 void CompactibleFreeListSpace::assert_locked() const {
1528   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1529 }
1530 
1531 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1532   CMSLockVerifier::assert_locked(lock);
1533 }
1534 #endif
1535 
1536 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1537   // In the parallel case, the main thread holds the free list lock
1538   // on behalf of the parallel threads.
1539   FreeChunk* fc;
1540   {
1541     // If GC is parallel, this might be called by several threads.
1542     // This should be rare enough that the locking overhead won't affect
1543     // the sequential code.
1544     MutexLocker x(parDictionaryAllocLock(),
1545                   Mutex::_no_safepoint_check_flag);
1546     fc = getChunkFromDictionary(size);
1547   }
1548   if (fc != NULL) {
1549     fc->dontCoalesce();
1550     assert(fc->is_free(), "Should be free, but not coalescable");
1551     // Verify that the block offset table shows this to
1552     // be a single block, but not one which is unallocated.
1553     _bt.verify_single_block((HeapWord*)fc, fc->size());
1554     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1555   }
1556   return fc;
1557 }
1558 
1559 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1560   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1561   assert_locked();
1562 
1563   // if we are tracking promotions, then first ensure space for
1564   // promotion (including spooling space for saving header if necessary).
1565   // then allocate and copy, then track promoted info if needed.
1566   // When tracking (see PromotionInfo::track()), the mark word may
1567   // be displaced and in this case restoration of the mark word
1568   // occurs in the (oop_since_save_marks_)iterate phase.
1569   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1570     return NULL;
1571   }
1572   // Call the allocate(size_t, bool) form directly to avoid the
1573   // additional call through the allocate(size_t) form.  Having
1574   // the compiler inline the call is problematic because allocate(size_t)
1575   // is a virtual method.
1576   HeapWord* res = allocate(adjustObjectSize(obj_size));
1577   if (res != NULL) {
1578     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1579     // if we should be tracking promotions, do so.
1580     if (_promoInfo.tracking()) {
1581         _promoInfo.track((PromotedObject*)res);
1582     }
1583   }
1584   return oop(res);
1585 }
1586 
1587 HeapWord*
1588 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1589   assert_locked();
1590   assert(size >= MinChunkSize, "minimum chunk size");
1591   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1592     "maximum from smallLinearAllocBlock");
1593   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1594 }
1595 
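     // Allocate 'size' words from the given linear allocation block.  The common
     // case is handled by getChunkFromLinearAllocBlockRemainder(); otherwise the
     // block is either consumed exactly or its too-small remainder is returned to
     // the free lists, after which the block is refilled and, if the request was
     // not yet satisfied, the allocation is carved out of the fresh block.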
1596 HeapWord*
1597 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1598                                                        size_t size) {
1599   assert_locked();
1600   assert(size >= MinChunkSize, "too small");
1601   HeapWord* res = NULL;
1602   // Try to do linear allocation from blk, making sure the block is not empty.
1603   if (blk->_word_size == 0) {
1604     // We have probably been unable to fill this either in the prologue or
1605     // when it was exhausted at the last linear allocation. Bail out until
1606     // next time.
1607     assert(blk->_ptr == NULL, "consistency check");
1608     return NULL;
1609   }
1610   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1611   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1612   if (res != NULL) return res;
1613 
1614   // about to exhaust this linear allocation block
1615   if (blk->_word_size == size) { // exactly satisfied
1616     res = blk->_ptr;
1617     _bt.allocated(res, blk->_word_size);
1618   } else if (size + MinChunkSize <= blk->_refillSize) {
1619     size_t sz = blk->_word_size;
1620     // Update _unallocated_block if the size is such that the chunk would be
1621     // returned to the indexed free lists.  All other chunks in the indexed
1622     // free lists are allocated from the dictionary, so _unallocated_block
1623     // has already been adjusted for them.  Do it here so that the cost is
1624     // paid uniformly for all chunks added back to the indexed free lists.
1625     if (sz < SmallForDictionary) {
1626       _bt.allocated(blk->_ptr, sz);
1627     }
1628     // Return the chunk that isn't big enough, and then refill below.
1629     addChunkToFreeLists(blk->_ptr, sz);
1630     split_birth(sz);
1631     // Don't keep statistics on adding back a chunk from a LinAB.
1632   } else {
1633     // A refilled block would not satisfy the request.
1634     return NULL;
1635   }
1636 
1637   blk->_ptr = NULL; blk->_word_size = 0;
1638   refillLinearAllocBlock(blk);
1639   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1640          "block was replenished");
1641   if (res != NULL) {
1642     split_birth(size);
1643     repairLinearAllocBlock(blk);
1644   } else if (blk->_ptr != NULL) {
1645     res = blk->_ptr;
1646     size_t blk_size = blk->_word_size;
1647     blk->_word_size -= size;
1648     blk->_ptr  += size;
1649     split_birth(size);
1650     repairLinearAllocBlock(blk);
1651     // Update BOT last so that other (parallel) GC threads see a consistent
1652     // view of the BOT and free blocks.
1653     // Above must occur before BOT is updated below.
1654     OrderAccess::storestore();
1655     _bt.split_block(res, blk_size, size);  // adjust block offset table
1656   }
1657   return res;
1658 }
1659 
1660 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1661                                         LinearAllocBlock* blk,
1662                                         size_t size) {
1663   assert_locked();
1664   assert(size >= MinChunkSize, "too small");
1665 
1666   HeapWord* res = NULL;
1667   // This is the common case.  Keep it simple.
1668   if (blk->_word_size >= size + MinChunkSize) {
1669     assert(blk->_ptr != NULL, "consistency check");
1670     res = blk->_ptr;
1671     // Note that the BOT is up-to-date for the linAB before allocation.  It
1672     // indicates the start of the linAB.  The split_block() updates the
1673     // BOT for the linAB after the allocation (indicates the start of the
1674     // next chunk to be allocated).
1675     size_t blk_size = blk->_word_size;
1676     blk->_word_size -= size;
1677     blk->_ptr  += size;
1678     split_birth(size);
1679     repairLinearAllocBlock(blk);
1680     // Update BOT last so that other (parallel) GC threads see a consistent
1681     // view of the BOT and free blocks.
1682     // Above must occur before BOT is updated below.
1683     OrderAccess::storestore();
1684     _bt.split_block(res, blk_size, size);  // adjust block offset table
1685     _bt.allocated(res, size);
1686   }
1687   return res;
1688 }
1689 
1690 FreeChunk*
1691 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1692   assert_locked();
1693   assert(size < SmallForDictionary, "just checking");
1694   FreeChunk* res;
1695   res = _indexedFreeList[size].get_chunk_at_head();
1696   if (res == NULL) {
1697     res = getChunkFromIndexedFreeListHelper(size);
1698   }
1699   _bt.verify_not_unallocated((HeapWord*) res, size);
1700   assert(res == NULL || res->size() == size, "Incorrect block size");
1701   return res;
1702 }
1703 
1704 FreeChunk*
1705 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1706   bool replenish) {
1707   assert_locked();
1708   FreeChunk* fc = NULL;
1709   if (size < SmallForDictionary) {
1710     assert(_indexedFreeList[size].head() == NULL ||
1711       _indexedFreeList[size].surplus() <= 0,
1712       "List for this size should be empty or under populated");
1713     // Try best fit in exact lists before replenishing the list
1714     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1715       // Replenish list.
1716       //
1717       // Things tried that failed.
1718       //   Tried allocating out of the two LinAB's first before
1719       // replenishing lists.
1720       //   Tried small linAB of size 256 (size in indexed list)
1721       // and replenishing indexed lists from the small linAB.
1722       //
1723       FreeChunk* newFc = NULL;
1724       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1725       if (replenish_size < SmallForDictionary) {
1726         // Do not replenish from an underpopulated size.
1727         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1728             _indexedFreeList[replenish_size].head() != NULL) {
1729           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1730         } else if (bestFitFirst()) {
1731           newFc = bestFitSmall(replenish_size);
1732         }
1733       }
1734       if (newFc == NULL && replenish_size > size) {
1735         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1736         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1737       }
1738       // Note: The stats update re the split-death of the block obtained above
1739       // is recorded below, precisely when we know we are going to be
1740       // splitting it into more than one piece.
1741       if (newFc != NULL) {
1742         if  (replenish || CMSReplenishIntermediate) {
1743           // Replenish this list and return one block to caller.
1744           size_t i;
1745           FreeChunk *curFc, *nextFc;
1746           size_t num_blk = newFc->size() / size;
1747           assert(num_blk >= 1, "Smaller than requested?");
1748           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1749           if (num_blk > 1) {
1750             // we are sure we will be splitting the block just obtained
1751             // into multiple pieces; record the split-death of the original
1752             splitDeath(replenish_size);
1753           }
1754           // carve up and link blocks 0, ..., num_blk - 2
1755           // The last chunk is not added to the lists but is returned as the
1756           // free chunk.
1757           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1758                i = 0;
1759                i < (num_blk - 1);
1760                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1761                i++) {
1762             curFc->set_size(size);
1763             // Don't record this as a return in order to try and
1764             // determine the "returns" from a GC.
1765             _bt.verify_not_unallocated((HeapWord*) fc, size);
1766             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1767             _bt.mark_block((HeapWord*)curFc, size);
1768             split_birth(size);
1769             // Don't record the initial population of the indexed list
1770             // as a split birth.
1771           }
1772 
1773           // check that the arithmetic was OK above
1774           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1775             "inconsistency in carving newFc");
1776           curFc->set_size(size);
1777           _bt.mark_block((HeapWord*)curFc, size);
1778           split_birth(size);
1779           fc = curFc;
1780         } else {
1781           // Return entire block to caller
1782           fc = newFc;
1783         }
1784       }
1785     }
1786   } else {
1787     // Get a free chunk from the free chunk dictionary to be returned to
1788     // replenish the indexed free list.
1789     fc = getChunkFromDictionaryExact(size);
1790   }
1791   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1792   return fc;
1793 }
1794 
1795 FreeChunk*
1796 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1797   assert_locked();
1798   FreeChunk* fc = _dictionary->get_chunk(size);
1799   if (fc == NULL) {
1800     return NULL;
1801   }
1802   _bt.allocated((HeapWord*)fc, fc->size());
1803   if (fc->size() >= size + MinChunkSize) {
1804     fc = splitChunkAndReturnRemainder(fc, size);
1805   }
1806   assert(fc->size() >= size, "chunk too small");
1807   assert(fc->size() < size + MinChunkSize, "chunk too big");
1808   _bt.verify_single_block((HeapWord*)fc, fc->size());
1809   return fc;
1810 }
1811 
1812 FreeChunk*
1813 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1814   assert_locked();
1815   FreeChunk* fc = _dictionary->get_chunk(size);
1816   if (fc == NULL) {
1817     return fc;
1818   }
1819   _bt.allocated((HeapWord*)fc, fc->size());
1820   if (fc->size() == size) {
1821     _bt.verify_single_block((HeapWord*)fc, size);
1822     return fc;
1823   }
1824   assert(fc->size() > size, "get_chunk() guarantee");
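       // The chunk is larger than requested, but the excess may be too small to
       // split off as a free chunk; in that case hand it back and ask the
       // dictionary for one that is big enough to split exactly.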
1825   if (fc->size() < size + MinChunkSize) {
1826     // Return the chunk to the dictionary and go get a bigger one.
1827     returnChunkToDictionary(fc);
1828     fc = _dictionary->get_chunk(size + MinChunkSize);
1829     if (fc == NULL) {
1830       return NULL;
1831     }
1832     _bt.allocated((HeapWord*)fc, fc->size());
1833   }
1834   assert(fc->size() >= size + MinChunkSize, "tautology");
1835   fc = splitChunkAndReturnRemainder(fc, size);
1836   assert(fc->size() == size, "chunk is wrong size");
1837   _bt.verify_single_block((HeapWord*)fc, size);
1838   return fc;
1839 }
1840 
1841 void
1842 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1843   assert_locked();
1844 
1845   size_t size = chunk->size();
1846   _bt.verify_single_block((HeapWord*)chunk, size);
1847   // adjust _unallocated_block downward, as necessary
1848   _bt.freed((HeapWord*)chunk, size);
1849   _dictionary->return_chunk(chunk);
1850 #ifndef PRODUCT
1851   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1852     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1853     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1854     tl->verify_stats();
1855   }
1856 #endif // PRODUCT
1857 }
1858 
1859 void
1860 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1861   assert_locked();
1862   size_t size = fc->size();
1863   _bt.verify_single_block((HeapWord*) fc, size);
1864   _bt.verify_not_unallocated((HeapWord*) fc, size);
1865   _indexedFreeList[size].return_chunk_at_tail(fc);
1866 #ifndef PRODUCT
1867   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1868      _indexedFreeList[size].verify_stats();
1869   }
1870 #endif // PRODUCT
1871 }
1872 
1873 // Add chunk to end of last block -- if it's the largest
1874 // block -- and update BOT and census data. We would
1875 // of course have preferred to coalesce it with the
1876 // last block, but it's currently less expensive to find the
1877 // largest block than it is to find the last.
1878 void
1879 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1880   HeapWord* chunk, size_t     size) {
1881   // check that the chunk does lie in this space!
1882   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1883   // One of the parallel gc task threads may be here
1884   // whilst others are allocating.
1885   Mutex* lock = &_parDictionaryAllocLock;
1886   FreeChunk* ec;
1887   {
1888     MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
1889     ec = dictionary()->find_largest_dict();  // get largest block
1890     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1891       // It's a coterminal block - we can coalesce.
1892       size_t old_size = ec->size();
1893       coalDeath(old_size);
1894       removeChunkFromDictionary(ec);
1895       size += old_size;
1896     } else {
1897       ec = (FreeChunk*)chunk;
1898     }
1899   }
1900   ec->set_size(size);
1901   debug_only(ec->mangleFreed(size));
1902   if (size < SmallForDictionary) {
1903     lock = _indexedFreeListParLocks[size];
1904   }
1905   MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
1906   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1907   // Record the birth under the lock, since the recording involves
1908   // manipulation of the list on which the chunk lives and,
1909   // if the chunk is allocated and is the last one on the list,
1910   // the list can go away.
1911   coalBirth(size);
1912 }
1913 
1914 void
1915 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1916                                               size_t     size) {
1917   // check that the chunk does lie in this space!
1918   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1919   assert_locked();
1920   _bt.verify_single_block(chunk, size);
1921 
1922   FreeChunk* fc = (FreeChunk*) chunk;
1923   fc->set_size(size);
1924   debug_only(fc->mangleFreed(size));
1925   if (size < SmallForDictionary) {
1926     returnChunkToFreeList(fc);
1927   } else {
1928     returnChunkToDictionary(fc);
1929   }
1930 }
1931 
1932 void
1933 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1934   size_t size, bool coalesced) {
1935   assert_locked();
1936   assert(chunk != NULL, "null chunk");
1937   if (coalesced) {
1938     // repair BOT
1939     _bt.single_block(chunk, size);
1940   }
1941   addChunkToFreeLists(chunk, size);
1942 }
1943 
1944 // We _must_ find the purported chunk on our free lists;
1945 // we assert if we don't.
1946 void
1947 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1948   size_t size = fc->size();
1949   assert_locked();
1950   debug_only(verifyFreeLists());
1951   if (size < SmallForDictionary) {
1952     removeChunkFromIndexedFreeList(fc);
1953   } else {
1954     removeChunkFromDictionary(fc);
1955   }
1956   _bt.verify_single_block((HeapWord*)fc, size);
1957   debug_only(verifyFreeLists());
1958 }
1959 
1960 void
1961 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1962   size_t size = fc->size();
1963   assert_locked();
1964   assert(fc != NULL, "null chunk");
1965   _bt.verify_single_block((HeapWord*)fc, size);
1966   _dictionary->remove_chunk(fc);
1967   // adjust _unallocated_block upward, as necessary
1968   _bt.allocated((HeapWord*)fc, size);
1969 }
1970 
1971 void
1972 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1973   assert_locked();
1974   size_t size = fc->size();
1975   _bt.verify_single_block((HeapWord*)fc, size);
1976   NOT_PRODUCT(
1977     if (FLSVerifyIndexTable) {
1978       verifyIndexedFreeList(size);
1979     }
1980   )
1981   _indexedFreeList[size].remove_chunk(fc);
1982   NOT_PRODUCT(
1983     if (FLSVerifyIndexTable) {
1984       verifyIndexedFreeList(size);
1985     }
1986   )
1987 }
1988 
1989 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1990   /* A hint is the next larger size that has a surplus.
1991      Start search at a size large enough to guarantee that
1992      the excess is >= MinChunkSize. */
1993   size_t start = align_object_size(numWords + MinChunkSize);
1994   if (start < IndexSetSize) {
1995     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1996     size_t    hint = _indexedFreeList[start].hint();
1997     while (hint < IndexSetSize) {
1998       assert(is_object_aligned(hint), "hint should be aligned");
1999       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
2000       if (fl->surplus() > 0 && fl->head() != NULL) {
2001         // Found a list with surplus, reset original hint
2002         // and split out a free chunk which is returned.
2003         _indexedFreeList[start].set_hint(hint);
2004         FreeChunk* res = getFromListGreater(fl, numWords);
2005         assert(res == NULL || res->is_free(),
2006           "Should be returning a free chunk");
2007         return res;
2008       }
2009       hint = fl->hint(); /* keep looking */
2010     }
2011     /* None found. */
2012     it[start].set_hint(IndexSetSize);
2013   }
2014   return NULL;
2015 }
2016 
2017 /* Requires fl->size >= numWords + MinChunkSize */
2018 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
2019   size_t numWords) {
2020   FreeChunk *curr = fl->head();
2021   assert(curr != NULL, "List is empty");
2022   size_t oldNumWords = curr->size();
2023   assert(numWords >= MinChunkSize, "Word size is too small");
2024   assert(oldNumWords >= numWords + MinChunkSize,
2025         "Size of chunks in the list is too small");
2026 
2027   fl->remove_chunk(curr);
2028   // recorded indirectly by splitChunkAndReturnRemainder -
2029   // smallSplit(oldNumWords, numWords);
2030   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
2031   // Does anything have to be done for the remainder in terms of
2032   // fixing the card table?
2033   assert(new_chunk == NULL || new_chunk->is_free(),
2034     "Should be returning a free chunk");
2035   return new_chunk;
2036 }
2037 
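     // Split 'chunk' in place: the tail of (size - new_size) words becomes a new
     // free chunk that is returned to the indexed lists or the dictionary, while
     // the head, resized to 'new_size', is returned to the caller.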
2038 FreeChunk*
2039 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
2040   size_t new_size) {
2041   assert_locked();
2042   size_t size = chunk->size();
2043   assert(size > new_size, "Split from a smaller block?");
2044   assert(is_aligned(chunk), "alignment problem");
2045   assert(size == adjustObjectSize(size), "alignment problem");
2046   size_t rem_sz = size - new_size;
2047   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
2048   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
2049   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
2050   assert(is_aligned(ffc), "alignment problem");
2051   ffc->set_size(rem_sz);
2052   ffc->link_next(NULL);
2053   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2054   // Above must occur before BOT is updated below.
2055   // adjust block offset table
2056   OrderAccess::storestore();
2057   assert(chunk->is_free() && ffc->is_free(), "Error");
2058   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
2059   if (rem_sz < SmallForDictionary) {
2060     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
2061     bool is_par = Thread::current()->is_GC_task_thread();
2062     if (is_par) _indexedFreeListParLocks[rem_sz]->lock_without_safepoint_check();
2063     returnChunkToFreeList(ffc);
2064     split(size, rem_sz);
2065     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
2066   } else {
2067     returnChunkToDictionary(ffc);
2068     split(size, rem_sz);
2069   }
2070   chunk->set_size(new_size);
2071   return chunk;
2072 }
2073 
2074 void
2075 CompactibleFreeListSpace::sweep_completed() {
2076   // Now that space is probably plentiful, refill linear
2077   // allocation blocks as needed.
2078   refillLinearAllocBlocksIfNeeded();
2079 }
2080 
2081 void
2082 CompactibleFreeListSpace::gc_prologue() {
2083   assert_locked();
2084   reportFreeListStatistics("Before GC:");
2085   refillLinearAllocBlocksIfNeeded();
2086 }
2087 
2088 void
2089 CompactibleFreeListSpace::gc_epilogue() {
2090   assert_locked();
2091   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2092   _promoInfo.stopTrackingPromotions();
2093   repairLinearAllocationBlocks();
2094   reportFreeListStatistics("After GC:");
2095 }
2096 
2097 // Iteration support, mostly delegated from a CMS generation
2098 
2099 void CompactibleFreeListSpace::save_marks() {
2100   assert(Thread::current()->is_VM_thread(),
2101          "Global variable should only be set when single-threaded");
2102   // Mark the "end" of the used space at the time of this call;
2103   // note, however, that promoted objects from this point
2104   // on are tracked in the _promoInfo below.
2105   set_saved_mark_word(unallocated_block());
2106 #ifdef ASSERT
2107   // Check the sanity of save_marks() etc.
2108   MemRegion ur    = used_region();
2109   MemRegion urasm = used_region_at_save_marks();
2110   assert(ur.contains(urasm),
2111          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
2112          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
2113          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
2114 #endif
2115   // inform allocator that promotions should be tracked.
2116   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2117   _promoInfo.startTrackingPromotions();
2118 }
2119 
2120 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2121   assert(_promoInfo.tracking(), "No preceding save_marks?");
2122   return _promoInfo.noPromotions();
2123 }
2124 
2125 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2126   return _smallLinearAllocBlock._word_size == 0;
2127 }
2128 
2129 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2130   // Fix up linear allocation blocks to look like free blocks
2131   repairLinearAllocBlock(&_smallLinearAllocBlock);
2132 }
2133 
2134 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2135   assert_locked();
2136   if (blk->_ptr != NULL) {
2137     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2138            "Minimum block size requirement");
2139     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2140     fc->set_size(blk->_word_size);
2141     fc->link_prev(NULL);   // mark as free
2142     fc->dontCoalesce();
2143     assert(fc->is_free(), "just marked it free");
2144     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2145   }
2146 }
2147 
2148 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2149   assert_locked();
2150   if (_smallLinearAllocBlock._ptr == NULL) {
2151     assert(_smallLinearAllocBlock._word_size == 0,
2152       "Size of linAB should be zero if the ptr is NULL");
2153     // Reset the linAB refill and allocation size limit.
2154     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2155   }
2156   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2157 }
2158 
2159 void
2160 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2161   assert_locked();
2162   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2163          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2164          "blk invariant");
2165   if (blk->_ptr == NULL) {
2166     refillLinearAllocBlock(blk);
2167   }
2168 }
2169 
2170 void
2171 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2172   assert_locked();
2173   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2174          "linear allocation block should be empty");
2175   FreeChunk* fc;
2176   if (blk->_refillSize < SmallForDictionary &&
2177       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2178     // A linAB's strategy might be to use small sizes to reduce
2179     // fragmentation but still get the benefits of allocation from a
2180     // linAB.
2181   } else {
2182     fc = getChunkFromDictionary(blk->_refillSize);
2183   }
2184   if (fc != NULL) {
2185     blk->_ptr  = (HeapWord*)fc;
2186     blk->_word_size = fc->size();
2187     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2188   }
2189 }
2190 
2191 // Support for compaction
2192 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2193   scan_and_forward(this, cp);
2194   // prepare_for_compaction() uses the space between live objects
2195   // so that later phases can skip dead space quickly.  As a result,
2196   // verification of the free lists no longer works after this point.
2197 }
2198 
2199 void CompactibleFreeListSpace::adjust_pointers() {
2200   // In other versions of adjust_pointers(), a bail out
2201   // based on the amount of live data in the generation
2202   // (i.e., if 0, bail out) may be used.
2203   // Cannot test used() == 0 here because the free lists have already
2204   // been mangled by the compaction.
2205 
2206   scan_and_adjust_pointers(this);
2207   // See note about verification in prepare_for_compaction().
2208 }
2209 
2210 void CompactibleFreeListSpace::compact() {
2211   scan_and_compact(this);
2212 }
2213 
2214 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2215 // where fbs is free block sizes
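     // E.g. all free space in a single block yields 0.0, while N equally sized
     // free blocks yield 1 - 1/N, approaching 1.0 as free space fragments.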
2216 double CompactibleFreeListSpace::flsFrag() const {
2217   size_t itabFree = totalSizeInIndexedFreeLists();
2218   double frag = 0.0;
2219   size_t i;
2220 
2221   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2222     double sz  = i;
2223     frag      += _indexedFreeList[i].count() * (sz * sz);
2224   }
2225 
2226   double totFree = itabFree +
2227                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2228   if (totFree > 0) {
2229     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2230             (totFree * totFree));
2231     frag = (double)1.0  - frag;
2232   } else {
2233     assert(frag == 0.0, "Follows from totFree == 0");
2234   }
2235   return frag;
2236 }
2237 
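     // Start-of-sweep census: compute each list's desired length from the
     // inter/intra-sweep timing estimates, and snapshot the current counts and
     // surpluses for the statistics reported at the end of the sweep.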
2238 void CompactibleFreeListSpace::beginSweepFLCensus(
2239   float inter_sweep_current,
2240   float inter_sweep_estimate,
2241   float intra_sweep_estimate) {
2242   assert_locked();
2243   size_t i;
2244   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2245     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2246     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2247     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2248     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2249     fl->set_before_sweep(fl->count());
2250     fl->set_bfr_surp(fl->surplus());
2251   }
2252   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2253                                     inter_sweep_current,
2254                                     inter_sweep_estimate,
2255                                     intra_sweep_estimate);
2256 }
2257 
2258 void CompactibleFreeListSpace::setFLSurplus() {
2259   assert_locked();
2260   size_t i;
2261   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2262     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2263     fl->set_surplus(fl->count() -
2264                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2265   }
2266 }
2267 
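     // Walk the indexed lists from largest to smallest, recording in each list's
     // hint the next larger size that currently has a surplus (or IndexSetSize if
     // there is none).  bestFitSmall() follows this chain of hints.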
2268 void CompactibleFreeListSpace::setFLHints() {
2269   assert_locked();
2270   size_t i;
2271   size_t h = IndexSetSize;
2272   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2273     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2274     fl->set_hint(h);
2275     if (fl->surplus() > 0) {
2276       h = i;
2277     }
2278   }
2279 }
2280 
2281 void CompactibleFreeListSpace::clearFLCensus() {
2282   assert_locked();
2283   size_t i;
2284   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2285     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2286     fl->set_prev_sweep(fl->count());
2287     fl->set_coal_births(0);
2288     fl->set_coal_deaths(0);
2289     fl->set_split_births(0);
2290     fl->set_split_deaths(0);
2291   }
2292 }
2293 
2294 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2295   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2296   setFLSurplus();
2297   setFLHints();
2298   printFLCensus(sweep_count);
2299   clearFLCensus();
2300   assert_locked();
2301   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2302 }
2303 
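     // A small size is over-populated for coalescing purposes when its free list
     // already holds more chunks than its coal_desired() target (or that target
     // is negative); larger sizes defer to the dictionary's census.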
2304 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2305   if (size < SmallForDictionary) {
2306     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2307     return (fl->coal_desired() < 0) ||
2308            ((int)fl->count() > fl->coal_desired());
2309   } else {
2310     return dictionary()->coal_dict_over_populated(size);
2311   }
2312 }
2313 
2314 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2315   assert(size < SmallForDictionary, "Size too large for indexed list");
2316   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2317   fl->increment_coal_births();
2318   fl->increment_surplus();
2319 }
2320 
2321 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2322   assert(size < SmallForDictionary, "Size too large for indexed list");
2323   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2324   fl->increment_coal_deaths();
2325   fl->decrement_surplus();
2326 }
2327 
2328 void CompactibleFreeListSpace::coalBirth(size_t size) {
2329   if (size  < SmallForDictionary) {
2330     smallCoalBirth(size);
2331   } else {
2332     dictionary()->dict_census_update(size,
2333                                    false /* split */,
2334                                    true /* birth */);
2335   }
2336 }
2337 
2338 void CompactibleFreeListSpace::coalDeath(size_t size) {
2339   if (size < SmallForDictionary) {
2340     smallCoalDeath(size);
2341   } else {
2342     dictionary()->dict_census_update(size,
2343                                    false /* split */,
2344                                    false /* birth */);
2345   }
2346 }
2347 
2348 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2349   assert(size < SmallForDictionary, "Size too large for indexed list");
2350   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2351   fl->increment_split_births();
2352   fl->increment_surplus();
2353 }
2354 
2355 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2356   assert(size < SmallForDictionary, "Size too large for indexed list");
2357   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2358   fl->increment_split_deaths();
2359   fl->decrement_surplus();
2360 }
2361 
2362 void CompactibleFreeListSpace::split_birth(size_t size) {
2363   if (size  < SmallForDictionary) {
2364     smallSplitBirth(size);
2365   } else {
2366     dictionary()->dict_census_update(size,
2367                                    true /* split */,
2368                                    true /* birth */);
2369   }
2370 }
2371 
2372 void CompactibleFreeListSpace::splitDeath(size_t size) {
2373   if (size  < SmallForDictionary) {
2374     smallSplitDeath(size);
2375   } else {
2376     dictionary()->dict_census_update(size,
2377                                    true /* split */,
2378                                    false /* birth */);
2379   }
2380 }
2381 
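     // Census bookkeeping for splitting a chunk of 'from' words into pieces of
     // 'to1' and (from - to1) words: one split death and two split births.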
2382 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2383   size_t to2 = from - to1;
2384   splitDeath(from);
2385   split_birth(to1);
2386   split_birth(to2);
2387 }
2388 
2389 void CompactibleFreeListSpace::print() const {
2390   print_on(tty);
2391 }
2392 
2393 void CompactibleFreeListSpace::prepare_for_verify() {
2394   assert_locked();
2395   repairLinearAllocationBlocks();
2396   // Verify that the SpoolBlocks look like free blocks of
2397   // appropriate sizes... To be done ...
2398 }
2399 
2400 class VerifyAllBlksClosure: public BlkClosure {
2401  private:
2402   const CompactibleFreeListSpace* _sp;
2403   const MemRegion                 _span;
2404   HeapWord*                       _last_addr;
2405   size_t                          _last_size;
2406   bool                            _last_was_obj;
2407   bool                            _last_was_live;
2408 
2409  public:
2410   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2411     MemRegion span) :  _sp(sp), _span(span),
2412                        _last_addr(NULL), _last_size(0),
2413                        _last_was_obj(false), _last_was_live(false) { }
2414 
2415   virtual size_t do_blk(HeapWord* addr) {
2416     size_t res;
2417     bool   was_obj  = false;
2418     bool   was_live = false;
2419     if (_sp->block_is_obj(addr)) {
2420       was_obj = true;
2421       oop p = oop(addr);
2422       guarantee(oopDesc::is_oop(p), "Should be an oop");
2423       res = _sp->adjustObjectSize(p->size());
2424       if (_sp->obj_is_alive(addr)) {
2425         was_live = true;
2426         oopDesc::verify(p);
2427       }
2428     } else {
2429       FreeChunk* fc = (FreeChunk*)addr;
2430       res = fc->size();
2431       if (FLSVerifyLists && !fc->cantCoalesce()) {
2432         guarantee(_sp->verify_chunk_in_free_list(fc),
2433                   "Chunk should be on a free list");
2434       }
2435     }
2436     if (res == 0) {
2437       Log(gc, verify) log;
2438       log.error("Livelock: no rank reduction!");
2439       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2440                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2441         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2442         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2443       LogStream ls(log.error());
2444       _sp->print_on(&ls);
2445       guarantee(false, "Verification failed.");
2446     }
2447     _last_addr = addr;
2448     _last_size = res;
2449     _last_was_obj  = was_obj;
2450     _last_was_live = was_live;
2451     return res;
2452   }
2453 };
2454 
2455 class VerifyAllOopsClosure: public BasicOopIterateClosure {
2456  private:
2457   const CMSCollector*             _collector;
2458   const CompactibleFreeListSpace* _sp;
2459   const MemRegion                 _span;
2460   const bool                      _past_remark;
2461   const CMSBitMap*                _bit_map;
2462 
2463  protected:
2464   void do_oop(void* p, oop obj) {
2465     if (_span.contains(obj)) { // the interior oop points into CMS heap
2466       if (!_span.contains(p)) { // reference from outside CMS heap
2467         // Should be a valid object; the first disjunct below allows
2468         // us to sidestep an assertion in block_is_obj() that insists
2469         // that p be in _sp. Note that several generations (and spaces)
2470         // are spanned by _span (CMS heap) above.
2471         guarantee(!_sp->is_in_reserved(obj) ||
2472                   _sp->block_is_obj((HeapWord*)obj),
2473                   "Should be an object");
2474         guarantee(oopDesc::is_oop(obj), "Should be an oop");
2475         oopDesc::verify(obj);
2476         if (_past_remark) {
2477           // Remark has been completed, the object should be marked
2478           _bit_map->isMarked((HeapWord*)obj);
2479         }
2480       } else { // reference within CMS heap
2481         if (_past_remark) {
2482           // Remark has been completed -- so the referent should have
2483           // been marked, if referring object is.
2484           if (_bit_map->isMarked(_collector->block_start(p))) {
2485             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2486           }
2487         }
2488       }
2489     } else if (_sp->is_in_reserved(p)) {
2490       // the reference is from FLS, and points out of FLS
2491       guarantee(oopDesc::is_oop(obj), "Should be an oop");
2492       oopDesc::verify(obj);
2493     }
2494   }
2495 
2496   template <class T> void do_oop_work(T* p) {
2497     T heap_oop = RawAccess<>::oop_load(p);
2498     if (!CompressedOops::is_null(heap_oop)) {
2499       oop obj = CompressedOops::decode_not_null(heap_oop);
2500       do_oop(p, obj);
2501     }
2502   }
2503 
2504  public:
2505   VerifyAllOopsClosure(const CMSCollector* collector,
2506     const CompactibleFreeListSpace* sp, MemRegion span,
2507     bool past_remark, CMSBitMap* bit_map) :
2508     _collector(collector), _sp(sp), _span(span),
2509     _past_remark(past_remark), _bit_map(bit_map) { }
2510 
2511   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2512   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2513 };
2514 
2515 void CompactibleFreeListSpace::verify() const {
2516   assert_lock_strong(&_freelistLock);
2517   verify_objects_initialized();
2518   MemRegion span = _collector->_span;
2519   bool past_remark = (_collector->abstract_state() ==
2520                       CMSCollector::Sweeping);
2521 
2522   ResourceMark rm;
2523   HandleMark  hm;
2524 
2525   // Check integrity of CFL data structures
2526   _promoInfo.verify();
2527   _dictionary->verify();
2528   if (FLSVerifyIndexTable) {
2529     verifyIndexedFreeLists();
2530   }
2531   // Check integrity of all objects and free blocks in space
2532   {
2533     VerifyAllBlksClosure cl(this, span);
2534     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2535   }
2536   // Check that all references in the heap to FLS
2537   // are to valid objects in FLS or that references in
2538   // FLS are to valid objects elsewhere in the heap
2539   if (FLSVerifyAllHeapReferences) {
2541     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2542       _collector->markBitMap());
2543 
2544     // Iterate over all oops in the heap.
2545     CMSHeap::heap()->oop_iterate(&cl);
2546   }
2547 
2548   if (VerifyObjectStartArray) {
2549     // Verify the block offset table
2550     _bt.verify();
2551   }
2552 }
2553 
2554 #ifndef PRODUCT
2555 void CompactibleFreeListSpace::verifyFreeLists() const {
2556   if (FLSVerifyLists) {
2557     _dictionary->verify();
2558     verifyIndexedFreeLists();
2559   } else {
2560     if (FLSVerifyDictionary) {
2561       _dictionary->verify();
2562     }
2563     if (FLSVerifyIndexTable) {
2564       verifyIndexedFreeLists();
2565     }
2566   }
2567 }
2568 #endif
2569 
2570 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2571   size_t i = 0;
2572   for (; i < IndexSetStart; i++) {
2573     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2574   }
2575   for (; i < IndexSetSize; i++) {
2576     verifyIndexedFreeList(i);
2577   }
2578 }
2579 
2580 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2581   FreeChunk* fc   =  _indexedFreeList[size].head();
2582   FreeChunk* tail =  _indexedFreeList[size].tail();
2583   size_t    num = _indexedFreeList[size].count();
2584   size_t      n = 0;
2585   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2586             "Slot should have been empty");
2587   for (; fc != NULL; fc = fc->next(), n++) {
2588     guarantee(fc->size() == size, "Size inconsistency");
2589     guarantee(fc->is_free(), "!free?");
2590     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2591     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2592   }
2593   guarantee(n == num, "Incorrect count");
2594 }
2595 
2596 #ifndef PRODUCT
2597 void CompactibleFreeListSpace::check_free_list_consistency() const {
2598   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2599     "Some sizes can't be allocated without recourse to"
2600     " linear allocation buffers");
2601   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2602     "else MIN_TREE_CHUNK_SIZE is wrong");
2603   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2604   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2605 }
2606 #endif
2607 
2608 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2609   assert_lock_strong(&_freelistLock);
2610   LogTarget(Debug, gc, freelist, census) log;
2611   if (!log.is_enabled()) {
2612     return;
2613   }
2614   AdaptiveFreeList<FreeChunk> total;
2615   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2616   ResourceMark rm;
2617   LogStream ls(log);
2618   outputStream* out = &ls;
2619   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2620   size_t total_free = 0;
2621   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2622     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2623     total_free += fl->count() * fl->size();
2624     if (i % (40*IndexSetStride) == 0) {
2625       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2626     }
2627     fl->print_on(out);
2628     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2629     total.set_surplus(    total.surplus()     + fl->surplus()    );
2630     total.set_desired(    total.desired()     + fl->desired()    );
2631     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2632     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2633     total.set_count(      total.count()       + fl->count()      );
2634     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2635     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2636     total.set_split_births(total.split_births() + fl->split_births());
2637     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2638   }
2639   total.print_on(out, "TOTAL");
2640   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
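  // "growth" below is (split_births + coal_births - split_deaths - coal_deaths)
  // divided by prev_sweep (or by 1.0 if prev_sweep is zero); "deficit" is
  // (desired - count) divided by desired (or by 1.0 if desired is zero).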
2641   log.print("growth: %8.5f  deficit: %8.5f",
2642             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2643                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2644             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2645   _dictionary->print_dict_census(out);
2646 }
2647 
2648 ///////////////////////////////////////////////////////////////////////////
2649 // CompactibleFreeListSpaceLAB
2650 ///////////////////////////////////////////////////////////////////////////
2651 
2652 #define VECTOR_257(x)                                                                                  \
2653   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2654   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2655      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2656      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2657      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2658      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2659      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2660      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2661      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2662      x }
2663 
// Initialize with the default setting for CMS, _not_ the generic
// OldPLABSize, whose static default is different; if overridden on the
// command line, this will get reinitialized via a call to
// modify_initialization() below.
2668 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2669   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2670 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2671 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2672 
2673 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2674   _cfls(cfls)
2675 {
2676   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2677   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2678        i < CompactibleFreeListSpace::IndexSetSize;
2679        i += CompactibleFreeListSpace::IndexSetStride) {
2680     _indexedFreeList[i].set_size(i);
2681     _num_blocks[i] = 0;
2682   }
2683 }
2684 
2685 static bool _CFLS_LAB_modified = false;
2686 
2687 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2688   assert(!_CFLS_LAB_modified, "Call only once");
2689   _CFLS_LAB_modified = true;
2690   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2691        i < CompactibleFreeListSpace::IndexSetSize;
2692        i += CompactibleFreeListSpace::IndexSetStride) {
2693     _blocks_to_claim[i].modify(n, wt, true /* force */);
2694   }
2695 }
2696 
2697 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2698   FreeChunk* res;
2699   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
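  // Two allocation paths: sizes at or above IndexSetSize are taken from the
  // shared dictionary under the dictionary lock; smaller sizes come from this
  // LAB's local indexed free lists, refilled from the global pool on demand.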
2700   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2701     // This locking manages sync with other large object allocations.
2702     MutexLocker x(_cfls->parDictionaryAllocLock(),
2703                   Mutex::_no_safepoint_check_flag);
2704     res = _cfls->getChunkFromDictionaryExact(word_sz);
2705     if (res == NULL) return NULL;
2706   } else {
2707     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2708     if (fl->count() == 0) {
2709       // Attempt to refill this local free list.
2710       get_from_global_pool(word_sz, fl);
2711       // If it didn't work, give up.
2712       if (fl->count() == 0) return NULL;
2713     }
2714     res = fl->get_chunk_at_head();
2715     assert(res != NULL, "Why was count non-zero?");
2716   }
2717   res->markNotFree();
2718   assert(!res->is_free(), "shouldn't be marked free");
2719   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2720   // mangle a just allocated object with a distinct pattern.
2721   debug_only(res->mangleAllocated(word_sz));
2722   return (HeapWord*)res;
2723 }
2724 
2725 // Get a chunk of blocks of the right size and update related
2726 // book-keeping stats
2727 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2728   // Get the #blocks we want to claim
2729   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2730   assert(n_blks > 0, "Error");
2731   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2732   // In some cases, when the application has a phase change,
2733   // there may be a sudden and sharp shift in the object survival
2734   // profile, and updating the counts at the end of a scavenge
2735   // may not be quick enough, giving rise to large scavenge pauses
2736   // during these phase changes. It is beneficial to detect such
2737   // changes on-the-fly during a scavenge and avoid such a phase-change
2738   // pothole. The following code is a heuristic attempt to do that.
2739   // It is protected by a product flag until we have gained
2740   // enough experience with this heuristic and fine-tuned its behavior.
2741   // WARNING: This might increase fragmentation if we overreact to
2742   // small spikes, so some kind of historical smoothing based on
2743   // previous experience with the greater reactivity might be useful.
2744   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2745   // default.
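  // A minimal worked example of the adjustment below (illustrative values
  // only): with n_blks = 50, _num_blocks[word_sz] = 2000,
  // CMSOldPLABToleranceFactor = 4, CMSOldPLABNumRefills = 4 and
  // CMSOldPLABReactivityFactor = 2, the denominator is 4*4*50 = 800, so
  // multiple = (size_t)(2000/800.0) = 2 and n_blks grows by 2*2*50 = 200
  // to 250, subject to the CMSOldPLABMax cap.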
2746   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2747     //
2748     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2749     // which is why there is a cast to double.
2750     //
2751     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2752     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2753     n_blks = MIN2(n_blks, CMSOldPLABMax);
2754   }
2755   assert(n_blks > 0, "Error");
2756   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2757   // Update stats table entry for this block size
2758   _num_blocks[word_sz] += fl->count();
2759 }
2760 
2761 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2762   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2763        i < CompactibleFreeListSpace::IndexSetSize;
2764        i += CompactibleFreeListSpace::IndexSetStride) {
2765     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2766            "Counter inconsistency");
2767     if (_global_num_workers[i] > 0) {
2768       // Need to smooth wrt historical average
2769       if (ResizeOldPLAB) {
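        // The new sample is the number of blocks consumed per worker per
        // refill from the counters accumulated since the last resize,
        // clamped to [CMSOldPLABMin, CMSOldPLABMax] before being fed into
        // the weighted average.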
2770         _blocks_to_claim[i].sample(
2771           MAX2(CMSOldPLABMin,
2772           MIN2(CMSOldPLABMax,
2773                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2774       }
2775       // Reset counters for next round
2776       _global_num_workers[i] = 0;
2777       _global_num_blocks[i] = 0;
2778       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2779     }
2780   }
2781 }
2782 
2783 // If this is changed in the future to allow parallel
2784 // access, one would need to take the FL locks and,
2785 // depending on how it is used, stagger access from
2786 // parallel threads to reduce contention.
2787 void CompactibleFreeListSpaceLAB::retire(int tid) {
2788   // We run this single threaded with the world stopped;
2789   // so no need for locks and such.
2790   NOT_PRODUCT(Thread* t = Thread::current();)
2791   assert(Thread::current()->is_VM_thread(), "Error");
2792   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2793        i < CompactibleFreeListSpace::IndexSetSize;
2794        i += CompactibleFreeListSpace::IndexSetStride) {
2795     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2796            "Can't retire more than what we obtained");
2797     if (_num_blocks[i] > 0) {
2798       size_t num_retire =  _indexedFreeList[i].count();
2799       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2800       {
2801         // MutexLocker x(_cfls->_indexedFreeListParLocks[i],
2802         //               Mutex::_no_safepoint_check_flag);
2803 
2804         // Update globals stats for num_blocks used
2805         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2806         _global_num_workers[i]++;
2807         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2808         if (num_retire > 0) {
2809           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2810           // Reset this list.
2811           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2812           _indexedFreeList[i].set_size(i);
2813         }
2814       }
2815       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2816                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2817       // Reset stats for next round
2818       _num_blocks[i]         = 0;
2819     }
2820   }
2821 }
2822 
// Used by par_get_chunk_of_blocks() for the chunks from the
// indexed_free_lists.  Looks for a chunk whose size is a multiple
// of "word_sz" and, if found, splits it into "word_sz"-sized chunks,
// adding them to the free list "fl".  "n" is the maximum number of
// chunks to be added to "fl".
2828 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2829 
2830   // We'll try all multiples of word_sz in the indexed set, starting with
2831   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2832   // then try getting a big chunk and splitting it.
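  // For example (illustrative sizes only): with word_sz = 8 and
  // CMSSplitIndexedFreeListBlocks enabled, the loop below tries
  // cur_sz = 8, 16, 24, ... (k = 1, 2, 3, ...) while cur_sz is still
  // below IndexSetSize, splitting each chunk it takes k ways into
  // blocks of word_sz words.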
2833   {
2834     bool found;
2835     int  k;
2836     size_t cur_sz;
2837     for (k = 1, cur_sz = k * word_sz, found = false;
2838          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2839          (CMSSplitIndexedFreeListBlocks || k <= 1);
2840          k++, cur_sz = k * word_sz) {
2841       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2842       fl_for_cur_sz.set_size(cur_sz);
2843       {
2844         MutexLocker x(_indexedFreeListParLocks[cur_sz],
2845                       Mutex::_no_safepoint_check_flag);
2846         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2847         if (gfl->count() != 0) {
2848           // nn is the number of chunks of size cur_sz that
2849           // we'd need to split k-ways each, in order to create
2850           // "n" chunks of size word_sz each.
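          // E.g. (illustrative): for n = 16 and k = 4, nn = 4 chunks of
          // size 4*word_sz, which, split 4 ways each, yield the 16
          // requested word_sz-sized blocks.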
2851           const size_t nn = MAX2(n/k, (size_t)1);
2852           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2853           found = true;
2854           if (k > 1) {
2855             // Update split death stats for the cur_sz-size blocks list:
2856             // we increment the split death count by the number of blocks
2857             // we just took from the cur_sz-size blocks list and which
2858             // we will be splitting below.
2859             ssize_t deaths = gfl->split_deaths() +
2860                              fl_for_cur_sz.count();
2861             gfl->set_split_deaths(deaths);
2862           }
2863         }
2864       }
2865       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2866       if (found) {
2867         if (k == 1) {
2868           fl->prepend(&fl_for_cur_sz);
2869         } else {
2870           // Divide each block on fl_for_cur_sz up k ways.
2871           FreeChunk* fc;
2872           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2873             // Must do this in reverse order, so that anybody attempting to
2874             // access the main chunk sees it as a single free block until we
2875             // change it.
2876             size_t fc_size = fc->size();
2877             assert(fc->is_free(), "Error");
2878             for (int i = k-1; i >= 0; i--) {
2879               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
              assert((i != 0) ||
                     ((fc == ffc) && ffc->is_free() &&
                      (ffc->size() == k*word_sz) && (fc_size == word_sz)),
                     "Counting error");
2884               ffc->set_size(word_sz);
2885               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2886               ffc->link_next(NULL);
2887               // Above must occur before BOT is updated below.
2888               OrderAccess::storestore();
2889               // splitting from the right, fc_size == i * word_sz
2890               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2891               fc_size -= word_sz;
2892               assert(fc_size == i*word_sz, "Error");
2893               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2894               _bt.verify_single_block((HeapWord*)fc, fc_size);
2895               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2896               // Push this on "fl".
2897               fl->return_chunk_at_head(ffc);
2898             }
2899             // TRAP
2900             assert(fl->tail()->next() == NULL, "List invariant.");
2901           }
2902         }
2903         // Update birth stats for this block size.
2904         size_t num = fl->count();
2905         MutexLocker x(_indexedFreeListParLocks[word_sz],
2906                       Mutex::_no_safepoint_check_flag);
2907         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2908         _indexedFreeList[word_sz].set_split_births(births);
2909         return true;
2910       }
2911     }
2912     return found;
2913   }
2914 }
2915 
2916 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2917 
2918   FreeChunk* fc = NULL;
2919   FreeChunk* rem_fc = NULL;
2920   size_t rem;
2921   {
2922     MutexLocker x(parDictionaryAllocLock(),
2923                   Mutex::_no_safepoint_check_flag);
2924     while (n > 0) {
2925       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
2926       if (fc != NULL) {
2927         break;
2928       } else {
2929         n--;
2930       }
2931     }
2932     if (fc == NULL) return NULL;
2933     // Otherwise, split up that block.
2934     assert((ssize_t)n >= 1, "Control point invariant");
2935     assert(fc->is_free(), "Error: should be a free block");
2936     _bt.verify_single_block((HeapWord*)fc, fc->size());
2937     const size_t nn = fc->size() / word_sz;
2938     n = MIN2(nn, n);
2939     assert((ssize_t)n >= 1, "Control point invariant");
2940     rem = fc->size() - n * word_sz;
2941     // If there is a remainder, and it's too small, allocate one fewer.
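    // E.g. (illustrative): word_sz = 20, fc->size() = 104 and a request of
    // at least 5 blocks give n = 5 and rem = 4; if 4 < MinChunkSize, we
    // give back one block: n = 4, rem = 24.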
2942     if (rem > 0 && rem < MinChunkSize) {
2943       n--; rem += word_sz;
2944     }
2945     // Note that at this point we may have n == 0.
2946     assert((ssize_t)n >= 0, "Control point invariant");
2947 
2948     // If n is 0, the chunk fc that was found is not large
2949     // enough to leave a viable remainder.  We are unable to
2950     // allocate even one block.  Return fc to the
2951     // dictionary and return, leaving "fl" empty.
2952     if (n == 0) {
2953       returnChunkToDictionary(fc);
2954       return NULL;
2955     }
2956 
2957     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2958     dictionary()->dict_census_update(fc->size(),
2959                                      true /*split*/,
2960                                      false /*birth*/);
2961 
2962     // First return the remainder, if any.
2963     // Note that we hold the lock until we decide if we're going to give
2964     // back the remainder to the dictionary, since a concurrent allocation
2965     // may otherwise see the heap as empty.  (We're willing to take that
2966     // hit if the block is a small block.)
2967     if (rem > 0) {
2968       size_t prefix_size = n * word_sz;
2969       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2970       rem_fc->set_size(rem);
2971       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2972       rem_fc->link_next(NULL);
2973       // Above must occur before BOT is updated below.
2974       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2975       OrderAccess::storestore();
2976       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2977       assert(fc->is_free(), "Error");
2978       fc->set_size(prefix_size);
2979       if (rem >= IndexSetSize) {
2980         returnChunkToDictionary(rem_fc);
2981         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2982         rem_fc = NULL;
2983       }
2984       // Otherwise, return it to the small list below.
2985     }
2986   }
2987   if (rem_fc != NULL) {
2988     MutexLocker x(_indexedFreeListParLocks[rem],
2989                   Mutex::_no_safepoint_check_flag);
2990     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2991     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2992     smallSplitBirth(rem);
2993   }
2994   assert(n * word_sz == fc->size(),
         "Chunk size " SIZE_FORMAT " is not exactly splittable into "
         SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2997          fc->size(), n, word_sz);
2998   return fc;
2999 }
3000 
3001 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
3002 
3003   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
3004 
3005   if (fc == NULL) {
3006     return;
3007   }
3008 
3009   size_t n = fc->size() / word_sz;
3010 
3011   assert((ssize_t)n > 0, "Consistency");
3012   // Now do the splitting up.
3013   // Must do this in reverse order, so that anybody attempting to
3014   // access the main chunk sees it as a single free block until we
3015   // change it.
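  // E.g. (illustrative): for n = 4 the loop below carves out blocks at
  // word offsets 3*word_sz, 2*word_sz and word_sz within fc, highest
  // first; only after that is fc itself shrunk to word_sz and returned
  // to "fl".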
3016   size_t fc_size = n * word_sz;
3017   // All but first chunk in this loop
3018   for (ssize_t i = n-1; i > 0; i--) {
3019     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
3020     ffc->set_size(word_sz);
3021     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
3022     ffc->link_next(NULL);
3023     // Above must occur before BOT is updated below.
3024     OrderAccess::storestore();
    // Splitting from the right: after the decrement below, fc_size == i * word_sz.
3026     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
3027     fc_size -= word_sz;
3028     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
3029     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
3030     _bt.verify_single_block((HeapWord*)fc, fc_size);
3031     // Push this on "fl".
3032     fl->return_chunk_at_head(ffc);
3033   }
3034   // First chunk
3035   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
3036   // The blocks above should show their new sizes before the first block below
3037   fc->set_size(word_sz);
3038   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
3039   fc->link_next(NULL);
3040   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
3041   _bt.verify_single_block((HeapWord*)fc, fc->size());
3042   fl->return_chunk_at_head(fc);
3043 
3044   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
3045   {
3046     // Update the stats for this block size.
3047     MutexLocker x(_indexedFreeListParLocks[word_sz],
3048                   Mutex::_no_safepoint_check_flag);
3049     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
3050     _indexedFreeList[word_sz].set_split_births(births);
3051     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
3052     // _indexedFreeList[word_sz].set_surplus(new_surplus);
3053   }
3054 
3055   // TRAP
3056   assert(fl->tail()->next() == NULL, "List invariant.");
3057 }
3058 
3059 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
3060   assert(fl->count() == 0, "Precondition.");
3061   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
3062          "Precondition");
3063 
3064   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
3065     // Got it
3066     return;
3067   }
3068 
3069   // Otherwise, we'll split a block from the dictionary.
3070   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
3071 }
3072 
3073 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
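  // ergo_max is the old generation's reserved size expressed in units of
  // (card_size_in_words * BitsPerWord) words; e.g., assuming 512-byte cards
  // and a 64-bit VM (64 words per card, BitsPerWord == 64), this is
  // reserved_words / 4096.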
3074   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
3075   return ergo_max;
3076 }
3077 
3078 // Set up the space's par_seq_tasks structure for work claiming
3079 // for parallel rescan. See CMSParRemarkTask where this is currently used.
3080 // XXX Need to suitably abstract and generalize this and the next
3081 // method into one.
3082 void
3083 CompactibleFreeListSpace::
3084 initialize_sequential_subtasks_for_rescan(int n_threads) {
3085   // The "size" of each task is fixed according to rescan_task_size.
3086   assert(n_threads > 0, "Unexpected n_threads argument");
3087   const size_t task_size = rescan_task_size();
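  // n_tasks is the ceiling of used_region().word_size() / task_size, so the
  // last task may cover fewer than task_size words; e.g. (illustrative),
  // 1000 used words with a 300-word task size yield 4 tasks.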
3088   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
3089   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
3090   assert(n_tasks == 0 ||
3091          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
3092           (used_region().start() + n_tasks*task_size >= used_region().end())),
3093          "n_tasks calculation incorrect");
3094   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3095   assert(!pst->valid(), "Clobbering existing data?");
3096   // Sets the condition for completion of the subtask (how many threads
3097   // need to finish in order to be done).
3098   pst->set_n_threads(n_threads);
3099   pst->set_n_tasks((int)n_tasks);
3100 }
3101 
3102 // Set up the space's par_seq_tasks structure for work claiming
3103 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
3104 void
3105 CompactibleFreeListSpace::
3106 initialize_sequential_subtasks_for_marking(int n_threads,
3107                                            HeapWord* low) {
  // The "size" of each task is fixed according to marking_task_size.
3109   assert(n_threads > 0, "Unexpected n_threads argument");
3110   const size_t task_size = marking_task_size();
3111   assert(task_size > CardTable::card_size_in_words &&
3112          (task_size %  CardTable::card_size_in_words == 0),
3113          "Otherwise arithmetic below would be incorrect");
3114   MemRegion span = _old_gen->reserved();
3115   if (low != NULL) {
3116     if (span.contains(low)) {
3117       // Align low down to  a card boundary so that
3118       // we can use block_offset_careful() on span boundaries.
3119       HeapWord* aligned_low = align_down(low, CardTable::card_size);
3120       // Clip span prefix at aligned_low
3121       span = span.intersection(MemRegion(aligned_low, span.end()));
3122     } else if (low > span.end()) {
3123       span = MemRegion(low, low);  // Null region
3124     } // else use entire span
3125   }
3126   assert(span.is_empty() ||
3127          ((uintptr_t)span.start() %  CardTable::card_size == 0),
3128         "span should start at a card boundary");
3129   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3130   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3131   assert(n_tasks == 0 ||
3132          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3133           (span.start() + n_tasks*task_size >= span.end())),
3134          "n_tasks calculation incorrect");
3135   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3136   assert(!pst->valid(), "Clobbering existing data?");
3137   // Sets the condition for completion of the subtask (how many threads
3138   // need to finish in order to be done).
3139   pst->set_n_threads(n_threads);
3140   pst->set_n_tasks((int)n_tasks);
3141 }