/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/cms/cmsHeap.hpp"
#include "gc/cms/cmsLockVerifier.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
#include "gc/cms/concurrentMarkSweepThread.hpp"
#include "gc/shared/blockOffsetTable.inline.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.inline.hpp"
#include "gc/shared/spaceDecorator.hpp"
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/binaryTreeDictionary.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/align.hpp"
#include "utilities/copy.hpp"

// Specialize for AdaptiveFreeList, which tries to avoid
// splitting a chunk of a size that is under-populated in favor of
// an over-populated size.  The general get_better_list() just returns
// the current list.
template <>
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
  BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
  // A candidate chunk has been found.  If it is already
  // under-populated, get a chunk associated with the hint for
  // this chunk.

  TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
  if (curTL->surplus() <= 0) {
    /* Use the hint to find a size with a surplus, and reset the hint. */
    TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
    while (hintTL->hint() != 0) {
      assert(hintTL->hint() > hintTL->size(),
        "hint points in the wrong direction");
      hintTL = dictionary->find_list(hintTL->hint());
      assert(curTL != hintTL, "Infinite loop");
      if (hintTL == NULL ||
          hintTL == curTL /* Should not happen but protect against it */ ) {
        // No useful hint.  Clear the hint (set it to 0) and go on.
        curTL->set_hint(0);
        break;
      }
      assert(hintTL->size() > curTL->size(), "hint is inconsistent");
      if (hintTL->surplus() > 0) {
        // The hint led to a list that has a surplus.  Use it.
        // Set the hint for the candidate to an over-populated
        // size.
        curTL->set_hint(hintTL->size());
        // Change the candidate.
        curTL = hintTL;
        break;
      }
    }
  }
  return curTL;
}

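// Update the dictionary's census for a split or coalesce birth/death of a
// chunk of the given size: bump the matching birth or death counter on that
// size's list and adjust its surplus accordingly.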
void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
  if (nd) {
    if (split) {
      if (birth) {
        nd->increment_split_births();
        nd->increment_surplus();
      } else {
        nd->increment_split_deaths();
        nd->decrement_surplus();
      }
    } else {
      if (birth) {
        nd->increment_coal_births();
        nd->increment_surplus();
      } else {
        nd->increment_coal_deaths();
        nd->decrement_surplus();
      }
    }
  }
  // A list for this size may not be found (nd == NULL) if either
  //   this is a death where the appropriate list is now empty and
  //     has been removed from the tree, or
  //   this is a birth associated with a LinAB; the chunk for the
  //     LinAB is not in the dictionary.
}

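// Answer whether chunks of the given size should be coalesced rather than
// kept: true when there is no list for this size, when no coalesced chunks
// of this size are desired, or when the list already holds more chunks than
// desired.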
bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
  if (FLSAlwaysCoalesceLarge) return true;

  TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
  // Having no list of the requested size implies that the size is over-populated.
  return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
         list_of_size->count() > list_of_size->coal_desired();
}

// For each list in the tree, calculate the desired count, the desired
// coalesce count, the count before sweep, and the surplus before sweep.
class BeginSweepClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  double _percentage;
  float _inter_sweep_current;
  float _inter_sweep_estimate;
  float _intra_sweep_estimate;

 public:
  BeginSweepClosure(double p, float inter_sweep_current,
                              float inter_sweep_estimate,
                              float intra_sweep_estimate) :
   _percentage(p),
   _inter_sweep_current(inter_sweep_current),
   _inter_sweep_estimate(inter_sweep_estimate),
   _intra_sweep_estimate(intra_sweep_estimate) { }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    double coalSurplusPercent = _percentage;
    fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
    fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
    fl->set_before_sweep(fl->count());
    fl->set_bfr_surp(fl->surplus());
  }
};

void AFLBinaryTreeDictionary::begin_sweep_dict_census(double coalSurplusPercent,
  float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
  BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
                        inter_sweep_estimate,
                        intra_sweep_estimate);
  bsc.do_tree(root());
}

// Calculate surpluses for the lists in the tree.
class setTreeSurplusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  double percentage;
 public:
  setTreeSurplusClosure(double v) { percentage = v; }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    double splitSurplusPercent = percentage;
    fl->set_surplus(fl->count() -
                   (ssize_t)((double)fl->desired() * splitSurplusPercent));
  }
};

void AFLBinaryTreeDictionary::set_tree_surplus(double splitSurplusPercent) {
  setTreeSurplusClosure sts(splitSurplusPercent);
  sts.do_tree(root());
}

// Set hints for the lists in the tree.
class setTreeHintsClosure : public DescendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  size_t hint;
 public:
  setTreeHintsClosure(size_t v) { hint = v; }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    fl->set_hint(hint);
    assert(fl->hint() == 0 || fl->hint() > fl->size(),
      "Current hint is inconsistent");
    if (fl->surplus() > 0) {
      hint = fl->size();
    }
  }
};

void AFLBinaryTreeDictionary::set_tree_hints(void) {
  setTreeHintsClosure sth(0);
  sth.do_tree(root());
}

// Record the current count as the count at the previous sweep and clear
// the split and coalesce birth/death counters.
class clearTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    fl->set_prev_sweep(fl->count());
    fl->set_coal_births(0);
    fl->set_coal_deaths(0);
    fl->set_split_births(0);
    fl->set_split_deaths(0);
  }
};

void AFLBinaryTreeDictionary::clear_tree_census(void) {
  clearTreeCensusClosure ctc;
  ctc.do_tree(root());
}

// Do reporting and post-sweep cleanup.
void AFLBinaryTreeDictionary::end_sweep_dict_census(double splitSurplusPercent) {
  // Does walking the tree 3 times hurt?
  set_tree_surplus(splitSurplusPercent);
  set_tree_hints();
  LogTarget(Trace, gc, freelist, stats) log;
  if (log.is_enabled()) {
    LogStream out(log);
    report_statistics(&out);
  }
  clear_tree_census();
}

// Print census information - counts, births, deaths, etc.
// for each list in the tree.  Also print some summary
// information.
class PrintTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
  int _print_line;
  size_t _total_free;
  AdaptiveFreeList<FreeChunk> _total;

 public:
  PrintTreeCensusClosure() {
    _print_line = 0;
    _total_free = 0;
  }
  AdaptiveFreeList<FreeChunk>* total() { return &_total; }
  size_t total_free() { return _total_free; }

  void do_list(AdaptiveFreeList<FreeChunk>* fl) {
    LogStreamHandle(Debug, gc, freelist, census) out;

    if (++_print_line >= 40) {
      AdaptiveFreeList<FreeChunk>::print_labels_on(&out, "size");
      _print_line = 0;
    }
    fl->print_on(&out);
    _total_free +=           fl->count()             * fl->size();
    total()->set_count(      total()->count()        + fl->count()      );
    total()->set_bfr_surp(   total()->bfr_surp()     + fl->bfr_surp()    );
    total()->set_surplus(    total()->surplus()      + fl->surplus()    );
    total()->set_desired(    total()->desired()      + fl->desired()    );
    total()->set_prev_sweep( total()->prev_sweep()   + fl->prev_sweep()  );
    total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
    total()->set_coal_births( total()->coal_births()  + fl->coal_births() );
    total()->set_coal_deaths( total()->coal_deaths()  + fl->coal_deaths() );
    total()->set_split_births(total()->split_births() + fl->split_births());
    total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
  }
};

void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const {

  st->print_cr("BinaryTree");
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
  PrintTreeCensusClosure ptc;
  ptc.do_tree(root());

  AdaptiveFreeList<FreeChunk>* total = ptc.total();
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, " ");
  total->print_on(st, "TOTAL\t");
  st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f  deficit: %8.5f",
               ptc.total_free(),
               (double)(total->split_births() + total->coal_births()
                      - total->split_deaths() - total->coal_deaths())
               /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
               (double)(total->desired() - total->count())
               /(total->desired() != 0 ? (double)total->desired() : 1.0));
}

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// Highest rank used by the free list locks.
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Defaults are 0 so things will break badly if incorrectly initialized.
size_t CompactibleFreeListSpace::IndexSetStart  = 0;
size_t CompactibleFreeListSpace::IndexSetStride = 0;
size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;

size_t MinChunkSize = 0;

void CompactibleFreeListSpace::set_cms_values() {
  // Set CMS global values
  assert(MinChunkSize == 0, "already set");

  // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  // for chunks to contain a FreeChunk.
  _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;

  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  IndexSetStart  = MinChunkSize;
  IndexSetStride = MinObjAlignment;
}

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
                    CMSConcMarkMultiple),
  _bt(bs, mr),
  _collector(NULL),
  // Free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace_lock", true,
                Monitor::_safepoint_check_never),
  _preconsumptionDirtyCardClosure(NULL),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace_dict_par_lock", true,
                          Monitor::_safepoint_check_never)
{
  assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
         "FreeChunk is larger than expected");
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);

  _dictionary = new AFLBinaryTreeDictionary(mr);

  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                             SmallForLinearAlloc);

  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  check_free_list_consistency();

  // Initialize locks for parallel case.
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                            "a freelist par lock", true, Mutex::_safepoint_check_never);
    DEBUG_ONLY(
      _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
    )
  }
  _dictionary->set_par_lock(&_parDictionaryAllocLock);

  _used_stable = 0;
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
         "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
         "for de-virtualized reference below");
  // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = CMSHeap::heap()->young_gen();
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; the non-virtual method was used above
      // to save the virtual dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark_raw();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += adjusted_size;

  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space calls it only when
  // compaction_top exceeds the current threshold; that shortcut does not
  // apply to a non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block.  The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize the indexed free lists: all lists start out empty.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used.  It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

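// The size of the object starting at addr, padded (if necessary) up to the
// minimum chunk size so that the result is a valid block size for this space.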
size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
  return adjustObjectSize(oop(addr)->size());
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (size_t i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

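// Reset the space so that it contains just the single free chunk described
// by mr: the indexed free lists and the dictionary are emptied, and mr, if
// non-empty, is re-installed as one free block.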
void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->set_size(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
    }
    coalBirth(mr.word_size());
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  refillLinearAllocBlocksIfNeeded();
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}


#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->total_count();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

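// A block starting at p is free iff its FreeChunk header is marked free;
// p must be the start of a block in this space.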
bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->is_free();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

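// Cached value of used(), recomputed via recalculate_used_stable(), so that
// concurrent readers (e.g. monitoring code) see a consistent value without
// taking the free list lock.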
size_t CompactibleFreeListSpace::used_stable() const {
  return _used_stable;
}

void CompactibleFreeListSpace::recalculate_used_stable() {
  _used_stable = used();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->max_chunk_size();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimistically for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
            p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
}

void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
}

void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics(st);
  st->print_cr("Layout of Indexed Freelists");
  st->print_cr("---------------------------");
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(st);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
      st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
                   p2i(fc), p2i((HeapWord*)fc + i),
                   fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}

void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}

void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->report_statistics(st);
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}

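// Closure used by dump_at_safepoint_with_locks() below: prints every block
// in the space, identifying it as a live object, a dead object, or a free
// chunk.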
class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  size_t do_blk(HeapWord* addr);
};

size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      p2i(addr),
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}

void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
  st->print_cr("=========================");
  st->print_cr("Block layout in CMS Heap:");
  st->print_cr("=========================");
  BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
  blk_iterate(&bpcl);

  st->print_cr("=======================================");
  st->print_cr("Order & Layout of Promotion Info Blocks");
  st->print_cr("=======================================");
  print_promo_info_blocks(st);

  st->print_cr("===========================");
  st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
  print_indexed_free_lists(st);

  st->print_cr("=================================");
  st->print_cr("Order of Free Lists in Dictionary");
  st->print_cr("=================================");
  print_dictionary_free_lists(st);
}


void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
  assert_lock_strong(&_freelistLock);
  Log(gc, freelist, stats) log;
  if (!log.is_debug()) {
    return;
  }
  log.debug("%s", title);

  LogStream out(log.debug());
  _dictionary->report_statistics(&out);

  if (log.is_trace()) {
    LogStream trace_out(log.trace());
    reportIndexedFreeListStatistics(&trace_out);
    size_t total_size = totalSizeInIndexedFreeLists() +
                       _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
    log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
  assert_lock_strong(&_freelistLock);
  st->print_cr("Statistics for IndexedFreeLists:");
  st->print_cr("--------------------------------");
  size_t total_size = totalSizeInIndexedFreeLists();
  size_t free_blocks = numFreeBlocksInIndexedFreeLists();
  st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
  st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
  st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
  if (free_blocks != 0) {
    st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
  }
}

size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

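// Change the end of the space to value and resize the block offset table to
// match; if the space grows, the newly covered words are added to the free
// lists (coalescing with the preceding free block when possible).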
void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
        "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // Add the block to the free lists, if possible coalescing it
      // with the last free block, and update the BOT and census data.
      addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
    }
  }
}

class FreeListSpaceDCTOC : public FilteringDCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
  bool _parallel;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(OopIterateClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
                     CMSCollector* collector,
                     OopIterateClosure* cl,
                     CardTable::PrecisionStyle precision,
                     HeapWord* boundary,
                     bool parallel) :
    FilteringDCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector), _parallel(parallel) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.

#define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
                                                 HeapWord* bottom,              \
                                                 HeapWord* top,                 \
                                                 ClosureType* cl) {             \
   if (_parallel) {                                                             \
     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   } else {                                                                     \
     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   }                                                                            \
}                                                                               \
void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
                                                     HeapWord* bottom,          \
                                                     HeapWord* top,             \
                                                     ClosureType* cl) {         \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
                                                       HeapWord* bottom,        \
                                                       HeapWord* top,           \
                                                       ClosureType* cl) {       \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(OopIterateClosure)
FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(OopIterateClosure* cl,
                                      CardTable::PrecisionStyle precision,
                                      HeapWord* boundary,
                                      bool parallel) {
  return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
}


// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(OopIterateClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized. We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
void CompactibleFreeListSpace::verify_objects_initialized() const {
  if (is_init_completed()) {
    assert_locked_or_safepoint(Heap_lock);
    if (Universe::is_fully_initialized()) {
      guarantee(SafepointSynchronize::is_at_safepoint(),
                "Required for objects to be initialized");
    }
  } // else make a concession at vm start-up
}

// Apply the given closure to each object in the space
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

// Apply the given closure to each live object in the space.
//   The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
// objects in the space to hold references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object.  See obj_is_alive() for details on how liveness of an
// object is decided.

void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur) && obj_is_alive(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                  UpwardsObjectClosure* cl) {
  assert_locked(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use the same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}

// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
  ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it may not necessarily
  // be the same as [bottom(),end()); although we could
  // use [used_region().start(),align_up(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
         "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  for (addr = block_start_careful(mr.start()), end  = mr.end();
       addr < end; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->is_free()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful_m(oop(addr), mr);
      if (size == 0) {
        // An unparsable object was found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}


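// Block-boundary queries below delegate to the block offset table (_bt).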
1148 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
1149   NOT_PRODUCT(verify_objects_initialized());
1150   return _bt.block_start(p);
1151 }
1152 
1153 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
1154   return _bt.block_start_careful(p);
1155 }
1156 
1157 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
1158   NOT_PRODUCT(verify_objects_initialized());
1159   // This must be volatile, or else there is a danger that the compiler
1160   // will compile the code below into a sometimes-infinite loop, by keeping
1161   // the value read the first time in a register.
1162   while (true) {
1163     // We must do this until we get a consistent view of the object.
1164     if (FreeChunk::indicatesFreeChunk(p)) {
1165       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1166       size_t res = fc->size();
1167 
1168       // Bugfix for systems with weak memory model (PPC64/IA64). The
1169       // block's free bit was set and we have read the size of the
1170       // block. Acquire and check the free bit again. If the block is
1171       // still free, the read size is correct.
1172       OrderAccess::acquire();
1173 
1174       // If the object is still a free chunk, return the size, else it
1175       // has been allocated so try again.
1176       if (FreeChunk::indicatesFreeChunk(p)) {
1177         assert(res != 0, "Block size should not be 0");
1178         return res;
1179       }
1180     } else {
1181       // Ensure klass read before size.
1182       Klass* k = oop(p)->klass_or_null_acquire();
1183       if (k != NULL) {
1184         assert(k->is_klass(), "Should really be klass oop.");
1185         oop o = (oop)p;
1186         assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
1187 
1188         size_t res = o->size_given_klass(k);
1189         res = adjustObjectSize(res);
1190         assert(res != 0, "Block size should not be 0");
1191         return res;
1192       }
1193     }
1194   }
1195 }
1196 
1197 // TODO: Now that is_parsable is gone, we should combine these two functions.
1198 // A variant of the above that uses the Printezis bits for
1199 // unparsable but allocated objects. This avoids any possible
1200 // stalls waiting for mutators to initialize objects, and is
1201 // thus potentially faster than the variant above. However,
1202 // this variant may return a zero size for a block that is
1203 // under mutation and for which a consistent size cannot be
1204 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1205 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1206                                                      const CMSCollector* c)
1207 const {
1208   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1209   // This must be volatile, or else there is a danger that the compiler
1210   // will compile the code below into a sometimes-infinite loop, by keeping
1211   // the value read the first time in a register.
1212   DEBUG_ONLY(uint loops = 0;)
1213   while (true) {
1214     // We must do this until we get a consistent view of the object.
1215     if (FreeChunk::indicatesFreeChunk(p)) {
1216       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1217       size_t res = fc->size();
1218 
1219       // Bugfix for systems with weak memory model (PPC64/IA64). The
1220       // free bit of the block was set and we have read the size of
1221       // the block. Acquire and check the free bit again. If the
1222       // block is still free, the read size is correct.
1223       OrderAccess::acquire();
1224 
1225       if (FreeChunk::indicatesFreeChunk(p)) {
1226         assert(res != 0, "Block size should not be 0");
1227         assert(loops == 0, "Should be 0");
1228         return res;
1229       }
1230     } else {
1231       // Ensure klass read before size.
1232       Klass* k = oop(p)->klass_or_null_acquire();
1233       if (k != NULL) {
1234         assert(k->is_klass(), "Should really be klass oop.");
1235         oop o = (oop)p;
1236         assert(oopDesc::is_oop(o), "Should be an oop");
1237 
1238         size_t res = o->size_given_klass(k);
1239         res = adjustObjectSize(res);
1240         assert(res != 0, "Block size should not be 0");
1241         return res;
1242       } else {
1243         // May return 0 if P-bits not present.
1244         return c->block_size_if_printezis_bits(p);
1245       }
1246     }
1247     assert(loops == 0, "Can loop at most once");
1248     DEBUG_ONLY(loops++;)
1249   }
1250 }
1251 
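     // A non-parallel variant of block_size(): used when the free/object
     // status of the block at p cannot change underneath us, so a single
     // read suffices and no retry loop or acquire ordering is needed.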
1252 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1253   NOT_PRODUCT(verify_objects_initialized());
1254   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1255   FreeChunk* fc = (FreeChunk*)p;
1256   if (fc->is_free()) {
1257     return fc->size();
1258   } else {
1259     // Ignore mark word because this may be a recently promoted
1260     // object whose mark word is used to chain together grey
1261     // objects (the last one would have a null value).
1262     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1263     return adjustObjectSize(oop(p)->size());
1264   }
1265 }
1266 
1267 // This implementation assumes that the property of "being an object" is
1268 // stable.  But being a free chunk may not be (because of parallel
1269 // promotion.)
1270 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1271   FreeChunk* fc = (FreeChunk*)p;
1272   assert(is_in_reserved(p), "Should be in space");
1273   if (FreeChunk::indicatesFreeChunk(p)) return false;
1274   Klass* k = oop(p)->klass_or_null_acquire();
1275   if (k != NULL) {
1276     // Ignore mark word because it may have been used to
1277     // chain together promoted objects (the last one
1278     // would have a null value).
1279     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1280     return true;
1281   } else {
1282     return false;  // Was not an object at the start of collection.
1283   }
1284 }
1285 
1286 // Check if the object is alive. This fact is checked either by consulting
1287 // the main marking bitmap in the sweeping phase or, if it's a permanent
1288 // generation and we're not in the sweeping phase, by checking the
1289 // perm_gen_verify_bit_map where we store the "deadness" information if
1290 // we did not sweep the perm gen in the most recent previous GC cycle.
1291 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1292   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1293          "Else races are possible");
1294   assert(block_is_obj(p), "The address should point to an object");
1295 
1296   // If we're sweeping, we use object liveness information from the main bit map
1297   // for both perm gen and old gen.
1298   // We don't need to lock the bitmap (live_map or dead_map below), because
1299   // EITHER we are in the middle of the sweeping phase, and the
1300   // main marking bit map (live_map below) is locked,
1301   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1302   // is stable, because it's mutated only in the sweeping phase.
1303   // NOTE: This method is also used by jmap where, if class unloading is
1304   // off, the results can return "false" for legitimate perm objects,
1305   // when we are not in the midst of a sweeping phase, which can result
1306   // in jmap not reporting certain perm gen objects. This will be moot
1307   // if/when the perm gen goes away in the future.
1308   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1309     CMSBitMap* live_map = _collector->markBitMap();
1310     return live_map->par_isMarked((HeapWord*) p);
1311   }
1312   return true;
1313 }
1314 
1315 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1316   FreeChunk* fc = (FreeChunk*)p;
1317   assert(is_in_reserved(p), "Should be in space");
1318   assert(_bt.block_start(p) == p, "Should be a block boundary");
1319   if (!fc->is_free()) {
1320     // Ignore mark word because it may have been used to
1321     // chain together promoted objects (the last one
1322     // would have a null value).
1323     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1324     return true;
1325   }
1326   return false;
1327 }
1328 
1329 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1330 // approximate answer if you don't hold the freelistlock when you call this.
1331 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1332   size_t size = 0;
1333   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1334     debug_only(
1335       // We may be calling here without the lock in which case we
1336       // won't do this modest sanity check.
1337       if (freelistLock()->owned_by_self()) {
1338         size_t total_list_size = 0;
1339         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1340           fc = fc->next()) {
1341           total_list_size += i;
1342         }
1343         assert(total_list_size == i * _indexedFreeList[i].count(),
1344                "Count in list is incorrect");
1345       }
1346     )
1347     size += i * _indexedFreeList[i].count();
1348   }
1349   return size;
1350 }
1351 
1352 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1353   MutexLocker x(freelistLock(), Mutex::_no_safepoint_check_flag);
1354   return allocate(size);
1355 }
1356 
1357 HeapWord*
1358 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1359   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1360 }
1361 
1362 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1363   assert_lock_strong(freelistLock());
1364   HeapWord* res = NULL;
1365   assert(size == adjustObjectSize(size),
1366          "use adjustObjectSize() before calling into allocate()");
1367 
1368   res = allocate_adaptive_freelists(size);
1369 
1370   if (res != NULL) {
1371     // check that res does lie in this space!
1372     assert(is_in_reserved(res), "Not in this space!");
1373     assert(is_aligned((void*)res), "alignment check");
1374 
1375     FreeChunk* fc = (FreeChunk*)res;
1376     fc->markNotFree();
1377     assert(!fc->is_free(), "shouldn't be marked free");
1378     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1379     // Verify that the block offset table shows this to
1380     // be a single block, but not one which is unallocated.
1381     _bt.verify_single_block(res, size);
1382     _bt.verify_not_unallocated(res, size);
1383     // mangle a just allocated object with a distinct pattern.
1384     debug_only(fc->mangleAllocated(size));
1385   }
1386 
1387   // After allocation, recalculate used space and update used_stable
1388   recalculate_used_stable();
1389 
1390   return res;
1391 }
1392 
1393 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1394   assert_lock_strong(freelistLock());
1395   HeapWord* res = NULL;
1396   assert(size == adjustObjectSize(size),
1397          "use adjustObjectSize() before calling into allocate()");
1398 
1399   // Strategy
1400   //   if small
1401   //     exact size from the small object indexed free lists
1402   //     small or large linear allocation block (linAB) as appropriate
1403   //     take from lists of greater sized chunks
1404   //   else
1405   //     dictionary
1406   //     small or large linear allocation block if it has the space
1407   // Try allocating exact size from indexTable first
1408   if (size < IndexSetSize) {
1409     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1410     if (res != NULL) {
1411       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1412         "Not removed from free list");
1413       // no block offset table adjustment is necessary on blocks in
1414       // the indexed lists.
1415 
1416     // Try allocating from the small LinAB
1417     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1418         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1419         // if successful, the above also adjusts block offset table
1420         // Note that this call will refill the LinAB to
1421         // satisfy the request.  This is different from
1422         // evm.
1423         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1424     } else {
1425       // Raid the exact free lists larger than size, even if they are not
1426       // overpopulated.
1427       res = (HeapWord*) getChunkFromGreater(size);
1428     }
1429   } else {
1430     // Big objects get allocated directly from the dictionary.
1431     res = (HeapWord*) getChunkFromDictionaryExact(size);
1432     if (res == NULL) {
1433       // Try hard not to fail since an allocation failure will likely
1434       // trigger a synchronous GC.  Try to get the space from the
1435       // allocation blocks.
1436       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1437     }
1438   }
1439 
1440   return res;
1441 }
1442 
1443 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1444 // when promoting obj.
1445 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1446   // Depending on the object size, expansion may require refilling either a
1447   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1448   // is added because the dictionary may over-allocate to avoid fragmentation.
1449   size_t space = obj_size;
1450   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1451   return space;
1452 }
1453 
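     // Satisfy a request for numWords by splitting a larger chunk: first scan
     // the indexed free lists for any size of at least numWords + MinChunkSize
     // with a non-empty list, then fall back to the dictionary. The extra
     // MinChunkSize guarantees that the remainder left over after the split
     // is itself a valid free chunk.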
1454 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1455   FreeChunk* ret;
1456 
1457   assert(numWords >= MinChunkSize, "Size is less than minimum");
1458   assert(linearAllocationWouldFail() || bestFitFirst(),
1459     "Should not be here");
1460 
1461   size_t i;
1462   size_t currSize = numWords + MinChunkSize;
1463   assert(is_object_aligned(currSize), "currSize should be aligned");
1464   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1465     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1466     if (fl->head()) {
1467       ret = getFromListGreater(fl, numWords);
1468       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1469       return ret;
1470     }
1471   }
1472 
1473   currSize = MAX2((size_t)SmallForDictionary,
1474                   (size_t)(numWords + MinChunkSize));
1475 
1476   /* Try to get a chunk that satisfies request, while avoiding
1477      fragmentation that can't be handled. */
1478   {
1479     ret =  dictionary()->get_chunk(currSize);
1480     if (ret != NULL) {
1481       assert(ret->size() - numWords >= MinChunkSize,
1482              "Chunk is too small");
1483       _bt.allocated((HeapWord*)ret, ret->size());
1484       /* Carve returned chunk. */
1485       (void) splitChunkAndReturnRemainder(ret, numWords);
1486       /* Label this as no longer a free chunk. */
1487       assert(ret->is_free(), "This chunk should be free");
1488       ret->link_prev(NULL);
1489     }
1490     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1491     return ret;
1492   }
1493   ShouldNotReachHere();
1494 }
1495 
1496 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1497   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1498   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1499 }
1500 
1501 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1502   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1503          (_smallLinearAllocBlock._word_size == fc->size()),
1504          "Linear allocation block shows incorrect size");
1505   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1506           (_smallLinearAllocBlock._word_size == fc->size()));
1507 }
1508 
1509 // Check if the purported free chunk is present either as a linear
1510 // allocation block, the size-indexed table of (smaller) free blocks,
1511 // or the larger free blocks kept in the binary tree dictionary.
1512 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1513   if (verify_chunk_is_linear_alloc_block(fc)) {
1514     return true;
1515   } else if (fc->size() < IndexSetSize) {
1516     return verifyChunkInIndexedFreeLists(fc);
1517   } else {
1518     return dictionary()->verify_chunk_in_free_list(fc);
1519   }
1520 }
1521 
1522 #ifndef PRODUCT
1523 void CompactibleFreeListSpace::assert_locked() const {
1524   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1525 }
1526 
1527 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1528   CMSLockVerifier::assert_locked(lock);
1529 }
1530 #endif
1531 
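     // Carve a chunk out of the dictionary for use as scratch space. The
     // chunk is left marked free but non-coalescable, so the sweeper will
     // not merge it with its neighbors while it is in use.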
1532 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1533   // In the parallel case, the main thread holds the free list lock
1534   // on behalf of the parallel threads.
1535   FreeChunk* fc;
1536   {
1537     // If GC is parallel, this might be called by several threads.
1538     // This should be rare enough that the locking overhead won't affect
1539     // the sequential code.
1540     MutexLocker x(parDictionaryAllocLock(),
1541                   Mutex::_no_safepoint_check_flag);
1542     fc = getChunkFromDictionary(size);
1543   }
1544   if (fc != NULL) {
1545     fc->dontCoalesce();
1546     assert(fc->is_free(), "Should be free, but not coalescable");
1547     // Verify that the block offset table shows this to
1548     // be a single block, but not one which is unallocated.
1549     _bt.verify_single_block((HeapWord*)fc, fc->size());
1550     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1551   }
1552   return fc;
1553 }
1554 
1555 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1556   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1557   assert_locked();
1558 
1559   // if we are tracking promotions, then first ensure space for
1560   // promotion (including spooling space for saving header if necessary).
1561   // then allocate and copy, then track promoted info if needed.
1562   // When tracking (see PromotionInfo::track()), the mark word may
1563   // be displaced and in this case restoration of the mark word
1564   // occurs in the (oop_since_save_marks_)iterate phase.
1565   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1566     return NULL;
1567   }
1568   // Call the allocate(size_t, bool) form directly to avoid the
1569   // additional call through the allocate(size_t) form.  Having
1570   // the compiler inline the call is problematic because allocate(size_t)
1571   // is a virtual method.
1572   HeapWord* res = allocate(adjustObjectSize(obj_size));
1573   if (res != NULL) {
1574     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1575     // if we should be tracking promotions, do so.
1576     if (_promoInfo.tracking()) {
1577         _promoInfo.track((PromotedObject*)res);
1578     }
1579   }
1580   return oop(res);
1581 }
1582 
1583 HeapWord*
1584 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1585   assert_locked();
1586   assert(size >= MinChunkSize, "minimum chunk size");
1587   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1588     "maximum from smallLinearAllocBlock");
1589   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1590 }
1591 
1592 HeapWord*
1593 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1594                                                        size_t size) {
1595   assert_locked();
1596   assert(size >= MinChunkSize, "too small");
1597   HeapWord* res = NULL;
1598   // Try to do linear allocation from blk, first making sure that the block is non-empty.
1599   if (blk->_word_size == 0) {
1600     // We have probably been unable to fill this either in the prologue or
1601     // when it was exhausted at the last linear allocation. Bail out until
1602     // next time.
1603     assert(blk->_ptr == NULL, "consistency check");
1604     return NULL;
1605   }
1606   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1607   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1608   if (res != NULL) return res;
1609 
1610   // about to exhaust this linear allocation block
1611   if (blk->_word_size == size) { // exactly satisfied
1612     res = blk->_ptr;
1613     _bt.allocated(res, blk->_word_size);
1614   } else if (size + MinChunkSize <= blk->_refillSize) {
1615     size_t sz = blk->_word_size;
1616     // Update _unallocated_block if the size is such that the chunk would be
1617     // returned to the indexed free list.  All other chunks in the indexed
1618     // free lists are allocated from the dictionary so that _unallocated_block
1619     // has already been adjusted for them.  Do it here so that the cost
1620     // is paid for all chunks added back to the indexed free lists.
1621     if (sz < SmallForDictionary) {
1622       _bt.allocated(blk->_ptr, sz);
1623     }
1624     // Return the chunk that isn't big enough, and then refill below.
1625     addChunkToFreeLists(blk->_ptr, sz);
1626     split_birth(sz);
1627     // Don't keep statistics on adding back chunk from a LinAB.
1628   } else {
1629     // A refilled block would not satisfy the request.
1630     return NULL;
1631   }
1632 
1633   blk->_ptr = NULL; blk->_word_size = 0;
1634   refillLinearAllocBlock(blk);
1635   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1636          "block was replenished");
1637   if (res != NULL) {
1638     split_birth(size);
1639     repairLinearAllocBlock(blk);
1640   } else if (blk->_ptr != NULL) {
1641     res = blk->_ptr;
1642     size_t blk_size = blk->_word_size;
1643     blk->_word_size -= size;
1644     blk->_ptr  += size;
1645     split_birth(size);
1646     repairLinearAllocBlock(blk);
1647     // Update BOT last so that other (parallel) GC threads see a consistent
1648     // view of the BOT and free blocks.
1649     // Above must occur before BOT is updated below.
1650     OrderAccess::storestore();
1651     _bt.split_block(res, blk_size, size);  // adjust block offset table
1652   }
1653   return res;
1654 }
1655 
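     // The common, fast case: if the linear allocation block has at least
     // size + MinChunkSize words left, carve size words off its front. The
     // block's fields are updated before the BOT (with a storestore barrier
     // in between) so that parallel GC threads always see a consistent view.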
1656 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1657                                         LinearAllocBlock* blk,
1658                                         size_t size) {
1659   assert_locked();
1660   assert(size >= MinChunkSize, "too small");
1661 
1662   HeapWord* res = NULL;
1663   // This is the common case.  Keep it simple.
1664   if (blk->_word_size >= size + MinChunkSize) {
1665     assert(blk->_ptr != NULL, "consistency check");
1666     res = blk->_ptr;
1667     // Note that the BOT is up-to-date for the linAB before allocation.  It
1668     // indicates the start of the linAB.  The split_block() updates the
1669     // BOT for the linAB after the allocation (indicates the start of the
1670     // next chunk to be allocated).
1671     size_t blk_size = blk->_word_size;
1672     blk->_word_size -= size;
1673     blk->_ptr  += size;
1674     split_birth(size);
1675     repairLinearAllocBlock(blk);
1676     // Update BOT last so that other (parallel) GC threads see a consistent
1677     // view of the BOT and free blocks.
1678     // Above must occur before BOT is updated below.
1679     OrderAccess::storestore();
1680     _bt.split_block(res, blk_size, size);  // adjust block offset table
1681     _bt.allocated(res, size);
1682   }
1683   return res;
1684 }
1685 
1686 FreeChunk*
1687 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1688   assert_locked();
1689   assert(size < SmallForDictionary, "just checking");
1690   FreeChunk* res;
1691   res = _indexedFreeList[size].get_chunk_at_head();
1692   if (res == NULL) {
1693     res = getChunkFromIndexedFreeListHelper(size);
1694   }
1695   _bt.verify_not_unallocated((HeapWord*) res, size);
1696   assert(res == NULL || res->size() == size, "Incorrect block size");
1697   return res;
1698 }
1699 
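     // Replenish the indexed free list for the given (empty or underpopulated)
     // size. A larger chunk of CMSIndexedFreeListReplenish * size words is
     // obtained from an overpopulated indexed list, via bestFitSmall(), by
     // recursion, or from the dictionary, and is carved into size-word pieces;
     // all but the last piece go onto the list and the last one is returned to
     // the caller. In the recursive case (replenish == false) the whole larger
     // chunk is returned instead, unless CMSReplenishIntermediate is set.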
1700 FreeChunk*
1701 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1702   bool replenish) {
1703   assert_locked();
1704   FreeChunk* fc = NULL;
1705   if (size < SmallForDictionary) {
1706     assert(_indexedFreeList[size].head() == NULL ||
1707       _indexedFreeList[size].surplus() <= 0,
1708       "List for this size should be empty or under populated");
1709     // Try best fit in exact lists before replenishing the list
1710     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1711       // Replenish list.
1712       //
1713       // Things tried that failed.
1714       //   Tried allocating out of the two LinAB's first before
1715       // replenishing lists.
1716       //   Tried small linAB of size 256 (size in indexed list)
1717       // and replenishing indexed lists from the small linAB.
1718       //
1719       FreeChunk* newFc = NULL;
1720       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1721       if (replenish_size < SmallForDictionary) {
1722         // Do not replenish from an underpopulated size.
1723         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1724             _indexedFreeList[replenish_size].head() != NULL) {
1725           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1726         } else if (bestFitFirst()) {
1727           newFc = bestFitSmall(replenish_size);
1728         }
1729       }
1730       if (newFc == NULL && replenish_size > size) {
1731         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1732         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1733       }
1734       // Note: The split-death statistics for the block obtained above
1735       // are recorded below, precisely when we know we are actually going
1736       // to split it into more than one piece.
1737       if (newFc != NULL) {
1738         if  (replenish || CMSReplenishIntermediate) {
1739           // Replenish this list and return one block to caller.
1740           size_t i;
1741           FreeChunk *curFc, *nextFc;
1742           size_t num_blk = newFc->size() / size;
1743           assert(num_blk >= 1, "Smaller than requested?");
1744           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1745           if (num_blk > 1) {
1746             // we are sure we will be splitting the block just obtained
1747             // into multiple pieces; record the split-death of the original
1748             splitDeath(replenish_size);
1749           }
1750           // carve up and link blocks 0, ..., num_blk - 2
1751           // The last chunk is not added to the lists but is returned as the
1752           // free chunk.
1753           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1754                i = 0;
1755                i < (num_blk - 1);
1756                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1757                i++) {
1758             curFc->set_size(size);
1759             // Don't record this as a return, in order to be able to
1760             // determine the "returns" from a GC.
1761             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1762             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1763             _bt.mark_block((HeapWord*)curFc, size);
1764             split_birth(size);
1765             // Don't record the initial population of the indexed list
1766             // as a split birth.
1767           }
1768 
1769           // check that the arithmetic was OK above
1770           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1771             "inconsistency in carving newFc");
1772           curFc->set_size(size);
1773           _bt.mark_block((HeapWord*)curFc, size);
1774           split_birth(size);
1775           fc = curFc;
1776         } else {
1777           // Return entire block to caller
1778           fc = newFc;
1779         }
1780       }
1781     }
1782   } else {
1783     // Get a free chunk from the free chunk dictionary to be returned to
1784     // replenish the indexed free list.
1785     fc = getChunkFromDictionaryExact(size);
1786   }
1787   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1788   return fc;
1789 }
1790 
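     // Get a chunk of at least size words from the dictionary. If the chunk
     // found is large enough to leave a remainder of at least MinChunkSize,
     // the remainder is split off and returned to the free lists, so the
     // result is always within MinChunkSize of the requested size.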
1791 FreeChunk*
1792 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1793   assert_locked();
1794   FreeChunk* fc = _dictionary->get_chunk(size);
1795   if (fc == NULL) {
1796     return NULL;
1797   }
1798   _bt.allocated((HeapWord*)fc, fc->size());
1799   if (fc->size() >= size + MinChunkSize) {
1800     fc = splitChunkAndReturnRemainder(fc, size);
1801   }
1802   assert(fc->size() >= size, "chunk too small");
1803   assert(fc->size() < size + MinChunkSize, "chunk too big");
1804   _bt.verify_single_block((HeapWord*)fc, fc->size());
1805   return fc;
1806 }
1807 
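     // Like getChunkFromDictionary(), but the returned chunk is exactly size
     // words. A chunk that is larger than the request yet too small to split
     // (its remainder would be under MinChunkSize) is handed back to the
     // dictionary and a chunk of at least size + MinChunkSize is obtained
     // and split instead.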
1808 FreeChunk*
1809 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1810   assert_locked();
1811   FreeChunk* fc = _dictionary->get_chunk(size);
1812   if (fc == NULL) {
1813     return fc;
1814   }
1815   _bt.allocated((HeapWord*)fc, fc->size());
1816   if (fc->size() == size) {
1817     _bt.verify_single_block((HeapWord*)fc, size);
1818     return fc;
1819   }
1820   assert(fc->size() > size, "get_chunk() guarantee");
1821   if (fc->size() < size + MinChunkSize) {
1822     // Return the chunk to the dictionary and go get a bigger one.
1823     returnChunkToDictionary(fc);
1824     fc = _dictionary->get_chunk(size + MinChunkSize);
1825     if (fc == NULL) {
1826       return NULL;
1827     }
1828     _bt.allocated((HeapWord*)fc, fc->size());
1829   }
1830   assert(fc->size() >= size + MinChunkSize, "tautology");
1831   fc = splitChunkAndReturnRemainder(fc, size);
1832   assert(fc->size() == size, "chunk is wrong size");
1833   _bt.verify_single_block((HeapWord*)fc, size);
1834   return fc;
1835 }
1836 
1837 void
1838 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1839   assert_locked();
1840 
1841   size_t size = chunk->size();
1842   _bt.verify_single_block((HeapWord*)chunk, size);
1843   // adjust _unallocated_block downward, as necessary
1844   _bt.freed((HeapWord*)chunk, size);
1845   _dictionary->return_chunk(chunk);
1846 #ifndef PRODUCT
1847   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1848     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1849     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1850     tl->verify_stats();
1851   }
1852 #endif // PRODUCT
1853 }
1854 
1855 void
1856 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1857   assert_locked();
1858   size_t size = fc->size();
1859   _bt.verify_single_block((HeapWord*) fc, size);
1860   _bt.verify_not_unallocated((HeapWord*) fc, size);
1861   _indexedFreeList[size].return_chunk_at_tail(fc);
1862 #ifndef PRODUCT
1863   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1864      _indexedFreeList[size].verify_stats();
1865   }
1866 #endif // PRODUCT
1867 }
1868 
1869 // Add the chunk to the free lists, coalescing it with the dictionary's
1870 // largest block if that block happens to end exactly where the chunk
1871 // begins, and update BOT and census data. We would of course have
1872 // preferred to coalesce with the preceding block, but it's currently
1873 // less expensive to find the largest block than it is to find the last.
1874 void
1875 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1876   HeapWord* chunk, size_t     size) {
1877   // check that the chunk does lie in this space!
1878   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1879   // One of the parallel gc task threads may be here
1880   // whilst others are allocating.
1881   Mutex* lock = &_parDictionaryAllocLock;
1882   FreeChunk* ec;
1883   {
1884     MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
1885     ec = dictionary()->find_largest_dict();  // get largest block
1886     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1887       // It's a coterminal block - we can coalesce.
1888       size_t old_size = ec->size();
1889       coalDeath(old_size);
1890       removeChunkFromDictionary(ec);
1891       size += old_size;
1892     } else {
1893       ec = (FreeChunk*)chunk;
1894     }
1895   }
1896   ec->set_size(size);
1897   debug_only(ec->mangleFreed(size));
1898   if (size < SmallForDictionary) {
1899     lock = _indexedFreeListParLocks[size];
1900   }
1901   MutexLocker x(lock, Mutex::_no_safepoint_check_flag);
1902   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1903   // Record the birth under the lock, since the recording involves
1904   // manipulation of the list on which the chunk lives; if the chunk
1905   // were allocated and were the last one on the list, the list
1906   // could go away.
1907   coalBirth(size);
1908 }
1909 
1910 void
1911 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1912                                               size_t     size) {
1913   // check that the chunk does lie in this space!
1914   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1915   assert_locked();
1916   _bt.verify_single_block(chunk, size);
1917 
1918   FreeChunk* fc = (FreeChunk*) chunk;
1919   fc->set_size(size);
1920   debug_only(fc->mangleFreed(size));
1921   if (size < SmallForDictionary) {
1922     returnChunkToFreeList(fc);
1923   } else {
1924     returnChunkToDictionary(fc);
1925   }
1926 }
1927 
1928 void
1929 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1930   size_t size, bool coalesced) {
1931   assert_locked();
1932   assert(chunk != NULL, "null chunk");
1933   if (coalesced) {
1934     // repair BOT
1935     _bt.single_block(chunk, size);
1936   }
1937   addChunkToFreeLists(chunk, size);
1938 }
1939 
1940 // We _must_ find the purported chunk on our free lists;
1941 // we assert if we don't.
1942 void
1943 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1944   size_t size = fc->size();
1945   assert_locked();
1946   debug_only(verifyFreeLists());
1947   if (size < SmallForDictionary) {
1948     removeChunkFromIndexedFreeList(fc);
1949   } else {
1950     removeChunkFromDictionary(fc);
1951   }
1952   _bt.verify_single_block((HeapWord*)fc, size);
1953   debug_only(verifyFreeLists());
1954 }
1955 
1956 void
1957 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1958   size_t size = fc->size();
1959   assert_locked();
1960   assert(fc != NULL, "null chunk");
1961   _bt.verify_single_block((HeapWord*)fc, size);
1962   _dictionary->remove_chunk(fc);
1963   // adjust _unallocated_block upward, as necessary
1964   _bt.allocated((HeapWord*)fc, size);
1965 }
1966 
1967 void
1968 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1969   assert_locked();
1970   size_t size = fc->size();
1971   _bt.verify_single_block((HeapWord*)fc, size);
1972   NOT_PRODUCT(
1973     if (FLSVerifyIndexTable) {
1974       verifyIndexedFreeList(size);
1975     }
1976   )
1977   _indexedFreeList[size].remove_chunk(fc);
1978   NOT_PRODUCT(
1979     if (FLSVerifyIndexTable) {
1980       verifyIndexedFreeList(size);
1981     }
1982   )
1983 }
1984 
1985 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1986   /* A hint is the next larger size that has a surplus.
1987      Start search at a size large enough to guarantee that
1988      the excess is >= MIN_CHUNK. */
1989   size_t start = align_object_size(numWords + MinChunkSize);
1990   if (start < IndexSetSize) {
1991     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1992     size_t    hint = _indexedFreeList[start].hint();
1993     while (hint < IndexSetSize) {
1994       assert(is_object_aligned(hint), "hint should be aligned");
1995       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1996       if (fl->surplus() > 0 && fl->head() != NULL) {
1997         // Found a list with surplus, reset original hint
1998         // and split out a free chunk which is returned.
1999         _indexedFreeList[start].set_hint(hint);
2000         FreeChunk* res = getFromListGreater(fl, numWords);
2001         assert(res == NULL || res->is_free(),
2002           "Should be returning a free chunk");
2003         return res;
2004       }
2005       hint = fl->hint(); /* keep looking */
2006     }
2007     /* None found. */
2008     it[start].set_hint(IndexSetSize);
2009   }
2010   return NULL;
2011 }
2012 
2013 /* Requires fl->size >= numWords + MinChunkSize */
2014 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
2015   size_t numWords) {
2016   FreeChunk *curr = fl->head();
2017   assert(curr != NULL, "List is empty");
2018   assert(numWords >= MinChunkSize, "Word size is too small");
2019   size_t oldNumWords = curr->size();
2020   assert(oldNumWords >= numWords + MinChunkSize,
2021         "Size of chunks in the list is too small");
2022 
2023   fl->remove_chunk(curr);
2024   // recorded indirectly by splitChunkAndReturnRemainder -
2025   // smallSplit(oldNumWords, numWords);
2026   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
2027   // Does anything have to be done for the remainder in terms of
2028   // fixing the card table?
2029   assert(new_chunk == NULL || new_chunk->is_free(),
2030     "Should be returning a free chunk");
2031   return new_chunk;
2032 }
2033 
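     // Split chunk into a leading piece of new_size words, which is returned
     // to the caller, and a trailing remainder of at least MinChunkSize words,
     // which goes back onto the indexed free lists or into the dictionary.
     // The remainder is fully initialized as a free chunk before the BOT is
     // updated (storestore barrier) so that other GC threads never see an
     // inconsistent view; split() records the census update.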
2034 FreeChunk*
2035 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
2036   size_t new_size) {
2037   assert_locked();
2038   size_t size = chunk->size();
2039   assert(size > new_size, "Split from a smaller block?");
2040   assert(is_aligned(chunk), "alignment problem");
2041   assert(size == adjustObjectSize(size), "alignment problem");
2042   size_t rem_sz = size - new_size;
2043   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
2044   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
2045   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
2046   assert(is_aligned(ffc), "alignment problem");
2047   ffc->set_size(rem_sz);
2048   ffc->link_next(NULL);
2049   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2050   // Above must occur before BOT is updated below.
2051   // adjust block offset table
2052   OrderAccess::storestore();
2053   assert(chunk->is_free() && ffc->is_free(), "Error");
2054   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
2055   if (rem_sz < SmallForDictionary) {
2056     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
2057     bool is_par = Thread::current()->is_GC_task_thread();
2058     if (is_par) _indexedFreeListParLocks[rem_sz]->lock_without_safepoint_check();
2059     returnChunkToFreeList(ffc);
2060     split(size, rem_sz);
2061     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
2062   } else {
2063     returnChunkToDictionary(ffc);
2064     split(size, rem_sz);
2065   }
2066   chunk->set_size(new_size);
2067   return chunk;
2068 }
2069 
2070 void
2071 CompactibleFreeListSpace::sweep_completed() {
2072   // Now that space is probably plentiful, refill linear
2073   // allocation blocks as needed.
2074   refillLinearAllocBlocksIfNeeded();
2075 }
2076 
2077 void
2078 CompactibleFreeListSpace::gc_prologue() {
2079   assert_locked();
2080   reportFreeListStatistics("Before GC:");
2081   refillLinearAllocBlocksIfNeeded();
2082 }
2083 
2084 void
2085 CompactibleFreeListSpace::gc_epilogue() {
2086   assert_locked();
2087   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2088   _promoInfo.stopTrackingPromotions();
2089   repairLinearAllocationBlocks();
2090   reportFreeListStatistics("After GC:");
2091 }
2092 
2093 // Iteration support, mostly delegated from a CMS generation
2094 
2095 void CompactibleFreeListSpace::save_marks() {
2096   assert(Thread::current()->is_VM_thread(),
2097          "Global variable should only be set when single-threaded");
2098   // Mark the "end" of the used space at the time of this call;
2099   // note, however, that promoted objects from this point
2100   // on are tracked in the _promoInfo below.
2101   set_saved_mark_word(unallocated_block());
2102 #ifdef ASSERT
2103   // Check the sanity of save_marks() etc.
2104   MemRegion ur    = used_region();
2105   MemRegion urasm = used_region_at_save_marks();
2106   assert(ur.contains(urasm),
2107          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
2108          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
2109          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
2110 #endif
2111   // inform allocator that promotions should be tracked.
2112   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2113   _promoInfo.startTrackingPromotions();
2114 }
2115 
2116 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2117   assert(_promoInfo.tracking(), "No preceding save_marks?");
2118   return _promoInfo.noPromotions();
2119 }
2120 
2121 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2122   return _smallLinearAllocBlock._word_size == 0;
2123 }
2124 
2125 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2126   // Fix up linear allocation blocks to look like free blocks
2127   repairLinearAllocBlock(&_smallLinearAllocBlock);
2128 }
2129 
2130 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2131   assert_locked();
2132   if (blk->_ptr != NULL) {
2133     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2134            "Minimum block size requirement");
2135     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2136     fc->set_size(blk->_word_size);
2137     fc->link_prev(NULL);   // mark as free
2138     fc->dontCoalesce();
2139     assert(fc->is_free(), "just marked it free");
2140     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2141   }
2142 }
2143 
2144 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2145   assert_locked();
2146   if (_smallLinearAllocBlock._ptr == NULL) {
2147     assert(_smallLinearAllocBlock._word_size == 0,
2148       "Size of linAB should be zero if the ptr is NULL");
2149     // Reset the linAB refill and allocation size limit.
2150     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2151   }
2152   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2153 }
2154 
2155 void
2156 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2157   assert_locked();
2158   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2159          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2160          "blk invariant");
2161   if (blk->_ptr == NULL) {
2162     refillLinearAllocBlock(blk);
2163   }
2164 }
2165 
2166 void
2167 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2168   assert_locked();
2169   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2170          "linear allocation block should be empty");
2171   FreeChunk* fc;
2172   if (blk->_refillSize < SmallForDictionary &&
2173       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2174     // A linAB's strategy might be to use small sizes to reduce
2175     // fragmentation but still get the benefits of allocation from a
2176     // linAB.
2177   } else {
2178     fc = getChunkFromDictionary(blk->_refillSize);
2179   }
2180   if (fc != NULL) {
2181     blk->_ptr  = (HeapWord*)fc;
2182     blk->_word_size = fc->size();
2183     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2184   }
2185 }
2186 
2187 // Support for compaction
2188 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2189   scan_and_forward(this, cp);
2190   // prepare_for_compaction() uses the space between live objects
2191   // so that later phases can skip dead space quickly.  As a result,
2192   // verification of the free lists doesn't work after this point.
2193 }
2194 
2195 void CompactibleFreeListSpace::adjust_pointers() {
2196   // In other versions of adjust_pointers(), a bail out
2197   // based on the amount of live data in the generation
2198   // (i.e., if 0, bail out) may be used.
2199   // Cannot test used() == 0 here because the free lists have already
2200   // been mangled by the compaction.
2201 
2202   scan_and_adjust_pointers(this);
2203   // See note about verification in prepare_for_compaction().
2204 }
2205 
2206 void CompactibleFreeListSpace::compact() {
2207   scan_and_compact(this);
2208 }
2209 
2210 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2211 // where fbs is free block sizes
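     // For example: if all the free space is one 1000-word block, the ratio is
     // 1000^2 / 1000^2 = 1 and the metric is 0 (unfragmented); if the same
     // 1000 words are spread over 100 ten-word blocks, the ratio is
     // (100 * 10^2) / 1000^2 = 0.01 and the metric is 0.99 (highly fragmented).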
2212 double CompactibleFreeListSpace::flsFrag() const {
2213   size_t itabFree = totalSizeInIndexedFreeLists();
2214   double frag = 0.0;
2215   size_t i;
2216 
2217   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2218     double sz  = i;
2219     frag      += _indexedFreeList[i].count() * (sz * sz);
2220   }
2221 
2222   double totFree = itabFree +
2223                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2224   if (totFree > 0) {
2225     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2226             (totFree * totFree));
2227     frag = (double)1.0  - frag;
2228   } else {
2229     assert(frag == 0.0, "Follows from totFree == 0");
2230   }
2231   return frag;
2232 }
2233 
2234 void CompactibleFreeListSpace::beginSweepFLCensus(
2235   float inter_sweep_current,
2236   float inter_sweep_estimate,
2237   float intra_sweep_estimate) {
2238   assert_locked();
2239   size_t i;
2240   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2241     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2242     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2243     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2244     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2245     fl->set_before_sweep(fl->count());
2246     fl->set_bfr_surp(fl->surplus());
2247   }
2248   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2249                                     inter_sweep_current,
2250                                     inter_sweep_estimate,
2251                                     intra_sweep_estimate);
2252 }
2253 
2254 void CompactibleFreeListSpace::setFLSurplus() {
2255   assert_locked();
2256   size_t i;
2257   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2258     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2259     fl->set_surplus(fl->count() -
2260                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2261   }
2262 }
2263 
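     // Recompute the hints: walking from the largest indexed size down, each
     // list's hint is set to the smallest larger size that currently has a
     // surplus; lists with no larger surplus size get IndexSetSize as their hint.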
2264 void CompactibleFreeListSpace::setFLHints() {
2265   assert_locked();
2266   size_t i;
2267   size_t h = IndexSetSize;
2268   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2269     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2270     fl->set_hint(h);
2271     if (fl->surplus() > 0) {
2272       h = i;
2273     }
2274   }
2275 }
2276 
2277 void CompactibleFreeListSpace::clearFLCensus() {
2278   assert_locked();
2279   size_t i;
2280   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2281     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2282     fl->set_prev_sweep(fl->count());
2283     fl->set_coal_births(0);
2284     fl->set_coal_deaths(0);
2285     fl->set_split_births(0);
2286     fl->set_split_deaths(0);
2287   }
2288 }
2289 
2290 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2291   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2292   setFLSurplus();
2293   setFLHints();
2294   printFLCensus(sweep_count);
2295   clearFLCensus();
2296   assert_locked();
2297   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2298 }
2299 
2300 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2301   if (size < SmallForDictionary) {
2302     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2303     return (fl->coal_desired() < 0) ||
2304            ((int)fl->count() > fl->coal_desired());
2305   } else {
2306     return dictionary()->coal_dict_over_populated(size);
2307   }
2308 }
2309 
2310 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2311   assert(size < SmallForDictionary, "Size too large for indexed list");
2312   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2313   fl->increment_coal_births();
2314   fl->increment_surplus();
2315 }
2316 
2317 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2318   assert(size < SmallForDictionary, "Size too large for indexed list");
2319   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2320   fl->increment_coal_deaths();
2321   fl->decrement_surplus();
2322 }
2323 
2324 void CompactibleFreeListSpace::coalBirth(size_t size) {
2325   if (size  < SmallForDictionary) {
2326     smallCoalBirth(size);
2327   } else {
2328     dictionary()->dict_census_update(size,
2329                                    false /* split */,
2330                                    true /* birth */);
2331   }
2332 }
2333 
2334 void CompactibleFreeListSpace::coalDeath(size_t size) {
2335   if (size < SmallForDictionary) {
2336     smallCoalDeath(size);
2337   } else {
2338     dictionary()->dict_census_update(size,
2339                                    false /* split */,
2340                                    false /* birth */);
2341   }
2342 }
2343 
2344 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2345   assert(size < SmallForDictionary, "Size too large for indexed list");
2346   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2347   fl->increment_split_births();
2348   fl->increment_surplus();
2349 }
2350 
2351 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2352   assert(size < SmallForDictionary, "Size too large for indexed list");
2353   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2354   fl->increment_split_deaths();
2355   fl->decrement_surplus();
2356 }
2357 
2358 void CompactibleFreeListSpace::split_birth(size_t size) {
2359   if (size  < SmallForDictionary) {
2360     smallSplitBirth(size);
2361   } else {
2362     dictionary()->dict_census_update(size,
2363                                    true /* split */,
2364                                    true /* birth */);
2365   }
2366 }
2367 
2368 void CompactibleFreeListSpace::splitDeath(size_t size) {
2369   if (size  < SmallForDictionary) {
2370     smallSplitDeath(size);
2371   } else {
2372     dictionary()->dict_census_update(size,
2373                                    true /* split */,
2374                                    false /* birth */);
2375   }
2376 }
2377 
2378 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2379   size_t to2 = from - to1;
2380   splitDeath(from);
2381   split_birth(to1);
2382   split_birth(to2);
2383 }
2384 
2385 void CompactibleFreeListSpace::print() const {
2386   print_on(tty);
2387 }
2388 
2389 void CompactibleFreeListSpace::prepare_for_verify() {
2390   assert_locked();
2391   repairLinearAllocationBlocks();
2392   // Verify that the SpoolBlocks look like free blocks of
2393   // appropriate sizes... To be done ...
2394 }
2395 
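     // Walks every block in the space during verification: objects must be
     // valid oops (and verify cleanly if alive), free chunks must appear on a
     // free list when FLSVerifyLists is set, and a block of size zero is
     // reported as a livelock because the walk could make no progress past it.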
2396 class VerifyAllBlksClosure: public BlkClosure {
2397  private:
2398   const CompactibleFreeListSpace* _sp;
2399   const MemRegion                 _span;
2400   HeapWord*                       _last_addr;
2401   size_t                          _last_size;
2402   bool                            _last_was_obj;
2403   bool                            _last_was_live;
2404 
2405  public:
2406   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2407     MemRegion span) :  _sp(sp), _span(span),
2408                        _last_addr(NULL), _last_size(0),
2409                        _last_was_obj(false), _last_was_live(false) { }
2410 
2411   virtual size_t do_blk(HeapWord* addr) {
2412     size_t res;
2413     bool   was_obj  = false;
2414     bool   was_live = false;
2415     if (_sp->block_is_obj(addr)) {
2416       was_obj = true;
2417       oop p = oop(addr);
2418       guarantee(oopDesc::is_oop(p), "Should be an oop");
2419       res = _sp->adjustObjectSize(p->size());
2420       if (_sp->obj_is_alive(addr)) {
2421         was_live = true;
2422         oopDesc::verify(p);
2423       }
2424     } else {
2425       FreeChunk* fc = (FreeChunk*)addr;
2426       res = fc->size();
2427       if (FLSVerifyLists && !fc->cantCoalesce()) {
2428         guarantee(_sp->verify_chunk_in_free_list(fc),
2429                   "Chunk should be on a free list");
2430       }
2431     }
2432     if (res == 0) {
2433       Log(gc, verify) log;
2434       log.error("Livelock: no rank reduction!");
2435       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2436                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2437         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2438         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2439       LogStream ls(log.error());
2440       _sp->print_on(&ls);
2441       guarantee(false, "Verification failed.");
2442     }
2443     _last_addr = addr;
2444     _last_size = res;
2445     _last_was_obj  = was_obj;
2446     _last_was_live = was_live;
2447     return res;
2448   }
2449 };
2450 
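     // Used by verify() when FLSVerifyAllHeapReferences is set: visits every
     // reference in the heap, checking that references into the CMS span point
     // at valid objects (and, once remark has completed, that marking is
     // consistent), and that references from this space to objects outside it
     // are valid oops.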
2451 class VerifyAllOopsClosure: public BasicOopIterateClosure {
2452  private:
2453   const CMSCollector*             _collector;
2454   const CompactibleFreeListSpace* _sp;
2455   const MemRegion                 _span;
2456   const bool                      _past_remark;
2457   const CMSBitMap*                _bit_map;
2458 
2459  protected:
2460   void do_oop(void* p, oop obj) {
2461     if (_span.contains(obj)) { // the interior oop points into CMS heap
2462       if (!_span.contains(p)) { // reference from outside CMS heap
2463         // Should be a valid object; the first disjunct below allows
2464         // us to sidestep an assertion in block_is_obj() that insists
2465         // that p be in _sp. Note that several generations (and spaces)
2466         // are spanned by _span (CMS heap) above.
2467         guarantee(!_sp->is_in_reserved(obj) ||
2468                   _sp->block_is_obj((HeapWord*)obj),
2469                   "Should be an object");
2470         guarantee(oopDesc::is_oop(obj), "Should be an oop");
2471         oopDesc::verify(obj);
2472         if (_past_remark) {
2473           // Remark has been completed, the object should be marked
2474           _bit_map->isMarked((HeapWord*)obj);
2475         }
2476       } else { // reference within CMS heap
2477         if (_past_remark) {
2478           // Remark has been completed -- so the referent should have
2479           // been marked, if referring object is.
2480           if (_bit_map->isMarked(_collector->block_start(p))) {
2481             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2482           }
2483         }
2484       }
2485     } else if (_sp->is_in_reserved(p)) {
2486       // the reference is from FLS, and points out of FLS
2487       guarantee(oopDesc::is_oop(obj), "Should be an oop");
2488       oopDesc::verify(obj);
2489     }
2490   }
2491 
2492   template <class T> void do_oop_work(T* p) {
2493     T heap_oop = RawAccess<>::oop_load(p);
2494     if (!CompressedOops::is_null(heap_oop)) {
2495       oop obj = CompressedOops::decode_not_null(heap_oop);
2496       do_oop(p, obj);
2497     }
2498   }
2499 
2500  public:
2501   VerifyAllOopsClosure(const CMSCollector* collector,
2502     const CompactibleFreeListSpace* sp, MemRegion span,
2503     bool past_remark, CMSBitMap* bit_map) :
2504     _collector(collector), _sp(sp), _span(span),
2505     _past_remark(past_remark), _bit_map(bit_map) { }
2506 
2507   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2508   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2509 };
2510 
2511 void CompactibleFreeListSpace::verify() const {
2512   assert_lock_strong(&_freelistLock);
2513   verify_objects_initialized();
2514   MemRegion span = _collector->_span;
2515   bool past_remark = (_collector->abstract_state() ==
2516                       CMSCollector::Sweeping);
2517 
2518   ResourceMark rm;
2519   HandleMark  hm;
2520 
2521   // Check integrity of CFL data structures
2522   _promoInfo.verify();
2523   _dictionary->verify();
2524   if (FLSVerifyIndexTable) {
2525     verifyIndexedFreeLists();
2526   }
2527   // Check integrity of all objects and free blocks in space
2528   {
2529     VerifyAllBlksClosure cl(this, span);
2530     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2531   }
2532   // Check that all references in the heap to FLS
2533   // are to valid objects in FLS or that references in
2534   // FLS are to valid objects elsewhere in the heap
2535   if (FLSVerifyAllHeapReferences)
2536   {
2537     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2538       _collector->markBitMap());
2539 
2540     // Iterate over all oops in the heap.
2541     CMSHeap::heap()->oop_iterate(&cl);
2542   }
2543 
2544   if (VerifyObjectStartArray) {
2545     // Verify the block offset table
2546     _bt.verify();
2547   }
2548 }
2549 
2550 #ifndef PRODUCT
2551 void CompactibleFreeListSpace::verifyFreeLists() const {
2552   if (FLSVerifyLists) {
2553     _dictionary->verify();
2554     verifyIndexedFreeLists();
2555   } else {
2556     if (FLSVerifyDictionary) {
2557       _dictionary->verify();
2558     }
2559     if (FLSVerifyIndexTable) {
2560       verifyIndexedFreeLists();
2561     }
2562   }
2563 }
2564 #endif
2565 
2566 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2567   size_t i = 0;
2568   for (; i < IndexSetStart; i++) {
2569     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2570   }
2571   for (; i < IndexSetSize; i++) {
2572     verifyIndexedFreeList(i);
2573   }
2574 }
2575 
2576 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2577   FreeChunk* fc   =  _indexedFreeList[size].head();
2578   FreeChunk* tail =  _indexedFreeList[size].tail();
2579   size_t    num = _indexedFreeList[size].count();
2580   size_t      n = 0;
2581   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2582             "Slot should have been empty");
2583   for (; fc != NULL; fc = fc->next(), n++) {
2584     guarantee(fc->size() == size, "Size inconsistency");
2585     guarantee(fc->is_free(), "!free?");
2586     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2587     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2588   }
2589   guarantee(n == num, "Incorrect count");
2590 }
2591 
2592 #ifndef PRODUCT
2593 void CompactibleFreeListSpace::check_free_list_consistency() const {
2594   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2595     "Some sizes can't be allocated without recourse to"
2596     " linear allocation buffers");
2597   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2598     "else MIN_TREE_CHUNK_SIZE is wrong");
2599   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2600   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2601 }
2602 #endif
2603 
2604 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2605   assert_lock_strong(&_freelistLock);
2606   LogTarget(Debug, gc, freelist, census) log;
2607   if (!log.is_enabled()) {
2608     return;
2609   }
2610   AdaptiveFreeList<FreeChunk> total;
2611   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2612   ResourceMark rm;
2613   LogStream ls(log);
2614   outputStream* out = &ls;
2615   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2616   size_t total_free = 0;
2617   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2618     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2619     total_free += fl->count() * fl->size();
2620     if (i % (40*IndexSetStride) == 0) {
2621       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2622     }
2623     fl->print_on(out);
2624     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2625     total.set_surplus(    total.surplus()     + fl->surplus()    );
2626     total.set_desired(    total.desired()     + fl->desired()    );
2627     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2628     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2629     total.set_count(      total.count()       + fl->count()      );
2630     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2631     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2632     total.set_split_births(total.split_births() + fl->split_births());
2633     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2634   }
2635   total.print_on(out, "TOTAL");
2636   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
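       // "growth" is the net number of blocks created in the indexed lists
       // (split and coalesce births minus deaths) relative to the prev_sweep
       // count; "deficit" is the fraction of the desired count still unmet.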
2637   log.print("growth: %8.5f  deficit: %8.5f",
2638             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2639                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2640             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2641   _dictionary->print_dict_census(out);
2642 }
2643 
2644 ///////////////////////////////////////////////////////////////////////////
2645 // CompactibleFreeListSpaceLAB
2646 ///////////////////////////////////////////////////////////////////////////
2647 
2648 #define VECTOR_257(x)                                                                                  \
2649   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2650   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2651      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2652      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2653      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2654      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2655      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2656      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2657      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2658      x }
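     // Eight rows of 32 initializers plus the trailing one give 8 * 32 + 1 == 257
     // entries, matching CompactibleFreeListSpace::IndexSetSize (asserted in the
     // CompactibleFreeListSpaceLAB constructor below).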
2659 
2660 // Initialize with the CMS-specific default, _not_ the generic
2661 // OldPLABSize, whose static default is different; if OldPLABSize is
2662 // overridden on the command line, this will be reinitialized via a
2663 // call to modify_initialization() below.
2664 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2665   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2666 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2667 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2668 
2669 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2670   _cfls(cfls)
2671 {
2672   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2673   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2674        i < CompactibleFreeListSpace::IndexSetSize;
2675        i += CompactibleFreeListSpace::IndexSetStride) {
2676     _indexedFreeList[i].set_size(i);
2677     _num_blocks[i] = 0;
2678   }
2679 }
2680 
2681 static bool _CFLS_LAB_modified = false;
2682 
2683 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2684   assert(!_CFLS_LAB_modified, "Call only once");
2685   _CFLS_LAB_modified = true;
2686   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2687        i < CompactibleFreeListSpace::IndexSetSize;
2688        i += CompactibleFreeListSpace::IndexSetStride) {
2689     _blocks_to_claim[i].modify(n, wt, true /* force */);
2690   }
2691 }
2692 
2693 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2694   FreeChunk* res;
2695   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2696   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2697     // This locking manages sync with other large object allocations.
2698     MutexLocker x(_cfls->parDictionaryAllocLock(),
2699                   Mutex::_no_safepoint_check_flag);
2700     res = _cfls->getChunkFromDictionaryExact(word_sz);
2701     if (res == NULL) return NULL;
2702   } else {
2703     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2704     if (fl->count() == 0) {
2705       // Attempt to refill this local free list.
2706       get_from_global_pool(word_sz, fl);
2707       // If it didn't work, give up.
2708       if (fl->count() == 0) return NULL;
2709     }
2710     res = fl->get_chunk_at_head();
2711     assert(res != NULL, "Why was count non-zero?");
2712   }
2713   res->markNotFree();
2714   assert(!res->is_free(), "shouldn't be marked free");
2715   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2716   // Mangle the just-allocated chunk with a distinct pattern (debug builds only).
2717   debug_only(res->mangleAllocated(word_sz));
2718   return (HeapWord*)res;
2719 }
2720 
2721 // Get a chunk of blocks of the right size and update related
2722 // book-keeping stats
2723 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2724   // Get the #blocks we want to claim
2725   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2726   assert(n_blks > 0, "Error");
2727   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2728   // In some cases, when the application has a phase change,
2729   // there may be a sudden and sharp shift in the object survival
2730   // profile, and updating the counts at the end of a scavenge
2731   // may not be quick enough, giving rise to large scavenge pauses
2732   // during these phase changes. It is beneficial to detect such
2733   // changes on-the-fly during a scavenge and avoid such a phase-change
2734   // pothole. The following code is a heuristic attempt to do that.
2735   // It is protected by a product flag until we have gained
2736   // enough experience with this heuristic and fine-tuned its behavior.
2737   // WARNING: This might increase fragmentation if we overreact to
2738   // small spikes, so some form of historical smoothing of this boost,
2739   // based on experience with the increased reactivity, might be useful.
2740   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2741   // default.
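       // For illustration (made-up numbers, not the flag defaults): with
       // CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4 and
       // n_blks == 16, a thread that has already taken
       // _num_blocks[word_sz] == 512 blocks of this size gets
       // multiple == 512 / (4 * 4 * 16) == 2, so n_blks is boosted by
       // CMSOldPLABReactivityFactor * 2 * n_blks and then clamped to
       // CMSOldPLABMax.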
2742   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2743     //
2744     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2745     // which is why there is a cast to double.
2746     //
2747     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2748     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2749     n_blks = MIN2(n_blks, CMSOldPLABMax);
2750   }
2751   assert(n_blks > 0, "Error");
2752   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2753   // Update stats table entry for this block size
2754   _num_blocks[word_sz] += fl->count();
2755 }
2756 
2757 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2758   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2759        i < CompactibleFreeListSpace::IndexSetSize;
2760        i += CompactibleFreeListSpace::IndexSetStride) {
2761     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2762            "Counter inconsistency");
2763     if (_global_num_workers[i] > 0) {
2764       // Smooth the new sample with respect to the historical average.
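           // The sample is the per-worker, per-refill demand observed in the
           // last scavenge, clamped to [CMSOldPLABMin, CMSOldPLABMax].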
2765       if (ResizeOldPLAB) {
2766         _blocks_to_claim[i].sample(
2767           MAX2(CMSOldPLABMin,
2768           MIN2(CMSOldPLABMax,
2769                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2770       }
2771       // Reset counters for next round
2772       _global_num_workers[i] = 0;
2773       _global_num_blocks[i] = 0;
2774       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2775     }
2776   }
2777 }
2778 
2779 // If this is changed in the future to allow parallel
2780 // access, one would need to take the FL locks and,
2781 // depending on how it is used, stagger access from
2782 // parallel threads to reduce contention.
2783 void CompactibleFreeListSpaceLAB::retire(int tid) {
2784   // We run this single threaded with the world stopped;
2785   // so no need for locks and such.
2786   NOT_PRODUCT(Thread* t = Thread::current();)
2787   assert(Thread::current()->is_VM_thread(), "Error");
2788   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2789        i < CompactibleFreeListSpace::IndexSetSize;
2790        i += CompactibleFreeListSpace::IndexSetStride) {
2791     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2792            "Can't retire more than what we obtained");
2793     if (_num_blocks[i] > 0) {
2794       size_t num_retire =  _indexedFreeList[i].count();
2795       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2796       {
2797         // MutexLocker x(_cfls->_indexedFreeListParLocks[i],
2798         //               Mutex::_no_safepoint_check_flag);
2799 
2800         // Update globals stats for num_blocks used
2801         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2802         _global_num_workers[i]++;
2803         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2804         if (num_retire > 0) {
2805           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2806           // Reset this list.
2807           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2808           _indexedFreeList[i].set_size(i);
2809         }
2810       }
2811       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2812                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2813       // Reset stats for next round
2814       _num_blocks[i]         = 0;
2815     }
2816   }
2817 }
2818 
2819 // Used by par_get_chunk_of_blocks() for the chunks from the
2820 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2821 // of "word_sz"; if one is found, splits it into "word_sz"-sized chunks
2822 // and adds those to the free list "fl".  "n" is the maximum number of
2823 // chunks to be added to "fl".
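     // For example, with word_sz == 4 and CMSSplitIndexedFreeListBlocks
     // enabled, we look at the size-4 list first, then the size-8 list
     // (splitting each chunk taken from it 2 ways), then size-12 (3 ways),
     // and so on while k * word_sz stays below IndexSetSize.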
2824 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2825 
2826   // We'll try all multiples of word_sz in the indexed set, starting with
2827   // word_sz itself and, if CMSSplitIndexedFreeListBlocks is set, larger
2828   // multiples as well; failing that, the caller splits a big dictionary chunk.
2829   {
2830     bool found;
2831     int  k;
2832     size_t cur_sz;
2833     for (k = 1, cur_sz = k * word_sz, found = false;
2834          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2835          (CMSSplitIndexedFreeListBlocks || k <= 1);
2836          k++, cur_sz = k * word_sz) {
2837       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2838       fl_for_cur_sz.set_size(cur_sz);
2839       {
2840         MutexLocker x(_indexedFreeListParLocks[cur_sz],
2841                       Mutex::_no_safepoint_check_flag);
2842         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2843         if (gfl->count() != 0) {
2844           // nn is the number of chunks of size cur_sz that
2845           // we'd need to split k-ways each, in order to create
2846           // "n" chunks of size word_sz each.
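               // e.g. to end up with n == 100 chunks of size word_sz via
               // k == 4 way splits, we need nn == 25 chunks of size 4 * word_sz.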
2847           const size_t nn = MAX2(n/k, (size_t)1);
2848           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2849           found = true;
2850           if (k > 1) {
2851             // Update split death stats for the cur_sz-size blocks list:
2852             // we increment the split death count by the number of blocks
2853             // we just took from the cur_sz-size blocks list and which
2854             // we will be splitting below.
2855             ssize_t deaths = gfl->split_deaths() +
2856                              fl_for_cur_sz.count();
2857             gfl->set_split_deaths(deaths);
2858           }
2859         }
2860       }
2861       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2862       if (found) {
2863         if (k == 1) {
2864           fl->prepend(&fl_for_cur_sz);
2865         } else {
2866           // Divide each block on fl_for_cur_sz up k ways.
2867           FreeChunk* fc;
2868           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2869             // Must do this in reverse order, so that anybody attempting to
2870             // access the main chunk sees it as a single free block until we
2871             // change it.
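                 // Carving blocks off the right-hand end first means fc's own
                 // header is rewritten last, so a concurrent reader still sees
                 // one free block covering the whole range until that point.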
2872             size_t fc_size = fc->size();
2873             assert(fc->is_free(), "Error");
2874             for (int i = k-1; i >= 0; i--) {
2875               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2876               assert((i != 0) ||
2877                         ((fc == ffc) && ffc->is_free() &&
2878                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2879                         "Counting error");
2880               ffc->set_size(word_sz);
2881               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2882               ffc->link_next(NULL);
2883               // Above must occur before BOT is updated below.
2884               OrderAccess::storestore();
2885               // splitting from the right, fc_size == i * word_sz
2886               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2887               fc_size -= word_sz;
2888               assert(fc_size == i*word_sz, "Error");
2889               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2890               _bt.verify_single_block((HeapWord*)fc, fc_size);
2891               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2892               // Push this on "fl".
2893               fl->return_chunk_at_head(ffc);
2894             }
2895             // TRAP
2896             assert(fl->tail()->next() == NULL, "List invariant.");
2897           }
2898         }
2899         // Update birth stats for this block size.
2900         size_t num = fl->count();
2901         MutexLocker x(_indexedFreeListParLocks[word_sz],
2902                       Mutex::_no_safepoint_check_flag);
2903         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2904         _indexedFreeList[word_sz].set_split_births(births);
2905         return true;
2906       }
2907     }
2908     return found;
2909   }
2910 }
2911 
2912 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2913 
2914   FreeChunk* fc = NULL;
2915   FreeChunk* rem_fc = NULL;
2916   size_t rem;
2917   {
2918     MutexLocker x(parDictionaryAllocLock(),
2919                   Mutex::_no_safepoint_check_flag);
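         // Ask the dictionary for a chunk large enough for n blocks (but never
         // smaller than the dictionary's minimum chunk size); if none is found,
         // retry with progressively smaller n.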
2920     while (n > 0) {
2921       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
2922       if (fc != NULL) {
2923         break;
2924       } else {
2925         n--;
2926       }
2927     }
2928     if (fc == NULL) return NULL;
2929     // Otherwise, split up that block.
2930     assert((ssize_t)n >= 1, "Control point invariant");
2931     assert(fc->is_free(), "Error: should be a free block");
2932     _bt.verify_single_block((HeapWord*)fc, fc->size());
2933     const size_t nn = fc->size() / word_sz;
2934     n = MIN2(nn, n);
2935     assert((ssize_t)n >= 1, "Control point invariant");
2936     rem = fc->size() - n * word_sz;
2937     // If there is a remainder, and it's too small, allocate one fewer.
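         // (e.g. with fc->size() == 35, word_sz == 8 and n == 4, rem == 3;
         // if 3 < MinChunkSize we drop to n == 3 and rem becomes 11.)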
2938     if (rem > 0 && rem < MinChunkSize) {
2939       n--; rem += word_sz;
2940     }
2941     // Note that at this point we may have n == 0.
2942     assert((ssize_t)n >= 0, "Control point invariant");
2943 
2944     // If n is 0, the chunk fc that was found is not large
2945     // enough to leave a viable remainder.  We are unable to
2946     // allocate even one block.  Return fc to the
2947     // dictionary and return, leaving "fl" empty.
2948     if (n == 0) {
2949       returnChunkToDictionary(fc);
2950       return NULL;
2951     }
2952 
2953     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2954     dictionary()->dict_census_update(fc->size(),
2955                                      true /*split*/,
2956                                      false /*birth*/);
2957 
2958     // First return the remainder, if any.
2959     // Note that we hold the lock until we decide if we're going to give
2960     // back the remainder to the dictionary, since a concurrent allocation
2961     // may otherwise see the heap as empty.  (We're willing to take that
2962     // hit if the block is a small block.)
2963     if (rem > 0) {
2964       size_t prefix_size = n * word_sz;
2965       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2966       rem_fc->set_size(rem);
2967       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2968       rem_fc->link_next(NULL);
2969       // Above must occur before BOT is updated below.
2970       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2971       OrderAccess::storestore();
2972       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2973       assert(fc->is_free(), "Error");
2974       fc->set_size(prefix_size);
2975       if (rem >= IndexSetSize) {
2976         returnChunkToDictionary(rem_fc);
2977         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2978         rem_fc = NULL;
2979       }
2980       // Otherwise, return it to the small list below.
2981     }
2982   }
2983   if (rem_fc != NULL) {
2984     MutexLocker x(_indexedFreeListParLocks[rem],
2985                   Mutex::_no_safepoint_check_flag);
2986     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2987     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2988     smallSplitBirth(rem);
2989   }
2990   assert(n * word_sz == fc->size(),
2991          "Chunk size " SIZE_FORMAT " does not split exactly into "
2992          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2993          fc->size(), n, word_sz);
2994   return fc;
2995 }
2996 
2997 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2998 
2999   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
3000 
3001   if (fc == NULL) {
3002     return;
3003   }
3004 
3005   size_t n = fc->size() / word_sz;
3006 
3007   assert((ssize_t)n > 0, "Consistency");
3008   // Now do the splitting up.
3009   // Must do this in reverse order, so that anybody attempting to
3010   // access the main chunk sees it as a single free block until we
3011   // change it.
3012   size_t fc_size = n * word_sz;
3013   // All but first chunk in this loop
3014   for (ssize_t i = n-1; i > 0; i--) {
3015     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
3016     ffc->set_size(word_sz);
3017     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
3018     ffc->link_next(NULL);
3019     // Above must occur before BOT is updated below.
3020     OrderAccess::storestore();
3021     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
3022     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
3023     fc_size -= word_sz;
3024     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
3025     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
3026     _bt.verify_single_block((HeapWord*)fc, fc_size);
3027     // Push this on "fl".
3028     fl->return_chunk_at_head(ffc);
3029   }
3030   // First chunk
3031   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
3032   // The blocks split off above must show their new sizes before fc itself is resized below.
3033   fc->set_size(word_sz);
3034   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
3035   fc->link_next(NULL);
3036   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
3037   _bt.verify_single_block((HeapWord*)fc, fc->size());
3038   fl->return_chunk_at_head(fc);
3039 
3040   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
3041   {
3042     // Update the stats for this block size.
3043     MutexLocker x(_indexedFreeListParLocks[word_sz],
3044                   Mutex::_no_safepoint_check_flag);
3045     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
3046     _indexedFreeList[word_sz].set_split_births(births);
3047     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
3048     // _indexedFreeList[word_sz].set_surplus(new_surplus);
3049   }
3050 
3051   // TRAP
3052   assert(fl->tail()->next() == NULL, "List invariant.");
3053 }
3054 
3055 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
3056   assert(fl->count() == 0, "Precondition.");
3057   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
3058          "Precondition");
3059 
3060   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
3061     // Got it
3062     return;
3063   }
3064 
3065   // Otherwise, we'll split a block from the dictionary.
3066   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
3067 }
3068 
3069 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
3070   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
3071   return ergo_max;
3072 }
3073 
3074 // Set up the space's par_seq_tasks structure for work claiming
3075 // for parallel rescan. See CMSParRemarkTask where this is currently used.
3076 // XXX Need to suitably abstract and generalize this and the next
3077 // method into one.
3078 void
3079 CompactibleFreeListSpace::
3080 initialize_sequential_subtasks_for_rescan(int n_threads) {
3081   // The "size" of each task is fixed according to rescan_task_size.
3082   assert(n_threads > 0, "Unexpected n_threads argument");
3083   const size_t task_size = rescan_task_size();
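       // The number of tasks is the size of the used region in words divided
       // by the task size, rounded up.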
3084   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
3085   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
3086   assert(n_tasks == 0 ||
3087          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
3088           (used_region().start() + n_tasks*task_size >= used_region().end())),
3089          "n_tasks calculation incorrect");
3090   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3091   assert(!pst->valid(), "Clobbering existing data?");
3092   // Sets the condition for completion of the subtask (how many threads
3093   // need to finish in order to be done).
3094   pst->set_n_threads(n_threads);
3095   pst->set_n_tasks((int)n_tasks);
3096 }
3097 
3098 // Set up the space's par_seq_tasks structure for work claiming
3099 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
3100 void
3101 CompactibleFreeListSpace::
3102 initialize_sequential_subtasks_for_marking(int n_threads,
3103                                            HeapWord* low) {
3104   // The "size" of each task is fixed according to marking_task_size.
3105   assert(n_threads > 0, "Unexpected n_threads argument");
3106   const size_t task_size = marking_task_size();
3107   assert(task_size > CardTable::card_size_in_words &&
3108          (task_size %  CardTable::card_size_in_words == 0),
3109          "Otherwise arithmetic below would be incorrect");
3110   MemRegion span = _old_gen->reserved();
3111   if (low != NULL) {
3112     if (span.contains(low)) {
3113       // Align low down to a card boundary so that
3114       // we can use block_offset_careful() on span boundaries.
3115       HeapWord* aligned_low = align_down(low, CardTable::card_size);
3116       // Clip span prefix at aligned_low
3117       span = span.intersection(MemRegion(aligned_low, span.end()));
3118     } else if (low > span.end()) {
3119       span = MemRegion(low, low);  // Null region
3120     } // else use entire span
3121   }
3122   assert(span.is_empty() ||
3123          ((uintptr_t)span.start() %  CardTable::card_size == 0),
3124         "span should start at a card boundary");
3125   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3126   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3127   assert(n_tasks == 0 ||
3128          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3129           (span.start() + n_tasks*task_size >= span.end())),
3130          "n_tasks calculation incorrect");
3131   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3132   assert(!pst->valid(), "Clobbering existing data?");
3133   // Sets the condition for completion of the subtask (how many threads
3134   // need to finish in order to be done).
3135   pst->set_n_threads(n_threads);
3136   pst->set_n_tasks((int)n_tasks);
3137 }