1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsHeap.hpp"
  27 #include "gc/cms/cmsLockVerifier.hpp"
  28 #include "gc/cms/compactibleFreeListSpace.hpp"
  29 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  30 #include "gc/cms/concurrentMarkSweepThread.hpp"
  31 #include "gc/shared/blockOffsetTable.inline.hpp"
  32 #include "gc/shared/collectedHeap.inline.hpp"
  33 #include "gc/shared/space.inline.hpp"
  34 #include "gc/shared/spaceDecorator.hpp"
  35 #include "logging/log.hpp"
  36 #include "logging/logStream.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/binaryTreeDictionary.inline.hpp"
  39 #include "memory/resourceArea.hpp"
  40 #include "memory/universe.hpp"
  41 #include "oops/access.inline.hpp"
  42 #include "oops/compressedOops.inline.hpp"
  43 #include "oops/oop.inline.hpp"
  44 #include "runtime/globals.hpp"
  45 #include "runtime/handles.inline.hpp"
  46 #include "runtime/init.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/orderAccess.hpp"
  49 #include "runtime/vmThread.hpp"
  50 #include "utilities/align.hpp"
  51 #include "utilities/copy.hpp"
  52 
// Specialization for AdaptiveFreeList: try to avoid splitting a chunk of a
// size that is under-populated, preferring instead to split a chunk of an
// over-populated size.  The general get_better_list() just returns the
// current list.
  57 template <>
  58 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >*
  59 TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >::get_better_list(
  60   BinaryTreeDictionary<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* dictionary) {
  61   // A candidate chunk has been found.  If it is already under
  62   // populated, get a chunk associated with the hint for this
  63   // chunk.
  64 
  65   TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* curTL = this;
  66   if (curTL->surplus() <= 0) {
  67     /* Use the hint to find a size with a surplus, and reset the hint. */
  68     TreeList<FreeChunk, ::AdaptiveFreeList<FreeChunk> >* hintTL = this;
  69     while (hintTL->hint() != 0) {
  70       assert(hintTL->hint() > hintTL->size(),
  71         "hint points in the wrong direction");
  72       hintTL = dictionary->find_list(hintTL->hint());
  73       assert(curTL != hintTL, "Infinite loop");
  74       if (hintTL == NULL ||
  75           hintTL == curTL /* Should not happen but protect against it */ ) {
  76         // No useful hint.  Set the hint to NULL and go on.
  77         curTL->set_hint(0);
  78         break;
  79       }
  80       assert(hintTL->size() > curTL->size(), "hint is inconsistent");
  81       if (hintTL->surplus() > 0) {
  82         // The hint led to a list that has a surplus.  Use it.
  83         // Set the hint for the candidate to an overpopulated
  84         // size.
  85         curTL->set_hint(hintTL->size());
  86         // Change the candidate.
  87         curTL = hintTL;
  88         break;
  89       }
  90     }
  91   }
  92   return curTL;
  93 }
  94 
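// Update the census for the free list of the given size: record a split or
// coalesce birth or death and adjust that list's surplus accordingly (a
// birth adds a chunk of this size, a death removes one).  If no list of
// this size exists, the update is skipped; see the note at the end of the
// method.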
  95 void AFLBinaryTreeDictionary::dict_census_update(size_t size, bool split, bool birth) {
  96   TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* nd = find_list(size);
  97   if (nd) {
  98     if (split) {
  99       if (birth) {
 100         nd->increment_split_births();
 101         nd->increment_surplus();
 102       }  else {
 103         nd->increment_split_deaths();
 104         nd->decrement_surplus();
 105       }
 106     } else {
 107       if (birth) {
 108         nd->increment_coal_births();
 109         nd->increment_surplus();
 110       } else {
 111         nd->increment_coal_deaths();
 112         nd->decrement_surplus();
 113       }
 114     }
 115   }
 116   // A list for this size may not be found (nd == 0) if
 117   //   This is a death where the appropriate list is now
  //     empty and has been removed from the tree.
 119   //   This is a birth associated with a LinAB.  The chunk
 120   //     for the LinAB is not in the dictionary.
 121 }
 122 
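// Decide whether chunks of this size should be treated as over-populated
// for coalescing purposes: true if large chunks are always coalesced
// (FLSAlwaysCoalesceLarge), if no list of this size exists, if no chunks of
// this size are desired for coalescing, or if the list already holds more
// chunks than desired.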
 123 bool AFLBinaryTreeDictionary::coal_dict_over_populated(size_t size) {
 124   if (FLSAlwaysCoalesceLarge) return true;
 125 
 126   TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* list_of_size = find_list(size);
 127   // None of requested size implies overpopulated.
 128   return list_of_size == NULL || list_of_size->coal_desired() <= 0 ||
 129          list_of_size->count() > list_of_size->coal_desired();
 130 }
 131 
 132 // For each list in the tree, calculate the desired, desired
 133 // coalesce, count before sweep, and surplus before sweep.
 134 class BeginSweepClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
 135   double _percentage;
 136   float _inter_sweep_current;
 137   float _inter_sweep_estimate;
 138   float _intra_sweep_estimate;
 139 
 140  public:
 141   BeginSweepClosure(double p, float inter_sweep_current,
 142                               float inter_sweep_estimate,
 143                               float intra_sweep_estimate) :
 144    _percentage(p),
 145    _inter_sweep_current(inter_sweep_current),
 146    _inter_sweep_estimate(inter_sweep_estimate),
 147    _intra_sweep_estimate(intra_sweep_estimate) { }
 148 
 149   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
 150     double coalSurplusPercent = _percentage;
 151     fl->compute_desired(_inter_sweep_current, _inter_sweep_estimate, _intra_sweep_estimate);
 152     fl->set_coal_desired((ssize_t)((double)fl->desired() * coalSurplusPercent));
 153     fl->set_before_sweep(fl->count());
 154     fl->set_bfr_surp(fl->surplus());
 155   }
 156 };
 157 
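// At the start of a sweep, walk the tree so that each list computes its
// desired count from the inter/intra-sweep estimates, derives its coalesce
// target as a fraction of that desired count, and snapshots its current
// count and surplus.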
 158 void AFLBinaryTreeDictionary::begin_sweep_dict_census(double coalSurplusPercent,
 159   float inter_sweep_current, float inter_sweep_estimate, float intra_sweep_estimate) {
 160   BeginSweepClosure bsc(coalSurplusPercent, inter_sweep_current,
 161                         inter_sweep_estimate,
 162                         intra_sweep_estimate);
 163   bsc.do_tree(root());
 164 }
 165 
 166 // Calculate surpluses for the lists in the tree.
 167 class setTreeSurplusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
 168   double percentage;
 169  public:
 170   setTreeSurplusClosure(double v) { percentage = v; }
 171 
 172   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
 173     double splitSurplusPercent = percentage;
 174     fl->set_surplus(fl->count() -
 175                    (ssize_t)((double)fl->desired() * splitSurplusPercent));
 176   }
 177 };
 178 
 179 void AFLBinaryTreeDictionary::set_tree_surplus(double splitSurplusPercent) {
 180   setTreeSurplusClosure sts(splitSurplusPercent);
 181   sts.do_tree(root());
 182 }
 183 
 184 // Set hints for the lists in the tree.
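// The tree is walked from the largest size to the smallest; each list
// receives as its hint the size of the nearest larger list that currently
// has a surplus, so that an allocation that misses at this size knows which
// larger size is worth splitting.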
 185 class setTreeHintsClosure : public DescendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
 186   size_t hint;
 187  public:
 188   setTreeHintsClosure(size_t v) { hint = v; }
 189 
 190   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
 191     fl->set_hint(hint);
 192     assert(fl->hint() == 0 || fl->hint() > fl->size(),
 193       "Current hint is inconsistent");
 194     if (fl->surplus() > 0) {
 195       hint = fl->size();
 196     }
 197   }
 198 };
 199 
 200 void AFLBinaryTreeDictionary::set_tree_hints(void) {
 201   setTreeHintsClosure sth(0);
 202   sth.do_tree(root());
 203 }
 204 
// For each list, record the current count as the count at the most recent
// sweep and reset the split and coalesce birth/death counters.
 206 class clearTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
 207   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
 208     fl->set_prev_sweep(fl->count());
 209     fl->set_coal_births(0);
 210     fl->set_coal_deaths(0);
 211     fl->set_split_births(0);
 212     fl->set_split_deaths(0);
 213   }
 214 };
 215 
 216 void AFLBinaryTreeDictionary::clear_tree_census(void) {
 217   clearTreeCensusClosure ctc;
 218   ctc.do_tree(root());
 219 }
 220 
 221 // Do reporting and post sweep clean up.
 222 void AFLBinaryTreeDictionary::end_sweep_dict_census(double splitSurplusPercent) {
 223   // Does walking the tree 3 times hurt?
 224   set_tree_surplus(splitSurplusPercent);
 225   set_tree_hints();
 226   LogTarget(Trace, gc, freelist, stats) log;
 227   if (log.is_enabled()) {
 228     LogStream out(log);
 229     report_statistics(&out);
 230   }
 231   clear_tree_census();
 232 }
 233 
 234 // Print census information - counts, births, deaths, etc.
 235 // for each list in the tree.  Also print some summary
 236 // information.
 237 class PrintTreeCensusClosure : public AscendTreeCensusClosure<FreeChunk, AdaptiveFreeList<FreeChunk> > {
 238   int _print_line;
 239   size_t _total_free;
 240   AdaptiveFreeList<FreeChunk> _total;
 241 
 242  public:
 243   PrintTreeCensusClosure() {
 244     _print_line = 0;
 245     _total_free = 0;
 246   }
 247   AdaptiveFreeList<FreeChunk>* total() { return &_total; }
 248   size_t total_free() { return _total_free; }
 249 
 250   void do_list(AdaptiveFreeList<FreeChunk>* fl) {
 251     LogStreamHandle(Debug, gc, freelist, census) out;
 252 
 253     if (++_print_line >= 40) {
 254       AdaptiveFreeList<FreeChunk>::print_labels_on(&out, "size");
 255       _print_line = 0;
 256     }
 257     fl->print_on(&out);
 258     _total_free +=           fl->count()             * fl->size()        ;
 259     total()->set_count(      total()->count()        + fl->count()      );
 260     total()->set_bfr_surp(   total()->bfr_surp()     + fl->bfr_surp()    );
    total()->set_surplus(    total()->surplus()      + fl->surplus()    );
 262     total()->set_desired(    total()->desired()      + fl->desired()    );
 263     total()->set_prev_sweep(  total()->prev_sweep()   + fl->prev_sweep()  );
 264     total()->set_before_sweep(total()->before_sweep() + fl->before_sweep());
 265     total()->set_coal_births( total()->coal_births()  + fl->coal_births() );
 266     total()->set_coal_deaths( total()->coal_deaths()  + fl->coal_deaths() );
 267     total()->set_split_births(total()->split_births() + fl->split_births());
 268     total()->set_split_deaths(total()->split_deaths() + fl->split_deaths());
 269   }
 270 };
 271 
 272 void AFLBinaryTreeDictionary::print_dict_census(outputStream* st) const {
 273 
 274   st->print_cr("BinaryTree");
 275   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 276   PrintTreeCensusClosure ptc;
 277   ptc.do_tree(root());
 278 
 279   AdaptiveFreeList<FreeChunk>* total = ptc.total();
 280   AdaptiveFreeList<FreeChunk>::print_labels_on(st, " ");
 281   total->print_on(st, "TOTAL\t");
 282   st->print_cr("total_free(words): " SIZE_FORMAT_W(16) " growth: %8.5f  deficit: %8.5f",
 283                ptc.total_free(),
 284                (double)(total->split_births() + total->coal_births()
 285                       - total->split_deaths() - total->coal_deaths())
 286                /(total->prev_sweep() != 0 ? (double)total->prev_sweep() : 1.0),
 287               (double)(total->desired() - total->count())
 288               /(total->desired() != 0 ? (double)total->desired() : 1.0));
 289 }
 290 
 291 /////////////////////////////////////////////////////////////////////////
 292 //// CompactibleFreeListSpace
 293 /////////////////////////////////////////////////////////////////////////
 294 
// Highest ranked free list lock rank.
 296 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
 297 
 298 // Defaults are 0 so things will break badly if incorrectly initialized.
 299 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
 300 size_t CompactibleFreeListSpace::IndexSetStride = 0;
 301 size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;
 302 
 303 size_t MinChunkSize = 0;
 304 
 305 void CompactibleFreeListSpace::set_cms_values() {
 306   // Set CMS global values
 307   assert(MinChunkSize == 0, "already set");
 308 
 309   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
 310   // for chunks to contain a FreeChunk.
 311   _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
 312   MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;
 313 
 314   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
 315   IndexSetStart  = MinChunkSize;
 316   IndexSetStride = MinObjAlignment;
 317 }
 318 
 319 // Constructor
 320 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
 321   _bt(bs, mr),
 322   // free list locks are in the range of values taken by _lockRank
 323   // This range currently is [_leaf+2, _leaf+3]
 324   // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
 327   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
 328                 Monitor::_safepoint_check_sometimes),
 329   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
 330                           "CompactibleFreeListSpace._dict_par_lock", true,
 331                           Monitor::_safepoint_check_never),
 332   _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
 333                     CMSRescanMultiple),
 334   _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
 335                     CMSConcMarkMultiple),
 336   _collector(NULL),
 337   _preconsumptionDirtyCardClosure(NULL)
 338 {
 339   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
 340          "FreeChunk is larger than expected");
 341   _bt.set_space(this);
 342   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 343 
 344   _dictionary = new AFLBinaryTreeDictionary(mr);
 345 
 346   assert(_dictionary != NULL, "CMS dictionary initialization");
 347   // The indexed free lists are initially all empty and are lazily
 348   // filled in on demand. Initialize the array elements to NULL.
 349   initializeIndexedFreeListArray();
 350 
 351   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 352                              SmallForLinearAlloc);
 353 
 354   // CMSIndexedFreeListReplenish should be at least 1
 355   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 356   _promoInfo.setSpace(this);
 357   if (UseCMSBestFit) {
 358     _fitStrategy = FreeBlockBestFitFirst;
 359   } else {
 360     _fitStrategy = FreeBlockStrategyNone;
 361   }
 362   check_free_list_consistency();
 363 
 364   // Initialize locks for parallel case.
 365   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 366     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 367                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 368     DEBUG_ONLY(
 369       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 370     )
 371   }
 372   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 373 }
 374 
 375 // Like CompactibleSpace forward() but always calls cross_threshold() to
 376 // update the block offset table.  Removed initialize_threshold call because
 377 // CFLS does not use a block offset array for contiguous spaces.
 378 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 379                                     CompactPoint* cp, HeapWord* compact_top) {
 380   // q is alive
 381   // First check if we should switch compaction space
 382   assert(this == cp->space, "'this' should be current compaction space.");
 383   size_t compaction_max_size = pointer_delta(end(), compact_top);
 384   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 385     "virtual adjustObjectSize_v() method is not correct");
 386   size_t adjusted_size = adjustObjectSize(size);
 387   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 388          "no small fragments allowed");
 389   assert(minimum_free_block_size() == MinChunkSize,
 390          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 392   if (adjusted_size + MinChunkSize > compaction_max_size &&
 393       adjusted_size != compaction_max_size) {
 394     do {
 395       // switch to next compaction space
 396       cp->space->set_compaction_top(compact_top);
 397       cp->space = cp->space->next_compaction_space();
 398       if (cp->space == NULL) {
 399         cp->gen = CMSHeap::heap()->young_gen();
 400         assert(cp->gen != NULL, "compaction must succeed");
 401         cp->space = cp->gen->first_compaction_space();
 402         assert(cp->space != NULL, "generation must have a first compaction space");
 403       }
 404       compact_top = cp->space->bottom();
 405       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method, which was avoided above to save the virtual
      // dispatch.
 410       adjusted_size = cp->space->adjust_object_size_v(size);
 411       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 412       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 413     } while (adjusted_size > compaction_max_size);
 414   }
 415 
 416   // store the forwarding pointer into the mark word
 417   if ((HeapWord*)q != compact_top) {
 418     q->forward_to(oop(compact_top));
 419     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 420   } else {
 421     // if the object isn't moving we can just set the mark to the default
 422     // mark and handle it specially later on.
 423     q->init_mark_raw();
 424     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 425   }
 426 
 427   compact_top += adjusted_size;
 428 
 429   // we need to update the offset table so that the beginnings of objects can be
 430   // found during scavenge.  Note that we are updating the offset table based on
 431   // where the object will be once the compaction phase finishes.
 432 
 433   // Always call cross_threshold().  A contiguous space can only call it when
 434   // the compaction_top exceeds the current threshold but not for an
 435   // non-contiguous space.
 436   cp->threshold =
 437     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 438   return compact_top;
 439 }
 440 
 441 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 442 // and use of single_block instead of alloc_block.  The name here is not really
 443 // appropriate - maybe a more general name could be invented for both the
 444 // contiguous and noncontiguous spaces.
 445 
 446 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 447   _bt.single_block(start, the_end);
 448   return end();
 449 }
 450 
 451 // Initialize them to NULL.
 452 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 453   for (size_t i = 0; i < IndexSetSize; i++) {
 454     // Note that on platforms where objects are double word aligned,
 455     // the odd array elements are not used.  It is convenient, however,
 456     // to map directly from the object size to the array element.
 457     _indexedFreeList[i].reset(IndexSetSize);
 458     _indexedFreeList[i].set_size(i);
 459     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 460     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 461     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 462     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 463   }
 464 }
 465 
 466 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 467   return adjustObjectSize(oop(addr)->size());
 468 }
 469 
 470 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 471   for (size_t i = 1; i < IndexSetSize; i++) {
 472     assert(_indexedFreeList[i].size() == (size_t) i,
 473       "Indexed free list sizes are incorrect");
 474     _indexedFreeList[i].reset(IndexSetSize);
 475     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 476     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 477     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 478     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 479   }
 480 }
 481 
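// Reset the space so that mr becomes its single free chunk: the indexed
// free lists and the dictionary are cleared, the block offset table is
// updated for the new block, and the chunk is returned either to the
// dictionary or to the indexed free list appropriate for its size.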
 482 void CompactibleFreeListSpace::reset(MemRegion mr) {
 483   resetIndexedFreeListArray();
 484   dictionary()->reset();
 485   if (BlockOffsetArrayUseUnallocatedBlock) {
 486     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 487     // Everything's allocated until proven otherwise.
 488     _bt.set_unallocated_block(end());
 489   }
 490   if (!mr.is_empty()) {
 491     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 492     _bt.single_block(mr.start(), mr.word_size());
 493     FreeChunk* fc = (FreeChunk*) mr.start();
 494     fc->set_size(mr.word_size());
 495     if (mr.word_size() >= IndexSetSize ) {
 496       returnChunkToDictionary(fc);
 497     } else {
 498       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 499       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 500     }
 501     coalBirth(mr.word_size());
 502   }
 503   _promoInfo.reset();
 504   _smallLinearAllocBlock._ptr = NULL;
 505   _smallLinearAllocBlock._word_size = 0;
 506 }
 507 
 508 void CompactibleFreeListSpace::reset_after_compaction() {
 509   // Reset the space to the new reality - one free chunk.
 510   MemRegion mr(compaction_top(), end());
 511   reset(mr);
 512   // Now refill the linear allocation block(s) if possible.
 513   refillLinearAllocBlocksIfNeeded();
 514 }
 515 
 516 // Walks the entire dictionary, returning a coterminal
 517 // chunk, if it exists. Use with caution since it involves
 518 // a potentially complete walk of a potentially large tree.
 519 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 520 
 521   assert_lock_strong(&_freelistLock);
 522 
 523   return dictionary()->find_chunk_ends_at(end());
 524 }
 525 
 526 
 527 #ifndef PRODUCT
 528 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 529   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 530     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 531   }
 532 }
 533 
 534 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 535   size_t sum = 0;
 536   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 537     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 538   }
 539   return sum;
 540 }
 541 
 542 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 543   size_t count = 0;
 544   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 545     debug_only(
 546       ssize_t total_list_count = 0;
 547       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 548          fc = fc->next()) {
 549         total_list_count++;
 550       }
 551       assert(total_list_count ==  _indexedFreeList[i].count(),
 552         "Count in list is incorrect");
 553     )
 554     count += _indexedFreeList[i].count();
 555   }
 556   return count;
 557 }
 558 
 559 size_t CompactibleFreeListSpace::totalCount() {
 560   size_t num = totalCountInIndexedFreeLists();
 561   num +=  dictionary()->total_count();
 562   if (_smallLinearAllocBlock._word_size != 0) {
 563     num++;
 564   }
 565   return num;
 566 }
 567 #endif
 568 
 569 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 570   FreeChunk* fc = (FreeChunk*) p;
 571   return fc->is_free();
 572 }
 573 
 574 size_t CompactibleFreeListSpace::used() const {
 575   return capacity() - free();
 576 }
 577 
 578 size_t CompactibleFreeListSpace::free() const {
 579   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 580   // if you do this while the structures are in flux you
 581   // may get an approximate answer only; for instance
 582   // because there is concurrent allocation either
 583   // directly by mutators or for promotion during a GC.
 584   // It's "MT-safe", however, in the sense that you are guaranteed
 585   // not to crash and burn, for instance, because of walking
 586   // pointers that could disappear as you were walking them.
 587   // The approximation is because the various components
 588   // that are read below are not read atomically (and
 589   // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 591   // this is during a resize operation at the end of GC
 592   // and at that time you are guaranteed to get the
 593   // correct actual value. However, for instance, this is
 594   // also read completely asynchronously by the "perf-sampler"
 595   // that supports jvmstat, and you are apt to see the values
 596   // flicker in such cases.
 597   assert(_dictionary != NULL, "No _dictionary?");
 598   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 599           totalSizeInIndexedFreeLists() +
 600           _smallLinearAllocBlock._word_size) * HeapWordSize;
 601 }
 602 
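// Return (approximately) the largest number of words that could currently
// be allocated as a single block: the larger of the biggest dictionary
// chunk and the usable portion of the linear allocation block, improved
// further if a non-empty indexed free list of a larger size is found.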
 603 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 604   assert(_dictionary != NULL, "No _dictionary?");
 605   assert_locked();
 606   size_t res = _dictionary->max_chunk_size();
 607   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 608                        (size_t) SmallForLinearAlloc - 1));
 609   // XXX the following could potentially be pretty slow;
 610   // should one, pessimistically for the rare cases when res
 611   // calculated above is less than IndexSetSize,
 612   // just return res calculated above? My reasoning was that
 613   // those cases will be so rare that the extra time spent doesn't
 614   // really matter....
 615   // Note: do not change the loop test i >= res + IndexSetStride
 616   // to i > res below, because i is unsigned and res may be zero.
 617   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 618        i -= IndexSetStride) {
 619     if (_indexedFreeList[i].head() != NULL) {
 620       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 621       return i;
 622     }
 623   }
 624   return res;
 625 }
 626 
 627 void LinearAllocBlock::print_on(outputStream* st) const {
 628   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 629             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 630             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 631 }
 632 
 633 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 634   st->print_cr("COMPACTIBLE FREELIST SPACE");
 635   st->print_cr(" Space:");
 636   Space::print_on(st);
 637 
 638   st->print_cr("promoInfo:");
 639   _promoInfo.print_on(st);
 640 
 641   st->print_cr("_smallLinearAllocBlock");
 642   _smallLinearAllocBlock.print_on(st);
 643 
 644   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 645 
 646   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 647 }
 648 
 649 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 650 const {
 651   reportIndexedFreeListStatistics(st);
 652   st->print_cr("Layout of Indexed Freelists");
 653   st->print_cr("---------------------------");
 654   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 655   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 656     _indexedFreeList[i].print_on(st);
 657     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 658       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 659                    p2i(fc), p2i((HeapWord*)fc + i),
 660                    fc->cantCoalesce() ? "\t CC" : "");
 661     }
 662   }
 663 }
 664 
 665 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 666 const {
 667   _promoInfo.print_on(st);
 668 }
 669 
 670 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 671 const {
 672   _dictionary->report_statistics(st);
 673   st->print_cr("Layout of Freelists in Tree");
 674   st->print_cr("---------------------------");
 675   _dictionary->print_free_lists(st);
 676 }
 677 
 678 class BlkPrintingClosure: public BlkClosure {
 679   const CMSCollector*             _collector;
 680   const CompactibleFreeListSpace* _sp;
 681   const CMSBitMap*                _live_bit_map;
 682   const bool                      _post_remark;
 683   outputStream*                   _st;
 684 public:
 685   BlkPrintingClosure(const CMSCollector* collector,
 686                      const CompactibleFreeListSpace* sp,
 687                      const CMSBitMap* live_bit_map,
 688                      outputStream* st):
 689     _collector(collector),
 690     _sp(sp),
 691     _live_bit_map(live_bit_map),
 692     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 693     _st(st) { }
 694   size_t do_blk(HeapWord* addr);
 695 };
 696 
 697 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 698   size_t sz = _sp->block_size_no_stall(addr, _collector);
 699   assert(sz != 0, "Should always be able to compute a size");
 700   if (_sp->block_is_obj(addr)) {
 701     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 702     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 703       p2i(addr),
 704       dead ? "dead" : "live",
 705       sz,
 706       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 707     if (CMSPrintObjectsInDump && !dead) {
 708       oop(addr)->print_on(_st);
 709       _st->print_cr("--------------------------------------");
 710     }
 711   } else { // free block
 712     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 713       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 714     if (CMSPrintChunksInDump) {
 715       ((FreeChunk*)addr)->print_on(_st);
 716       _st->print_cr("--------------------------------------");
 717     }
 718   }
 719   return sz;
 720 }
 721 
 722 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 723   st->print_cr("=========================");
 724   st->print_cr("Block layout in CMS Heap:");
 725   st->print_cr("=========================");
 726   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 727   blk_iterate(&bpcl);
 728 
 729   st->print_cr("=======================================");
 730   st->print_cr("Order & Layout of Promotion Info Blocks");
 731   st->print_cr("=======================================");
 732   print_promo_info_blocks(st);
 733 
 734   st->print_cr("===========================");
 735   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 737   print_indexed_free_lists(st);
 738 
 739   st->print_cr("=================================");
 740   st->print_cr("Order of Free Lists in Dictionary");
 741   st->print_cr("=================================");
 742   print_dictionary_free_lists(st);
 743 }
 744 
 745 
 746 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 747   assert_lock_strong(&_freelistLock);
 748   Log(gc, freelist, stats) log;
 749   if (!log.is_debug()) {
 750     return;
 751   }
 752   log.debug("%s", title);
 753 
 754   LogStream out(log.debug());
 755   _dictionary->report_statistics(&out);
 756 
 757   if (log.is_trace()) {
 758     LogStream trace_out(log.trace());
 759     reportIndexedFreeListStatistics(&trace_out);
 760     size_t total_size = totalSizeInIndexedFreeLists() +
 761                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 762     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 763   }
 764 }
 765 
 766 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 767   assert_lock_strong(&_freelistLock);
 768   st->print_cr("Statistics for IndexedFreeLists:");
 769   st->print_cr("--------------------------------");
 770   size_t total_size = totalSizeInIndexedFreeLists();
 771   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 772   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 773   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 774   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 775   if (free_blocks != 0) {
 776     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 777   }
 778 }
 779 
 780 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 781   size_t res = 0;
 782   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 783     debug_only(
 784       ssize_t recount = 0;
 785       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 786          fc = fc->next()) {
 787         recount += 1;
 788       }
 789       assert(recount == _indexedFreeList[i].count(),
 790         "Incorrect count in list");
 791     )
 792     res += _indexedFreeList[i].count();
 793   }
 794   return res;
 795 }
 796 
 797 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 798   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 799     if (_indexedFreeList[i].head() != NULL) {
 800       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 801       return (size_t)i;
 802     }
 803   }
 804   return 0;
 805 }
 806 
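// Adjust the space to end at the given address and resize the block offset
// table accordingly.  When the space grows, the newly covered words are
// added to the free lists, coalescing with a trailing free block when
// possible; when it shrinks, we only assert that the new end does not drop
// below the unallocated block.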
 807 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 808   HeapWord* prevEnd = end();
 809   assert(prevEnd != value, "unnecessary set_end call");
 810   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 811         "New end is below unallocated block");
 812   _end = value;
 813   if (prevEnd != NULL) {
 814     // Resize the underlying block offset table.
 815     _bt.resize(pointer_delta(value, bottom()));
 816     if (value <= prevEnd) {
 817       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 818              "New end is below unallocated block");
 819     } else {
 820       // Now, take this new chunk and add it to the free blocks.
 821       // Note that the BOT has not yet been updated for this block.
 822       size_t newFcSize = pointer_delta(value, prevEnd);
 823       // Add the block to the free lists, if possible coalescing it
 824       // with the last free block, and update the BOT and census data.
 825       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 826     }
 827   }
 828 }
 829 
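// A dirty-card-to-oop closure specialized for CompactibleFreeListSpace.
// When walking a dirty region it applies the wrapped oop closure only to
// blocks that are objects, skipping objects allocated since the last
// save_marks and objects known to be dead, and it chooses the parallel or
// non-parallel block queries depending on how the scan is being run.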
 830 class FreeListSpaceDCTOC : public FilteringDCTOC {
 831   CompactibleFreeListSpace* _cfls;
 832   CMSCollector* _collector;
 833   bool _parallel;
 834 protected:
 835   // Override.
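  // This macro declares, for a given closure type, the virtual walker that
  // dispatches on _parallel together with its parallel and non-parallel
  // implementations.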
 836 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 837   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 838                                        HeapWord* bottom, HeapWord* top, \
 839                                        ClosureType* cl);                \
 840       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 841                                        HeapWord* bottom, HeapWord* top, \
 842                                        ClosureType* cl);                \
 843     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 844                                        HeapWord* bottom, HeapWord* top, \
 845                                        ClosureType* cl)
 846   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 847   walk_mem_region_with_cl_DECL(FilteringClosure);
 848 
 849 public:
 850   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 851                      CMSCollector* collector,
 852                      ExtendedOopClosure* cl,
 853                      CardTable::PrecisionStyle precision,
 854                      HeapWord* boundary,
 855                      bool parallel) :
 856     FilteringDCTOC(sp, cl, precision, boundary),
 857     _cfls(sp), _collector(collector), _parallel(parallel) {}
 858 };
 859 
 860 // We de-virtualize the block-related calls below, since we know that our
 861 // space is a CompactibleFreeListSpace.
 862 
 863 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 864 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 865                                                  HeapWord* bottom,              \
 866                                                  HeapWord* top,                 \
 867                                                  ClosureType* cl) {             \
 868    if (_parallel) {                                                             \
 869      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 870    } else {                                                                     \
 871      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 872    }                                                                            \
 873 }                                                                               \
 874 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 875                                                      HeapWord* bottom,          \
 876                                                      HeapWord* top,             \
 877                                                      ClosureType* cl) {         \
 878   /* Skip parts that are before "mr", in case "block_start" sent us             \
 879      back too far. */                                                           \
 880   HeapWord* mr_start = mr.start();                                              \
 881   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 882   HeapWord* next = bottom + bot_size;                                           \
 883   while (next < mr_start) {                                                     \
 884     bottom = next;                                                              \
 885     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 886     next = bottom + bot_size;                                                   \
 887   }                                                                             \
 888                                                                                 \
 889   while (bottom < top) {                                                        \
 890     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 891         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 892                     oop(bottom)) &&                                             \
 893         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 894       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 895       bottom += _cfls->adjustObjectSize(word_sz);                               \
 896     } else {                                                                    \
 897       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 898     }                                                                           \
 899   }                                                                             \
 900 }                                                                               \
 901 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 902                                                        HeapWord* bottom,        \
 903                                                        HeapWord* top,           \
 904                                                        ClosureType* cl) {       \
 905   /* Skip parts that are before "mr", in case "block_start" sent us             \
 906      back too far. */                                                           \
 907   HeapWord* mr_start = mr.start();                                              \
 908   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 909   HeapWord* next = bottom + bot_size;                                           \
 910   while (next < mr_start) {                                                     \
 911     bottom = next;                                                              \
 912     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 913     next = bottom + bot_size;                                                   \
 914   }                                                                             \
 915                                                                                 \
 916   while (bottom < top) {                                                        \
 917     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 918         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 919                     oop(bottom)) &&                                             \
 920         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 921       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 922       bottom += _cfls->adjustObjectSize(word_sz);                               \
 923     } else {                                                                    \
 924       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 925     }                                                                           \
 926   }                                                                             \
 927 }
 928 
 929 // (There are only two of these, rather than N, because the split is due
 930 // only to the introduction of the FilteringClosure, a local part of the
 931 // impl of this abstraction.)
 932 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 933 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 934 
 935 DirtyCardToOopClosure*
 936 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 937                                       CardTable::PrecisionStyle precision,
 938                                       HeapWord* boundary,
 939                                       bool parallel) {
 940   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 941 }
 942 
 943 
 944 // Note on locking for the space iteration functions:
 945 // since the collector's iteration activities are concurrent with
 946 // allocation activities by mutators, absent a suitable mutual exclusion
 947 // mechanism the iterators may go awry. For instance a block being iterated
 948 // may suddenly be allocated or divided up and part of it allocated and
 949 // so on.
 950 
 951 // Apply the given closure to each block in the space.
 952 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 953   assert_lock_strong(freelistLock());
 954   HeapWord *cur, *limit;
 955   for (cur = bottom(), limit = end(); cur < limit;
 956        cur += cl->do_blk_careful(cur));
 957 }
 958 
 959 // Apply the given closure to each block in the space.
 960 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 961   assert_lock_strong(freelistLock());
 962   HeapWord *cur, *limit;
 963   for (cur = bottom(), limit = end(); cur < limit;
 964        cur += cl->do_blk(cur));
 965 }
 966 
 967 // Apply the given closure to each oop in the space.
 968 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 969   assert_lock_strong(freelistLock());
 970   HeapWord *cur, *limit;
 971   size_t curSize;
 972   for (cur = bottom(), limit = end(); cur < limit;
 973        cur += curSize) {
 974     curSize = block_size(cur);
 975     if (block_is_obj(cur)) {
 976       oop(cur)->oop_iterate(cl);
 977     }
 978   }
 979 }
 980 
 981 // NOTE: In the following methods, in order to safely be able to
 982 // apply the closure to an object, we need to be sure that the
 983 // object has been initialized. We are guaranteed that an object
 984 // is initialized if we are holding the Heap_lock with the
 985 // world stopped.
 986 void CompactibleFreeListSpace::verify_objects_initialized() const {
 987   if (is_init_completed()) {
 988     assert_locked_or_safepoint(Heap_lock);
 989     if (Universe::is_fully_initialized()) {
 990       guarantee(SafepointSynchronize::is_at_safepoint(),
 991                 "Required for objects to be initialized");
 992     }
 993   } // else make a concession at vm start-up
 994 }
 995 
 996 // Apply the given closure to each object in the space
 997 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 998   assert_lock_strong(freelistLock());
 999   NOT_PRODUCT(verify_objects_initialized());
1000   HeapWord *cur, *limit;
1001   size_t curSize;
1002   for (cur = bottom(), limit = end(); cur < limit;
1003        cur += curSize) {
1004     curSize = block_size(cur);
1005     if (block_is_obj(cur)) {
1006       blk->do_object(oop(cur));
1007     }
1008   }
1009 }
1010 
1011 // Apply the given closure to each live object in the space
//   The use of CompactibleFreeListSpace
// by ConcurrentMarkSweepGeneration for concurrent GCs allows
// objects in the space to hold references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
1017 // obj_is_alive() to determine whether it is safe to apply the closure to
1018 // an object.  See obj_is_alive() for details on how liveness of an
1019 // object is decided.
1020 
1021 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
1022   assert_lock_strong(freelistLock());
1023   NOT_PRODUCT(verify_objects_initialized());
1024   HeapWord *cur, *limit;
1025   size_t curSize;
1026   for (cur = bottom(), limit = end(); cur < limit;
1027        cur += curSize) {
1028     curSize = block_size(cur);
1029     if (block_is_obj(cur) && obj_is_alive(cur)) {
1030       blk->do_object(oop(cur));
1031     }
1032   }
1033 }
1034 
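// Apply cl to each object in mr, resuming from the address the closure
// remembered from a previous invocation, and record where this walk stopped
// so that the next invocation can continue from there.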
1035 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
1036                                                   UpwardsObjectClosure* cl) {
1037   assert_locked(freelistLock());
1038   NOT_PRODUCT(verify_objects_initialized());
1039   assert(!mr.is_empty(), "Should be non-empty");
1040   // We use MemRegion(bottom(), end()) rather than used_region() below
1041   // because the two are not necessarily equal for some kinds of
1042   // spaces, in particular, certain kinds of free list spaces.
1043   // We could use the more complicated but more precise:
1044   // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
1045   // but the slight imprecision seems acceptable in the assertion check.
1046   assert(MemRegion(bottom(), end()).contains(mr),
1047          "Should be within used space");
1048   HeapWord* prev = cl->previous();   // max address from last time
1049   if (prev >= mr.end()) { // nothing to do
1050     return;
1051   }
1052   // This assert will not work when we go from cms space to perm
  // space and use the same closure. Easy fix deferred for later. XXX YSR
1054   // assert(prev == NULL || contains(prev), "Should be within space");
1055 
1056   bool last_was_obj_array = false;
1057   HeapWord *blk_start_addr, *region_start_addr;
1058   if (prev > mr.start()) {
1059     region_start_addr = prev;
1060     blk_start_addr    = prev;
1061     // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
1063     // in this region due to a particular coalescing policy.
1064     // Relax the assertion so that the case where the unallocated
1065     // block is maintained and "prev" is beyond the unallocated
1066     // block does not cause the assertion to fire.
1067     assert((BlockOffsetArrayUseUnallocatedBlock &&
1068             (!is_in(prev))) ||
1069            (blk_start_addr == block_start(region_start_addr)), "invariant");
1070   } else {
1071     region_start_addr = mr.start();
1072     blk_start_addr    = block_start(region_start_addr);
1073   }
1074   HeapWord* region_end_addr = mr.end();
1075   MemRegion derived_mr(region_start_addr, region_end_addr);
1076   while (blk_start_addr < region_end_addr) {
1077     const size_t size = block_size(blk_start_addr);
1078     if (block_is_obj(blk_start_addr)) {
1079       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
1080     } else {
1081       last_was_obj_array = false;
1082     }
1083     blk_start_addr += size;
1084   }
1085   if (!last_was_obj_array) {
1086     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
1087            "Should be within (closed) used space");
1088     assert(blk_start_addr > prev, "Invariant");
1089     cl->set_previous(blk_start_addr); // min address for next time
1090   }
1091 }
1092 
1093 // Callers of this iterator beware: The closure application should
1094 // be robust in the face of uninitialized objects and should (always)
1095 // return a correct size so that the next addr + size below gives us a
1096 // valid block boundary. [See for instance,
1097 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
1098 // in ConcurrentMarkSweepGeneration.cpp.]
1099 HeapWord*
1100 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
1101   ObjectClosureCareful* cl) {
1102   assert_lock_strong(freelistLock());
1103   // Can't use used_region() below because it may not necessarily
1104   // be the same as [bottom(),end()); although we could
1105   // use [used_region().start(),align_up(used_region().end(),CardSize)),
1106   // that appears too cumbersome, so we just do the simpler check
1107   // in the assertion below.
1108   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
1109          "mr should be non-empty and within used space");
1110   HeapWord *addr, *end;
1111   size_t size;
1112   for (addr = block_start_careful(mr.start()), end  = mr.end();
1113        addr < end; addr += size) {
1114     FreeChunk* fc = (FreeChunk*)addr;
1115     if (fc->is_free()) {
1116       // Since we hold the free list lock, which protects direct
1117       // allocation in this generation by mutators, a free object
1118       // will remain free throughout this iteration code.
1119       size = fc->size();
1120     } else {
1121       // Note that the object need not necessarily be initialized,
1122       // because (for instance) the free list lock does NOT protect
1123       // object initialization. The closure application below must
1124       // therefore be correct in the face of uninitialized objects.
1125       size = cl->do_object_careful_m(oop(addr), mr);
1126       if (size == 0) {
        // An unparsable object was found. Signal early termination.
1128         return addr;
1129       }
1130     }
1131   }
1132   return NULL;
1133 }
1134 
1135 
1136 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
1137   NOT_PRODUCT(verify_objects_initialized());
1138   return _bt.block_start(p);
1139 }
1140 
1141 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
1142   return _bt.block_start_careful(p);
1143 }
1144 
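// Return the size of the block starting at p.  The block may be a free
// chunk or an object whose allocation races with this query, so the free
// bit and the klass word are re-read (with acquire ordering) until a
// consistent view is obtained.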
1145 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
1146   NOT_PRODUCT(verify_objects_initialized());
1147   // This must be volatile, or else there is a danger that the compiler
1148   // will compile the code below into a sometimes-infinite loop, by keeping
1149   // the value read the first time in a register.
1150   while (true) {
1151     // We must do this until we get a consistent view of the object.
1152     if (FreeChunk::indicatesFreeChunk(p)) {
1153       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1154       size_t res = fc->size();
1155 
1156       // Bugfix for systems with weak memory model (PPC64/IA64). The
1157       // block's free bit was set and we have read the size of the
1158       // block. Acquire and check the free bit again. If the block is
1159       // still free, the read size is correct.
1160       OrderAccess::acquire();
1161 
1162       // If the object is still a free chunk, return the size, else it
1163       // has been allocated so try again.
1164       if (FreeChunk::indicatesFreeChunk(p)) {
1165         assert(res != 0, "Block size should not be 0");
1166         return res;
1167       }
1168     } else {
1169       // Ensure klass read before size.
1170       Klass* k = oop(p)->klass_or_null_acquire();
1171       if (k != NULL) {
1172         assert(k->is_klass(), "Should really be klass oop.");
1173         oop o = (oop)p;
1174         assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
1175 
1176         size_t res = o->size_given_klass(k);
1177         res = adjustObjectSize(res);
1178         assert(res != 0, "Block size should not be 0");
1179         return res;
1180       }
1181     }
1182   }
1183 }
1184 
1185 // TODO: Now that is_parsable is gone, we should combine these two functions.
1186 // A variant of the above that uses the Printezis bits for
1187 // unparsable but allocated objects. This avoids any possible
1188 // stalls waiting for mutators to initialize objects, and is
1189 // thus potentially faster than the variant above. However,
1190 // this variant may return a zero size for a block that is
1191 // under mutation and for which a consistent size cannot be
1192 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1193 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1194                                                      const CMSCollector* c)
1195 const {
1196   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1197   // This must be volatile, or else there is a danger that the compiler
1198   // will compile the code below into a sometimes-infinite loop, by keeping
1199   // the value read the first time in a register.
1200   DEBUG_ONLY(uint loops = 0;)
1201   while (true) {
1202     // We must do this until we get a consistent view of the object.
1203     if (FreeChunk::indicatesFreeChunk(p)) {
1204       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1205       size_t res = fc->size();
1206 
1207       // Bugfix for systems with weak memory model (PPC64/IA64). The
1208       // free bit of the block was set and we have read the size of
1209       // the block. Acquire and check the free bit again. If the
1210       // block is still free, the read size is correct.
1211       OrderAccess::acquire();
1212 
1213       if (FreeChunk::indicatesFreeChunk(p)) {
1214         assert(res != 0, "Block size should not be 0");
1215         assert(loops == 0, "Should be 0");
1216         return res;
1217       }
1218     } else {
1219       // Ensure klass read before size.
1220       Klass* k = oop(p)->klass_or_null_acquire();
1221       if (k != NULL) {
1222         assert(k->is_klass(), "Should really be klass oop.");
1223         oop o = (oop)p;
1224         assert(oopDesc::is_oop(o), "Should be an oop");
1225 
1226         size_t res = o->size_given_klass(k);
1227         res = adjustObjectSize(res);
1228         assert(res != 0, "Block size should not be 0");
1229         return res;
1230       } else {
1231         // May return 0 if P-bits not present.
1232         return c->block_size_if_printezis_bits(p);
1233       }
1234     }
1235     assert(loops == 0, "Can loop at most once");
1236     DEBUG_ONLY(loops++;)
1237   }
1238 }
1239 
1240 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1241   NOT_PRODUCT(verify_objects_initialized());
1242   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1243   FreeChunk* fc = (FreeChunk*)p;
1244   if (fc->is_free()) {
1245     return fc->size();
1246   } else {
1247     // Ignore mark word because this may be a recently promoted
1248     // object whose mark word is used to chain together grey
1249     // objects (the last one would have a null value).
1250     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1251     return adjustObjectSize(oop(p)->size());
1252   }
1253 }
1254 
1255 // This implementation assumes that the property of "being an object" is
1256 // stable.  But being a free chunk may not be (because of parallel
1257 // promotion.)
1258 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1259   FreeChunk* fc = (FreeChunk*)p;
1260   assert(is_in_reserved(p), "Should be in space");
1261   if (FreeChunk::indicatesFreeChunk(p)) return false;
1262   Klass* k = oop(p)->klass_or_null_acquire();
1263   if (k != NULL) {
1264     // Ignore mark word because it may have been used to
1265     // chain together promoted objects (the last one
1266     // would have a null value).
1267     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1268     return true;
1269   } else {
1270     return false;  // Was not an object at the start of collection.
1271   }
1272 }
1273 
1274 // Check if the object is alive. This fact is checked either by consulting
1275 // the main marking bitmap in the sweeping phase or, if it's a permanent
1276 // generation and we're not in the sweeping phase, by checking the
1277 // perm_gen_verify_bit_map where we store the "deadness" information if
1278 // we did not sweep the perm gen in the most recent previous GC cycle.
1279 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1280   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1281          "Else races are possible");
1282   assert(block_is_obj(p), "The address should point to an object");
1283 
1284   // If we're sweeping, we use object liveness information from the main bit map
1285   // for both perm gen and old gen.
1286   // We don't need to lock the bitmap (live_map or dead_map below), because
1287   // EITHER we are in the middle of the sweeping phase, and the
1288   // main marking bit map (live_map below) is locked,
1289   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1290   // is stable, because it's mutated only in the sweeping phase.
1291   // NOTE: This method is also used by jmap where, if class unloading is
1292   // off, the results can return "false" for legitimate perm objects,
1293   // when we are not in the midst of a sweeping phase, which can result
1294   // in jmap not reporting certain perm gen objects. This will be moot
1295   // if/when the perm gen goes away in the future.
1296   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1297     CMSBitMap* live_map = _collector->markBitMap();
1298     return live_map->par_isMarked((HeapWord*) p);
1299   }
1300   return true;
1301 }
1302 
1303 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1304   FreeChunk* fc = (FreeChunk*)p;
1305   assert(is_in_reserved(p), "Should be in space");
1306   assert(_bt.block_start(p) == p, "Should be a block boundary");
1307   if (!fc->is_free()) {
1308     // Ignore mark word because it may have been used to
1309     // chain together promoted objects (the last one
1310     // would have a null value).
1311     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1312     return true;
1313   }
1314   return false;
1315 }
1316 
1317 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1318 // approximate answer if you don't hold the freelistLock() when you call this.
1319 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1320   size_t size = 0;
1321   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1322     debug_only(
1323       // We may be calling here without the lock in which case we
1324       // won't do this modest sanity check.
1325       if (freelistLock()->owned_by_self()) {
1326         size_t total_list_size = 0;
1327         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1328           fc = fc->next()) {
1329           total_list_size += i;
1330         }
1331         assert(total_list_size == i * _indexedFreeList[i].count(),
1332                "Count in list is incorrect");
1333       }
1334     )
1335     size += i * _indexedFreeList[i].count();
1336   }
1337   return size;
1338 }
1339 
1340 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1341   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1342   return allocate(size);
1343 }
1344 
1345 HeapWord*
1346 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1347   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1348 }
1349 
1350 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1351   assert_lock_strong(freelistLock());
1352   HeapWord* res = NULL;
1353   assert(size == adjustObjectSize(size),
1354          "use adjustObjectSize() before calling into allocate()");
1355 
1356   res = allocate_adaptive_freelists(size);
1357 
1358   if (res != NULL) {
1359     // check that res does lie in this space!
1360     assert(is_in_reserved(res), "Not in this space!");
1361     assert(is_aligned((void*)res), "alignment check");
1362 
1363     FreeChunk* fc = (FreeChunk*)res;
1364     fc->markNotFree();
1365     assert(!fc->is_free(), "shouldn't be marked free");
1366     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1367     // Verify that the block offset table shows this to
1368     // be a single block, but not one which is unallocated.
1369     _bt.verify_single_block(res, size);
1370     _bt.verify_not_unallocated(res, size);
1371     // mangle a just allocated object with a distinct pattern.
1372     debug_only(fc->mangleAllocated(size));
1373   }
1374 
1375   return res;
1376 }
1377 
1378 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1379   assert_lock_strong(freelistLock());
1380   HeapWord* res = NULL;
1381   assert(size == adjustObjectSize(size),
1382          "use adjustObjectSize() before calling into allocate()");
1383 
1384   // Strategy
1385   //   if small
1386   //     exact size from small object indexed list
1387   //     small or large linear allocation block (linAB) as appropriate
1388   //     take from lists of greater sized chunks
1389   //   else
1390   //     dictionary
1391   //     small or large linear allocation block if it has the space
1392   // Try allocating exact size from indexTable first
1393   if (size < IndexSetSize) {
1394     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1395     if(res != NULL) {
1396       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1397         "Not removed from free list");
1398       // no block offset table adjustment is necessary on blocks in
1399       // the indexed lists.
1400 
1401     // Try allocating from the small LinAB
1402     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1403         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1404         // if successful, the above also adjusts block offset table
1405         // Note that this call will refill the LinAB to
1406         // satisfy the request.  This is different from
1407         // evm.
1408         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1409     } else {
1410       // Raid the exact free lists larger than size, even if they are not
1411       // overpopulated.
1412       res = (HeapWord*) getChunkFromGreater(size);
1413     }
1414   } else {
1415     // Big objects get allocated directly from the dictionary.
1416     res = (HeapWord*) getChunkFromDictionaryExact(size);
1417     if (res == NULL) {
1418       // Try hard not to fail since an allocation failure will likely
1419       // trigger a synchronous GC.  Try to get the space from the
1420       // allocation blocks.
1421       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1422     }
1423   }
1424 
1425   return res;
1426 }
1427 
1428 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1429 // when promoting obj.
1430 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1431   // Depending on the object size, expansion may require refilling either a
1432   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  A pad of
1433   // 2 * MinChunkSize is added because the dictionary may over-allocate to avoid fragmentation.
1434   size_t space = obj_size;
1435   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1436   return space;
1437 }
1438 
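     // Satisfy a request for numWords by splitting a strictly larger chunk:
     // first scan the indexed free lists of sizes >= numWords + MinChunkSize,
     // then fall back to the dictionary.  In either case the remainder of the
     // split is returned to the free data structures.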
1439 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1440   FreeChunk* ret;
1441 
1442   assert(numWords >= MinChunkSize, "Size is less than minimum");
1443   assert(linearAllocationWouldFail() || bestFitFirst(),
1444     "Should not be here");
1445 
1446   size_t i;
1447   size_t currSize = numWords + MinChunkSize;
1448   assert(is_object_aligned(currSize), "currSize should be aligned");
1449   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1450     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1451     if (fl->head()) {
1452       ret = getFromListGreater(fl, numWords);
1453       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1454       return ret;
1455     }
1456   }
1457 
1458   currSize = MAX2((size_t)SmallForDictionary,
1459                   (size_t)(numWords + MinChunkSize));
1460 
1461   /* Try to get a chunk that satisfies request, while avoiding
1462      fragmentation that can't be handled. */
1463   {
1464     ret =  dictionary()->get_chunk(currSize);
1465     if (ret != NULL) {
1466       assert(ret->size() - numWords >= MinChunkSize,
1467              "Chunk is too small");
1468       _bt.allocated((HeapWord*)ret, ret->size());
1469       /* Carve returned chunk. */
1470       (void) splitChunkAndReturnRemainder(ret, numWords);
1471       /* Label this as no longer a free chunk. */
1472       assert(ret->is_free(), "This chunk should be free");
1473       ret->link_prev(NULL);
1474     }
1475     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1476     return ret;
1477   }
1478   ShouldNotReachHere();
1479 }
1480 
1481 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1482   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1483   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1484 }
1485 
1486 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1487   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1488          (_smallLinearAllocBlock._word_size == fc->size()),
1489          "Linear allocation block shows incorrect size");
1490   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1491           (_smallLinearAllocBlock._word_size == fc->size()));
1492 }
1493 
1494 // Check if the purported free chunk is present either as a linear
1495 // allocation block, the size-indexed table of (smaller) free blocks,
1496 // or the larger free blocks kept in the binary tree dictionary.
1497 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1498   if (verify_chunk_is_linear_alloc_block(fc)) {
1499     return true;
1500   } else if (fc->size() < IndexSetSize) {
1501     return verifyChunkInIndexedFreeLists(fc);
1502   } else {
1503     return dictionary()->verify_chunk_in_free_list(fc);
1504   }
1505 }
1506 
1507 #ifndef PRODUCT
1508 void CompactibleFreeListSpace::assert_locked() const {
1509   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1510 }
1511 
1512 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1513   CMSLockVerifier::assert_locked(lock);
1514 }
1515 #endif
1516 
1517 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1518   // In the parallel case, the main thread holds the free list lock
1519   // on behalf of the parallel threads.
1520   FreeChunk* fc;
1521   {
1522     // If GC is parallel, this might be called by several threads.
1523     // This should be rare enough that the locking overhead won't affect
1524     // the sequential code.
1525     MutexLockerEx x(parDictionaryAllocLock(),
1526                     Mutex::_no_safepoint_check_flag);
1527     fc = getChunkFromDictionary(size);
1528   }
1529   if (fc != NULL) {
1530     fc->dontCoalesce();
1531     assert(fc->is_free(), "Should be free, but not coalescable");
1532     // Verify that the block offset table shows this to
1533     // be a single block, but not one which is unallocated.
1534     _bt.verify_single_block((HeapWord*)fc, fc->size());
1535     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1536   }
1537   return fc;
1538 }
1539 
1540 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1541   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1542   assert_locked();
1543 
1544   // If we are tracking promotions, then first ensure space for
1545   // promotion (including spooling space for saving the header if necessary);
1546   // then allocate and copy, then track promoted info if needed.
1547   // When tracking (see PromotionInfo::track()), the mark word may
1548   // be displaced and in this case restoration of the mark word
1549   // occurs in the (oop_since_save_marks_)iterate phase.
1550   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1551     return NULL;
1552   }
1553   // Call the allocate(size_t, bool) form directly to avoid the
1554   // additional call through the allocate(size_t) form.  Having
1555   // the compiler inline the call is problematic because allocate(size_t)
1556   // is a virtual method.
1557   HeapWord* res = allocate(adjustObjectSize(obj_size));
1558   if (res != NULL) {
1559     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1560     // if we should be tracking promotions, do so.
1561     if (_promoInfo.tracking()) {
1562         _promoInfo.track((PromotedObject*)res);
1563     }
1564   }
1565   return oop(res);
1566 }
1567 
1568 HeapWord*
1569 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1570   assert_locked();
1571   assert(size >= MinChunkSize, "minimum chunk size");
1572   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1573     "maximum from smallLinearAllocBlock");
1574   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1575 }
1576 
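     // getChunkFromLinearAllocBlock() first tries to carve the request out of
     // the block's remainder (the common case).  Failing that, it either takes
     // the block whole (exact fit) or, when a refilled block could satisfy the
     // request, returns the block's too-small residue to the free lists,
     // refills the block and carves the request from the refill; otherwise it
     // returns NULL.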
1577 HeapWord*
1578 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1579                                                        size_t size) {
1580   assert_locked();
1581   assert(size >= MinChunkSize, "too small");
1582   HeapWord* res = NULL;
1583   // Try to do linear allocation from blk, making sure that the block is not empty.
1584   if (blk->_word_size == 0) {
1585     // We have probably been unable to fill this either in the prologue or
1586     // when it was exhausted at the last linear allocation. Bail out until
1587     // next time.
1588     assert(blk->_ptr == NULL, "consistency check");
1589     return NULL;
1590   }
1591   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1592   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1593   if (res != NULL) return res;
1594 
1595   // about to exhaust this linear allocation block
1596   if (blk->_word_size == size) { // exactly satisfied
1597     res = blk->_ptr;
1598     _bt.allocated(res, blk->_word_size);
1599   } else if (size + MinChunkSize <= blk->_refillSize) {
1600     size_t sz = blk->_word_size;
1601     // Update _unallocated_block if the size is such that chunk would be
1602     // returned to the indexed free list.  All other chunks in the indexed
1603     // free lists are allocated from the dictionary so that _unallocated_block
1604     // has already been adjusted for them.  Do it here so that the cost
1605     // is incurred for all chunks added back to the indexed free lists.
1606     if (sz < SmallForDictionary) {
1607       _bt.allocated(blk->_ptr, sz);
1608     }
1609     // Return the chunk that isn't big enough, and then refill below.
1610     addChunkToFreeLists(blk->_ptr, sz);
1611     split_birth(sz);
1612     // Don't keep statistics on adding back chunk from a LinAB.
1613   } else {
1614     // A refilled block would not satisfy the request.
1615     return NULL;
1616   }
1617 
1618   blk->_ptr = NULL; blk->_word_size = 0;
1619   refillLinearAllocBlock(blk);
1620   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1621          "block was replenished");
1622   if (res != NULL) {
1623     split_birth(size);
1624     repairLinearAllocBlock(blk);
1625   } else if (blk->_ptr != NULL) {
1626     res = blk->_ptr;
1627     size_t blk_size = blk->_word_size;
1628     blk->_word_size -= size;
1629     blk->_ptr  += size;
1630     split_birth(size);
1631     repairLinearAllocBlock(blk);
1632     // Update BOT last so that other (parallel) GC threads see a consistent
1633     // view of the BOT and free blocks.
1634     // Above must occur before BOT is updated below.
1635     OrderAccess::storestore();
1636     _bt.split_block(res, blk_size, size);  // adjust block offset table
1637   }
1638   return res;
1639 }
1640 
1641 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1642                                         LinearAllocBlock* blk,
1643                                         size_t size) {
1644   assert_locked();
1645   assert(size >= MinChunkSize, "too small");
1646 
1647   HeapWord* res = NULL;
1648   // This is the common case.  Keep it simple.
1649   if (blk->_word_size >= size + MinChunkSize) {
1650     assert(blk->_ptr != NULL, "consistency check");
1651     res = blk->_ptr;
1652     // Note that the BOT is up-to-date for the linAB before allocation.  It
1653     // indicates the start of the linAB.  The split_block() updates the
1654     // BOT for the linAB after the allocation (indicates the start of the
1655     // next chunk to be allocated).
1656     size_t blk_size = blk->_word_size;
1657     blk->_word_size -= size;
1658     blk->_ptr  += size;
1659     split_birth(size);
1660     repairLinearAllocBlock(blk);
1661     // Update BOT last so that other (parallel) GC threads see a consistent
1662     // view of the BOT and free blocks.
1663     // Above must occur before BOT is updated below.
1664     OrderAccess::storestore();
1665     _bt.split_block(res, blk_size, size);  // adjust block offset table
1666     _bt.allocated(res, size);
1667   }
1668   return res;
1669 }
1670 
1671 FreeChunk*
1672 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1673   assert_locked();
1674   assert(size < SmallForDictionary, "just checking");
1675   FreeChunk* res;
1676   res = _indexedFreeList[size].get_chunk_at_head();
1677   if (res == NULL) {
1678     res = getChunkFromIndexedFreeListHelper(size);
1679   }
1680   _bt.verify_not_unallocated((HeapWord*) res, size);
1681   assert(res == NULL || res->size() == size, "Incorrect block size");
1682   return res;
1683 }
1684 
1685 FreeChunk*
1686 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1687   bool replenish) {
1688   assert_locked();
1689   FreeChunk* fc = NULL;
1690   if (size < SmallForDictionary) {
1691     assert(_indexedFreeList[size].head() == NULL ||
1692       _indexedFreeList[size].surplus() <= 0,
1693       "List for this size should be empty or under populated");
1694     // Try best fit in exact lists before replenishing the list
1695     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1696       // Replenish list.
1697       //
1698       // Things tried that failed.
1699       //   Tried allocating out of the two LinAB's first before
1700       // replenishing lists.
1701       //   Tried small linAB of size 256 (size in indexed list)
1702       // and replenishing indexed lists from the small linAB.
1703       //
1704       FreeChunk* newFc = NULL;
1705       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1706       if (replenish_size < SmallForDictionary) {
1707         // Do not replenish from an underpopulated size.
1708         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1709             _indexedFreeList[replenish_size].head() != NULL) {
1710           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1711         } else if (bestFitFirst()) {
1712           newFc = bestFitSmall(replenish_size);
1713         }
1714       }
1715       if (newFc == NULL && replenish_size > size) {
1716         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1717         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1718       }
1719       // Note: The stats update regarding the split-death of the block obtained
1720       // above will be recorded below, precisely when we know we are going
1721       // to be actually splitting it into more than one piece.
1722       if (newFc != NULL) {
1723         if  (replenish || CMSReplenishIntermediate) {
1724           // Replenish this list and return one block to caller.
1725           size_t i;
1726           FreeChunk *curFc, *nextFc;
1727           size_t num_blk = newFc->size() / size;
1728           assert(num_blk >= 1, "Smaller than requested?");
1729           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1730           if (num_blk > 1) {
1731             // we are sure we will be splitting the block just obtained
1732             // into multiple pieces; record the split-death of the original
1733             splitDeath(replenish_size);
1734           }
1735           // carve up and link blocks 0, ..., num_blk - 2
1736           // The last chunk is not added to the lists but is returned as the
1737           // free chunk.
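               // Example (sizes for illustration only): if size == 8 and newFc
               // is a 32-word chunk, then num_blk == 4; three 8-word blocks are
               // appended to _indexedFreeList[8] by the loop below and the
               // fourth is returned to the caller as fc.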
1738           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1739                i = 0;
1740                i < (num_blk - 1);
1741                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1742                i++) {
1743             curFc->set_size(size);
1744             // Don't record this as a return in order to try to
1745             // determine the "returns" from a GC.
1746             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1747             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1748             _bt.mark_block((HeapWord*)curFc, size);
1749             split_birth(size);
1750             // Don't record the initial population of the indexed list
1751             // as a split birth.
1752           }
1753 
1754           // check that the arithmetic was OK above
1755           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1756             "inconsistency in carving newFc");
1757           curFc->set_size(size);
1758           _bt.mark_block((HeapWord*)curFc, size);
1759           split_birth(size);
1760           fc = curFc;
1761         } else {
1762           // Return entire block to caller
1763           fc = newFc;
1764         }
1765       }
1766     }
1767   } else {
1768     // Get a free chunk from the free chunk dictionary to be returned to
1769     // replenish the indexed free list.
1770     fc = getChunkFromDictionaryExact(size);
1771   }
1772   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1773   return fc;
1774 }
1775 
1776 FreeChunk*
1777 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1778   assert_locked();
1779   FreeChunk* fc = _dictionary->get_chunk(size);
1780   if (fc == NULL) {
1781     return NULL;
1782   }
1783   _bt.allocated((HeapWord*)fc, fc->size());
1784   if (fc->size() >= size + MinChunkSize) {
1785     fc = splitChunkAndReturnRemainder(fc, size);
1786   }
1787   assert(fc->size() >= size, "chunk too small");
1788   assert(fc->size() < size + MinChunkSize, "chunk too big");
1789   _bt.verify_single_block((HeapWord*)fc, fc->size());
1790   return fc;
1791 }
1792 
1793 FreeChunk*
1794 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1795   assert_locked();
1796   FreeChunk* fc = _dictionary->get_chunk(size);
1797   if (fc == NULL) {
1798     return fc;
1799   }
1800   _bt.allocated((HeapWord*)fc, fc->size());
1801   if (fc->size() == size) {
1802     _bt.verify_single_block((HeapWord*)fc, size);
1803     return fc;
1804   }
1805   assert(fc->size() > size, "get_chunk() guarantee");
1806   if (fc->size() < size + MinChunkSize) {
1807     // Return the chunk to the dictionary and go get a bigger one.
1808     returnChunkToDictionary(fc);
1809     fc = _dictionary->get_chunk(size + MinChunkSize);
1810     if (fc == NULL) {
1811       return NULL;
1812     }
1813     _bt.allocated((HeapWord*)fc, fc->size());
1814   }
1815   assert(fc->size() >= size + MinChunkSize, "tautology");
1816   fc = splitChunkAndReturnRemainder(fc, size);
1817   assert(fc->size() == size, "chunk is wrong size");
1818   _bt.verify_single_block((HeapWord*)fc, size);
1819   return fc;
1820 }
1821 
1822 void
1823 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1824   assert_locked();
1825 
1826   size_t size = chunk->size();
1827   _bt.verify_single_block((HeapWord*)chunk, size);
1828   // adjust _unallocated_block downward, as necessary
1829   _bt.freed((HeapWord*)chunk, size);
1830   _dictionary->return_chunk(chunk);
1831 #ifndef PRODUCT
1832   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1833     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1834     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1835     tl->verify_stats();
1836   }
1837 #endif // PRODUCT
1838 }
1839 
1840 void
1841 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1842   assert_locked();
1843   size_t size = fc->size();
1844   _bt.verify_single_block((HeapWord*) fc, size);
1845   _bt.verify_not_unallocated((HeapWord*) fc, size);
1846   _indexedFreeList[size].return_chunk_at_tail(fc);
1847 #ifndef PRODUCT
1848   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1849      _indexedFreeList[size].verify_stats();
1850   }
1851 #endif // PRODUCT
1852 }
1853 
1854 // Add chunk to end of last block -- if it's the largest
1855 // block -- and update BOT and census data. We would
1856 // of course have preferred to coalesce it with the
1857 // last block, but it's currently less expensive to find the
1858 // largest block than it is to find the last.
1859 void
1860 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1861   HeapWord* chunk, size_t     size) {
1862   // check that the chunk does lie in this space!
1863   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1864   // One of the parallel gc task threads may be here
1865   // whilst others are allocating.
1866   Mutex* lock = &_parDictionaryAllocLock;
1867   FreeChunk* ec;
1868   {
1869     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1870     ec = dictionary()->find_largest_dict();  // get largest block
1871     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1872       // It's a coterminal block - we can coalesce.
1873       size_t old_size = ec->size();
1874       coalDeath(old_size);
1875       removeChunkFromDictionary(ec);
1876       size += old_size;
1877     } else {
1878       ec = (FreeChunk*)chunk;
1879     }
1880   }
1881   ec->set_size(size);
1882   debug_only(ec->mangleFreed(size));
1883   if (size < SmallForDictionary) {
1884     lock = _indexedFreeListParLocks[size];
1885   }
1886   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1887   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1888   // record the birth under the lock since the recording involves
1889   // manipulation of the list on which the chunk lives and
1890   // if the chunk is allocated and is the last on the list,
1891   // the list can go away.
1892   coalBirth(size);
1893 }
1894 
1895 void
1896 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1897                                               size_t     size) {
1898   // check that the chunk does lie in this space!
1899   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1900   assert_locked();
1901   _bt.verify_single_block(chunk, size);
1902 
1903   FreeChunk* fc = (FreeChunk*) chunk;
1904   fc->set_size(size);
1905   debug_only(fc->mangleFreed(size));
1906   if (size < SmallForDictionary) {
1907     returnChunkToFreeList(fc);
1908   } else {
1909     returnChunkToDictionary(fc);
1910   }
1911 }
1912 
1913 void
1914 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1915   size_t size, bool coalesced) {
1916   assert_locked();
1917   assert(chunk != NULL, "null chunk");
1918   if (coalesced) {
1919     // repair BOT
1920     _bt.single_block(chunk, size);
1921   }
1922   addChunkToFreeLists(chunk, size);
1923 }
1924 
1925 // We _must_ find the purported chunk on our free lists;
1926 // we assert if we don't.
1927 void
1928 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1929   size_t size = fc->size();
1930   assert_locked();
1931   debug_only(verifyFreeLists());
1932   if (size < SmallForDictionary) {
1933     removeChunkFromIndexedFreeList(fc);
1934   } else {
1935     removeChunkFromDictionary(fc);
1936   }
1937   _bt.verify_single_block((HeapWord*)fc, size);
1938   debug_only(verifyFreeLists());
1939 }
1940 
1941 void
1942 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1943   size_t size = fc->size();
1944   assert_locked();
1945   assert(fc != NULL, "null chunk");
1946   _bt.verify_single_block((HeapWord*)fc, size);
1947   _dictionary->remove_chunk(fc);
1948   // adjust _unallocated_block upward, as necessary
1949   _bt.allocated((HeapWord*)fc, size);
1950 }
1951 
1952 void
1953 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1954   assert_locked();
1955   size_t size = fc->size();
1956   _bt.verify_single_block((HeapWord*)fc, size);
1957   NOT_PRODUCT(
1958     if (FLSVerifyIndexTable) {
1959       verifyIndexedFreeList(size);
1960     }
1961   )
1962   _indexedFreeList[size].remove_chunk(fc);
1963   NOT_PRODUCT(
1964     if (FLSVerifyIndexTable) {
1965       verifyIndexedFreeList(size);
1966     }
1967   )
1968 }
1969 
1970 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1971   /* A hint is the next larger size that has a surplus.
1972      Start search at a size large enough to guarantee that
1973      the excess is >= MIN_CHUNK. */
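       /* The loop below follows the chain of hints (list[start].hint(),
          list[hint].hint(), ...) until it finds a list with both a surplus and
          a non-empty head; that list's head is split via getFromListGreater()
          and list[start]'s hint is reset so that later searches start at a
          productive size. */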
1974   size_t start = align_object_size(numWords + MinChunkSize);
1975   if (start < IndexSetSize) {
1976     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1977     size_t    hint = _indexedFreeList[start].hint();
1978     while (hint < IndexSetSize) {
1979       assert(is_object_aligned(hint), "hint should be aligned");
1980       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1981       if (fl->surplus() > 0 && fl->head() != NULL) {
1982         // Found a list with surplus, reset original hint
1983         // and split out a free chunk which is returned.
1984         _indexedFreeList[start].set_hint(hint);
1985         FreeChunk* res = getFromListGreater(fl, numWords);
1986         assert(res == NULL || res->is_free(),
1987           "Should be returning a free chunk");
1988         return res;
1989       }
1990       hint = fl->hint(); /* keep looking */
1991     }
1992     /* None found. */
1993     it[start].set_hint(IndexSetSize);
1994   }
1995   return NULL;
1996 }
1997 
1998 /* Requires fl->size >= numWords + MinChunkSize */
1999 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
2000   size_t numWords) {
2001   FreeChunk *curr = fl->head();
2002   assert(curr != NULL, "List is empty");
2003   assert(numWords >= MinChunkSize, "Word size is too small");
2004   size_t oldNumWords = curr->size();
2005   assert(oldNumWords >= numWords + MinChunkSize,
2006         "Size of chunks in the list is too small");
2007 
2008   fl->remove_chunk(curr);
2009   // recorded indirectly by splitChunkAndReturnRemainder -
2010   // smallSplit(oldNumWords, numWords);
2011   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
2012   // Does anything have to be done for the remainder in terms of
2013   // fixing the card table?
2014   assert(new_chunk == NULL || new_chunk->is_free(),
2015     "Should be returning a free chunk");
2016   return new_chunk;
2017 }
2018 
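     // Split 'chunk' into a prefix of new_size words, which is returned to the
     // caller, and a remainder which is marked free and handed back to either
     // the indexed free lists (under the per-size parallel lock when needed)
     // or the dictionary; the split is recorded in the census.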
2019 FreeChunk*
2020 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
2021   size_t new_size) {
2022   assert_locked();
2023   size_t size = chunk->size();
2024   assert(size > new_size, "Split from a smaller block?");
2025   assert(is_aligned(chunk), "alignment problem");
2026   assert(size == adjustObjectSize(size), "alignment problem");
2027   size_t rem_sz = size - new_size;
2028   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
2029   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
2030   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
2031   assert(is_aligned(ffc), "alignment problem");
2032   ffc->set_size(rem_sz);
2033   ffc->link_next(NULL);
2034   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2035   // Above must occur before BOT is updated below.
2036   // adjust block offset table
2037   OrderAccess::storestore();
2038   assert(chunk->is_free() && ffc->is_free(), "Error");
2039   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
2040   if (rem_sz < SmallForDictionary) {
2041     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
2042     bool is_par = Thread::current()->is_GC_task_thread();
2043     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
2044     returnChunkToFreeList(ffc);
2045     split(size, rem_sz);
2046     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
2047   } else {
2048     returnChunkToDictionary(ffc);
2049     split(size, rem_sz);
2050   }
2051   chunk->set_size(new_size);
2052   return chunk;
2053 }
2054 
2055 void
2056 CompactibleFreeListSpace::sweep_completed() {
2057   // Now that space is probably plentiful, refill linear
2058   // allocation blocks as needed.
2059   refillLinearAllocBlocksIfNeeded();
2060 }
2061 
2062 void
2063 CompactibleFreeListSpace::gc_prologue() {
2064   assert_locked();
2065   reportFreeListStatistics("Before GC:");
2066   refillLinearAllocBlocksIfNeeded();
2067 }
2068 
2069 void
2070 CompactibleFreeListSpace::gc_epilogue() {
2071   assert_locked();
2072   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2073   _promoInfo.stopTrackingPromotions();
2074   repairLinearAllocationBlocks();
2075   reportFreeListStatistics("After GC:");
2076 }
2077 
2078 // Iteration support, mostly delegated from a CMS generation
2079 
2080 void CompactibleFreeListSpace::save_marks() {
2081   assert(Thread::current()->is_VM_thread(),
2082          "Global variable should only be set when single-threaded");
2083   // Mark the "end" of the used space at the time of this call;
2084   // note, however, that promoted objects from this point
2085   // on are tracked in the _promoInfo below.
2086   set_saved_mark_word(unallocated_block());
2087 #ifdef ASSERT
2088   // Check the sanity of save_marks() etc.
2089   MemRegion ur    = used_region();
2090   MemRegion urasm = used_region_at_save_marks();
2091   assert(ur.contains(urasm),
2092          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
2093          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
2094          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
2095 #endif
2096   // inform allocator that promotions should be tracked.
2097   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2098   _promoInfo.startTrackingPromotions();
2099 }
2100 
2101 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2102   assert(_promoInfo.tracking(), "No preceding save_marks?");
2103   return _promoInfo.noPromotions();
2104 }
2105 
2106 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2107   return _smallLinearAllocBlock._word_size == 0;
2108 }
2109 
2110 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2111   // Fix up linear allocation blocks to look like free blocks
2112   repairLinearAllocBlock(&_smallLinearAllocBlock);
2113 }
2114 
2115 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2116   assert_locked();
2117   if (blk->_ptr != NULL) {
2118     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2119            "Minimum block size requirement");
2120     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2121     fc->set_size(blk->_word_size);
2122     fc->link_prev(NULL);   // mark as free
2123     fc->dontCoalesce();
2124     assert(fc->is_free(), "just marked it free");
2125     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2126   }
2127 }
2128 
2129 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2130   assert_locked();
2131   if (_smallLinearAllocBlock._ptr == NULL) {
2132     assert(_smallLinearAllocBlock._word_size == 0,
2133       "Size of linAB should be zero if the ptr is NULL");
2134     // Reset the linAB refill and allocation size limit.
2135     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2136   }
2137   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2138 }
2139 
2140 void
2141 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2142   assert_locked();
2143   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2144          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2145          "blk invariant");
2146   if (blk->_ptr == NULL) {
2147     refillLinearAllocBlock(blk);
2148   }
2149 }
2150 
2151 void
2152 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2153   assert_locked();
2154   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2155          "linear allocation block should be empty");
2156   FreeChunk* fc;
2157   if (blk->_refillSize < SmallForDictionary &&
2158       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2159     // A linAB's strategy might be to use small sizes to reduce
2160     // fragmentation but still get the benefits of allocation from a
2161     // linAB.
2162   } else {
2163     fc = getChunkFromDictionary(blk->_refillSize);
2164   }
2165   if (fc != NULL) {
2166     blk->_ptr  = (HeapWord*)fc;
2167     blk->_word_size = fc->size();
2168     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2169   }
2170 }
2171 
2172 // Support for compaction
2173 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2174   scan_and_forward(this, cp);
2175   // prepare_for_compaction() uses the space between live objects
2176   // so that later phases can skip dead space quickly.  As a result,
2177   // verification of the free lists doesn't work after this point.
2178 }
2179 
2180 void CompactibleFreeListSpace::adjust_pointers() {
2181   // In other versions of adjust_pointers(), a bail out
2182   // based on the amount of live data in the generation
2183   // (i.e., if 0, bail out) may be used.
2184   // Cannot test used() == 0 here because the free lists have already
2185   // been mangled by the compaction.
2186 
2187   scan_and_adjust_pointers(this);
2188   // See note about verification in prepare_for_compaction().
2189 }
2190 
2191 void CompactibleFreeListSpace::compact() {
2192   scan_and_compact(this);
2193 }
2194 
2195 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2196 // where fbs is free block sizes
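     // For example, a single free block of any size yields 0.0 (no
     // fragmentation), while n equally sized free blocks yield
     // 1 - n*s^2/(n*s)^2 = 1 - 1/n, approaching 1.0 as free space becomes
     // more fragmented.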
2197 double CompactibleFreeListSpace::flsFrag() const {
2198   size_t itabFree = totalSizeInIndexedFreeLists();
2199   double frag = 0.0;
2200   size_t i;
2201 
2202   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2203     double sz  = i;
2204     frag      += _indexedFreeList[i].count() * (sz * sz);
2205   }
2206 
2207   double totFree = itabFree +
2208                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2209   if (totFree > 0) {
2210     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2211             (totFree * totFree));
2212     frag = (double)1.0  - frag;
2213   } else {
2214     assert(frag == 0.0, "Follows from totFree == 0");
2215   }
2216   return frag;
2217 }
2218 
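     // At the start of a sweep, compute the desired length of each indexed
     // free list from the inter- and intra-sweep estimates, derive the
     // coalescing target from it, and snapshot the current count and surplus
     // for the end-of-sweep census; the dictionary does the same for the
     // larger sizes.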
2219 void CompactibleFreeListSpace::beginSweepFLCensus(
2220   float inter_sweep_current,
2221   float inter_sweep_estimate,
2222   float intra_sweep_estimate) {
2223   assert_locked();
2224   size_t i;
2225   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2226     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2227     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2228     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2229     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2230     fl->set_before_sweep(fl->count());
2231     fl->set_bfr_surp(fl->surplus());
2232   }
2233   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2234                                     inter_sweep_current,
2235                                     inter_sweep_estimate,
2236                                     intra_sweep_estimate);
2237 }
2238 
2239 void CompactibleFreeListSpace::setFLSurplus() {
2240   assert_locked();
2241   size_t i;
2242   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2243     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2244     fl->set_surplus(fl->count() -
2245                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2246   }
2247 }
2248 
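     // Walk the indexed lists from the largest size down, pointing each list's
     // hint at the nearest larger size that currently has a surplus
     // (IndexSetSize if there is none); bestFitSmall() follows these hints.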
2249 void CompactibleFreeListSpace::setFLHints() {
2250   assert_locked();
2251   size_t i;
2252   size_t h = IndexSetSize;
2253   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2254     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2255     fl->set_hint(h);
2256     if (fl->surplus() > 0) {
2257       h = i;
2258     }
2259   }
2260 }
2261 
2262 void CompactibleFreeListSpace::clearFLCensus() {
2263   assert_locked();
2264   size_t i;
2265   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2266     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2267     fl->set_prev_sweep(fl->count());
2268     fl->set_coal_births(0);
2269     fl->set_coal_deaths(0);
2270     fl->set_split_births(0);
2271     fl->set_split_deaths(0);
2272   }
2273 }
2274 
2275 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2276   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2277   setFLSurplus();
2278   setFLHints();
2279   printFLCensus(sweep_count);
2280   clearFLCensus();
2281   assert_locked();
2282   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2283 }
2284 
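     // A size is coal-overpopulated when its indexed free list already holds
     // more chunks than the coalescing target computed in beginSweepFLCensus()
     // (or when that target is negative).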
2285 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2286   if (size < SmallForDictionary) {
2287     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2288     return (fl->coal_desired() < 0) ||
2289            ((int)fl->count() > fl->coal_desired());
2290   } else {
2291     return dictionary()->coal_dict_over_populated(size);
2292   }
2293 }
2294 
2295 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2296   assert(size < SmallForDictionary, "Size too large for indexed list");
2297   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2298   fl->increment_coal_births();
2299   fl->increment_surplus();
2300 }
2301 
2302 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2303   assert(size < SmallForDictionary, "Size too large for indexed list");
2304   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2305   fl->increment_coal_deaths();
2306   fl->decrement_surplus();
2307 }
2308 
2309 void CompactibleFreeListSpace::coalBirth(size_t size) {
2310   if (size  < SmallForDictionary) {
2311     smallCoalBirth(size);
2312   } else {
2313     dictionary()->dict_census_update(size,
2314                                    false /* split */,
2315                                    true /* birth */);
2316   }
2317 }
2318 
2319 void CompactibleFreeListSpace::coalDeath(size_t size) {
2320   if(size  < SmallForDictionary) {
2321     smallCoalDeath(size);
2322   } else {
2323     dictionary()->dict_census_update(size,
2324                                    false /* split */,
2325                                    false /* birth */);
2326   }
2327 }
2328 
2329 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2330   assert(size < SmallForDictionary, "Size too large for indexed list");
2331   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2332   fl->increment_split_births();
2333   fl->increment_surplus();
2334 }
2335 
2336 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2337   assert(size < SmallForDictionary, "Size too large for indexed list");
2338   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2339   fl->increment_split_deaths();
2340   fl->decrement_surplus();
2341 }
2342 
2343 void CompactibleFreeListSpace::split_birth(size_t size) {
2344   if (size  < SmallForDictionary) {
2345     smallSplitBirth(size);
2346   } else {
2347     dictionary()->dict_census_update(size,
2348                                    true /* split */,
2349                                    true /* birth */);
2350   }
2351 }
2352 
2353 void CompactibleFreeListSpace::splitDeath(size_t size) {
2354   if (size  < SmallForDictionary) {
2355     smallSplitDeath(size);
2356   } else {
2357     dictionary()->dict_census_update(size,
2358                                    true /* split */,
2359                                    false /* birth */);
2360   }
2361 }
2362 
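     // For example, split(100, 40) records a split death for size 100 and
     // split births for sizes 40 and 60 (sizes for illustration only).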
2363 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2364   size_t to2 = from - to1;
2365   splitDeath(from);
2366   split_birth(to1);
2367   split_birth(to2);
2368 }
2369 
2370 void CompactibleFreeListSpace::print() const {
2371   print_on(tty);
2372 }
2373 
2374 void CompactibleFreeListSpace::prepare_for_verify() {
2375   assert_locked();
2376   repairLinearAllocationBlocks();
2377   // Verify that the SpoolBlocks look like free blocks of
2378   // appropriate sizes... To be done ...
2379 }
2380 
2381 class VerifyAllBlksClosure: public BlkClosure {
2382  private:
2383   const CompactibleFreeListSpace* _sp;
2384   const MemRegion                 _span;
2385   HeapWord*                       _last_addr;
2386   size_t                          _last_size;
2387   bool                            _last_was_obj;
2388   bool                            _last_was_live;
2389 
2390  public:
2391   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2392     MemRegion span) :  _sp(sp), _span(span),
2393                        _last_addr(NULL), _last_size(0),
2394                        _last_was_obj(false), _last_was_live(false) { }
2395 
2396   virtual size_t do_blk(HeapWord* addr) {
2397     size_t res;
2398     bool   was_obj  = false;
2399     bool   was_live = false;
2400     if (_sp->block_is_obj(addr)) {
2401       was_obj = true;
2402       oop p = oop(addr);
2403       guarantee(oopDesc::is_oop(p), "Should be an oop");
2404       res = _sp->adjustObjectSize(p->size());
2405       if (_sp->obj_is_alive(addr)) {
2406         was_live = true;
2407         p->verify();
2408       }
2409     } else {
2410       FreeChunk* fc = (FreeChunk*)addr;
2411       res = fc->size();
2412       if (FLSVerifyLists && !fc->cantCoalesce()) {
2413         guarantee(_sp->verify_chunk_in_free_list(fc),
2414                   "Chunk should be on a free list");
2415       }
2416     }
2417     if (res == 0) {
2418       Log(gc, verify) log;
2419       log.error("Livelock: no rank reduction!");
2420       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2421                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2422         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2423         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2424       LogStream ls(log.error());
2425       _sp->print_on(&ls);
2426       guarantee(false, "Verification failed.");
2427     }
2428     _last_addr = addr;
2429     _last_size = res;
2430     _last_was_obj  = was_obj;
2431     _last_was_live = was_live;
2432     return res;
2433   }
2434 };
2435 
2436 class VerifyAllOopsClosure: public OopClosure {
2437  private:
2438   const CMSCollector*             _collector;
2439   const CompactibleFreeListSpace* _sp;
2440   const MemRegion                 _span;
2441   const bool                      _past_remark;
2442   const CMSBitMap*                _bit_map;
2443 
2444  protected:
2445   void do_oop(void* p, oop obj) {
2446     if (_span.contains(obj)) { // the interior oop points into CMS heap
2447       if (!_span.contains(p)) { // reference from outside CMS heap
2448         // Should be a valid object; the first disjunct below allows
2449         // us to sidestep an assertion in block_is_obj() that insists
2450         // that p be in _sp. Note that several generations (and spaces)
2451         // are spanned by _span (CMS heap) above.
2452         guarantee(!_sp->is_in_reserved(obj) ||
2453                   _sp->block_is_obj((HeapWord*)obj),
2454                   "Should be an object");
2455         guarantee(oopDesc::is_oop(obj), "Should be an oop");
2456         obj->verify();
2457         if (_past_remark) {
2458           // Remark has been completed, the object should be marked
2459           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2460         }
2461       } else { // reference within CMS heap
2462         if (_past_remark) {
2463           // Remark has been completed -- so the referent should have
2464           // been marked, if referring object is.
2465           if (_bit_map->isMarked(_collector->block_start(p))) {
2466             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2467           }
2468         }
2469       }
2470     } else if (_sp->is_in_reserved(p)) {
2471       // the reference is from FLS, and points out of FLS
2472       guarantee(oopDesc::is_oop(obj), "Should be an oop");
2473       obj->verify();
2474     }
2475   }
2476 
2477   template <class T> void do_oop_work(T* p) {
2478     T heap_oop = RawAccess<>::oop_load(p);
2479     if (!CompressedOops::is_null(heap_oop)) {
2480       oop obj = CompressedOops::decode_not_null(heap_oop);
2481       do_oop(p, obj);
2482     }
2483   }
2484 
2485  public:
2486   VerifyAllOopsClosure(const CMSCollector* collector,
2487     const CompactibleFreeListSpace* sp, MemRegion span,
2488     bool past_remark, CMSBitMap* bit_map) :
2489     _collector(collector), _sp(sp), _span(span),
2490     _past_remark(past_remark), _bit_map(bit_map) { }
2491 
2492   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2493   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2494 };
2495 
2496 void CompactibleFreeListSpace::verify() const {
2497   assert_lock_strong(&_freelistLock);
2498   verify_objects_initialized();
2499   MemRegion span = _collector->_span;
2500   bool past_remark = (_collector->abstract_state() ==
2501                       CMSCollector::Sweeping);
2502 
2503   ResourceMark rm;
2504   HandleMark  hm;
2505 
2506   // Check integrity of CFL data structures
2507   _promoInfo.verify();
2508   _dictionary->verify();
2509   if (FLSVerifyIndexTable) {
2510     verifyIndexedFreeLists();
2511   }
2512   // Check integrity of all objects and free blocks in space
2513   {
2514     VerifyAllBlksClosure cl(this, span);
2515     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2516   }
2517   // Check that all references in the heap to FLS
2518   // are to valid objects in FLS or that references in
2519   // FLS are to valid objects elsewhere in the heap
2520   if (FLSVerifyAllHeapReferences)
2521   {
2522     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2523       _collector->markBitMap());
2524 
2525     // Iterate over all oops in the heap. Uses the _no_header version
2526     // since we are not interested in following the klass pointers.
2527     CMSHeap::heap()->oop_iterate_no_header(&cl);
2528   }
2529 
2530   if (VerifyObjectStartArray) {
2531     // Verify the block offset table
2532     _bt.verify();
2533   }
2534 }
2535 
2536 #ifndef PRODUCT
2537 void CompactibleFreeListSpace::verifyFreeLists() const {
2538   if (FLSVerifyLists) {
2539     _dictionary->verify();
2540     verifyIndexedFreeLists();
2541   } else {
2542     if (FLSVerifyDictionary) {
2543       _dictionary->verify();
2544     }
2545     if (FLSVerifyIndexTable) {
2546       verifyIndexedFreeLists();
2547     }
2548   }
2549 }
2550 #endif
2551 
2552 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2553   size_t i = 0;
2554   for (; i < IndexSetStart; i++) {
2555     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2556   }
2557   for (; i < IndexSetSize; i++) {
2558     verifyIndexedFreeList(i);
2559   }
2560 }
2561 
2562 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2563   FreeChunk* fc   =  _indexedFreeList[size].head();
2564   FreeChunk* tail =  _indexedFreeList[size].tail();
2565   size_t    num = _indexedFreeList[size].count();
2566   size_t      n = 0;
2567   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2568             "Slot should have been empty");
2569   for (; fc != NULL; fc = fc->next(), n++) {
2570     guarantee(fc->size() == size, "Size inconsistency");
2571     guarantee(fc->is_free(), "!free?");
2572     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2573     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2574   }
2575   guarantee(n == num, "Incorrect count");
2576 }
2577 
2578 #ifndef PRODUCT
2579 void CompactibleFreeListSpace::check_free_list_consistency() const {
2580   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2581     "Some sizes can't be allocated without recourse to"
2582     " linear allocation buffers");
2583   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2584     "else MIN_TREE_CHUNK_SIZE is wrong");
2585   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2586   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2587 }
2588 #endif
2589 
2590 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2591   assert_lock_strong(&_freelistLock);
2592   LogTarget(Debug, gc, freelist, census) log;
2593   if (!log.is_enabled()) {
2594     return;
2595   }
2596   AdaptiveFreeList<FreeChunk> total;
2597   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2598   ResourceMark rm;
2599   LogStream ls(log);
2600   outputStream* out = &ls;
2601   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2602   size_t total_free = 0;
2603   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2604     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2605     total_free += fl->count() * fl->size();
2606     if (i % (40*IndexSetStride) == 0) {
2607       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2608     }
2609     fl->print_on(out);
2610     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2611     total.set_surplus(    total.surplus()     + fl->surplus()    );
2612     total.set_desired(    total.desired()     + fl->desired()    );
2613     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2614     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2615     total.set_count(      total.count()       + fl->count()      );
2616     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2617     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2618     total.set_split_births(total.split_births() + fl->split_births());
2619     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2620   }
2621   total.print_on(out, "TOTAL");
2622   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
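       // "growth" is the net number of blocks gained through splitting and
       // coalescing since the previous sweep, relative to the population recorded
       // at that sweep; "deficit" is the fraction of the desired count that is
       // currently missing.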
2623   log.print("growth: %8.5f  deficit: %8.5f",
2624             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2625                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2626             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2627   _dictionary->print_dict_census(out);
2628 }
2629 
2630 ///////////////////////////////////////////////////////////////////////////
2631 // CompactibleFreeListSpaceLAB
2632 ///////////////////////////////////////////////////////////////////////////
2633 
2634 #define VECTOR_257(x)                                                                                  \
2635   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2636   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2637      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2638      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2639      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2640      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2641      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2642      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2643      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2644      x }
2645 
2646 // Initialize with the default setting for CMS, _not_ the
2647 // generic OldPLABSize, whose static default is different; if overridden on the
2648 // command line, this will get reinitialized via a call to
2649 // modify_initialization() below.
2650 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2651   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2652 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2653 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2654 
2655 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2656   _cfls(cfls)
2657 {
2658   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2659   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2660        i < CompactibleFreeListSpace::IndexSetSize;
2661        i += CompactibleFreeListSpace::IndexSetStride) {
2662     _indexedFreeList[i].set_size(i);
2663     _num_blocks[i] = 0;
2664   }
2665 }
2666 
2667 static bool _CFLS_LAB_modified = false;
2668 
2669 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2670   assert(!_CFLS_LAB_modified, "Call only once");
2671   _CFLS_LAB_modified = true;
2672   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2673        i < CompactibleFreeListSpace::IndexSetSize;
2674        i += CompactibleFreeListSpace::IndexSetStride) {
2675     _blocks_to_claim[i].modify(n, wt, true /* force */);
2676   }
2677 }
2678 
2679 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2680   FreeChunk* res;
2681   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2682   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2683     // This locking manages sync with other large object allocations.
2684     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2685                     Mutex::_no_safepoint_check_flag);
2686     res = _cfls->getChunkFromDictionaryExact(word_sz);
2687     if (res == NULL) return NULL;
2688   } else {
2689     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2690     if (fl->count() == 0) {
2691       // Attempt to refill this local free list.
2692       get_from_global_pool(word_sz, fl);
2693       // If it didn't work, give up.
2694       if (fl->count() == 0) return NULL;
2695     }
2696     res = fl->get_chunk_at_head();
2697     assert(res != NULL, "Why was count non-zero?");
2698   }
2699   res->markNotFree();
2700   assert(!res->is_free(), "shouldn't be marked free");
2701   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2702   // Mangle a just-allocated object with a distinct pattern.
2703   debug_only(res->mangleAllocated(word_sz));
2704   return (HeapWord*)res;
2705 }
2706 
2707 // Get a chunk of blocks of the right size and update related
2708 // book-keeping stats
2709 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2710   // Get the #blocks we want to claim
2711   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2712   assert(n_blks > 0, "Error");
2713   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2714   // In some cases, when the application has a phase change,
2715   // there may be a sudden and sharp shift in the object survival
2716   // profile, and updating the counts at the end of a scavenge
2717   // may not be quick enough, giving rise to large scavenge pauses
2718   // during these phase changes. It is beneficial to detect such
2719   // changes on-the-fly during a scavenge and avoid such a phase-change
2720   // pothole. The following code is a heuristic attempt to do that.
2721   // It is protected by a product flag until we have gained
2722   // enough experience with this heuristic and fine-tuned its behavior.
2723   // WARNING: This might increase fragmentation if we overreact to
2724   // small spikes, so some kind of historical smoothing based on
2725   // previous experience with the greater reactivity might be useful.
2726   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2727   // default.
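       // For example, with illustrative values CMSOldPLABToleranceFactor == 4,
       // CMSOldPLABNumRefills == 4, CMSOldPLABReactivityFactor == 2 and
       // n_blks == 50, a thread that has already used 4000 blocks of this size
       // computes multiple == 4000/(4.0*4*50) == 5 and inflates its claim to
       // 50 + 2*5*50 == 550 blocks, subject to the CMSOldPLABMax cap below.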
2728   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2729     //
2730     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2731     // which is why there is a cast to double.
2732     //
2733     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2734     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2735     n_blks = MIN2(n_blks, CMSOldPLABMax);
2736   }
2737   assert(n_blks > 0, "Error");
2738   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2739   // Update stats table entry for this block size
2740   _num_blocks[word_sz] += fl->count();
2741 }
2742 
2743 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2744   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2745        i < CompactibleFreeListSpace::IndexSetSize;
2746        i += CompactibleFreeListSpace::IndexSetStride) {
2747     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2748            "Counter inconsistency");
2749     if (_global_num_workers[i] > 0) {
2750       // Need to smooth wrt historical average
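           // The new sample is the per-worker, per-refill demand seen in the last
           // scavenge: the global count of blocks of this size used, divided by the
           // number of workers that used this size and by CMSOldPLABNumRefills,
           // clamped to [CMSOldPLABMin, CMSOldPLABMax].  E.g., 1200 blocks over
           // 4 workers with an illustrative 4 refills samples 75.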
2751       if (ResizeOldPLAB) {
2752         _blocks_to_claim[i].sample(
2753           MAX2(CMSOldPLABMin,
2754           MIN2(CMSOldPLABMax,
2755                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2756       }
2757       // Reset counters for next round
2758       _global_num_workers[i] = 0;
2759       _global_num_blocks[i] = 0;
2760       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2761     }
2762   }
2763 }
2764 
2765 // If this is changed in the future to allow parallel
2766 // access, one would need to take the FL locks and,
2767 // depending on how it is used, stagger access from
2768 // parallel threads to reduce contention.
2769 void CompactibleFreeListSpaceLAB::retire(int tid) {
2770   // We run this single threaded with the world stopped;
2771   // so no need for locks and such.
2772   NOT_PRODUCT(Thread* t = Thread::current();)
2773   assert(Thread::current()->is_VM_thread(), "Error");
2774   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2775        i < CompactibleFreeListSpace::IndexSetSize;
2776        i += CompactibleFreeListSpace::IndexSetStride) {
2777     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2778            "Can't retire more than what we obtained");
2779     if (_num_blocks[i] > 0) {
2780       size_t num_retire =  _indexedFreeList[i].count();
2781       assert(_num_blocks[i] > num_retire, "Should have used at least one");
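           // _num_blocks[i] is everything this thread obtained from the global pool
           // for this size, while num_retire is what is still sitting unused on the
           // local list; the difference is what was actually allocated and is what
           // feeds the global demand statistics below.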
2782       {
2783         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2784         //                Mutex::_no_safepoint_check_flag);
2785 
2786         // Update global stats for the number of blocks used
2787         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2788         _global_num_workers[i]++;
2789         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2790         if (num_retire > 0) {
2791           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2792           // Reset this list.
2793           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2794           _indexedFreeList[i].set_size(i);
2795         }
2796       }
2797       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2798                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2799       // Reset stats for next round
2800       _num_blocks[i]         = 0;
2801     }
2802   }
2803 }
2804 
2805 // Used by par_get_chunk_of_blocks() for the chunks from the
2806 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2807 // of "word_sz" and, if found, splits it into "word_sz"-sized chunks and adds
2808 // them to the free list "fl".  "n" is the maximum number of chunks to
2809 // be added to "fl".
2810 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2811 
2812   // We'll try all multiples of word_sz in the indexed set, starting with
2813   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples as well;
2814   // failing that, the caller falls back to splitting a big chunk from the dictionary.
2815   {
2816     bool found;
2817     int  k;
2818     size_t cur_sz;
2819     for (k = 1, cur_sz = k * word_sz, found = false;
2820          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2821          (CMSSplitIndexedFreeListBlocks || k <= 1);
2822          k++, cur_sz = k * word_sz) {
2823       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2824       fl_for_cur_sz.set_size(cur_sz);
2825       {
2826         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2827                         Mutex::_no_safepoint_check_flag);
2828         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2829         if (gfl->count() != 0) {
2830           // nn is the number of chunks of size cur_sz that
2831           // we'd need to split k-ways each, in order to create
2832           // "n" chunks of size word_sz each.
2833           const size_t nn = MAX2(n/k, (size_t)1);
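               // E.g., to produce n == 12 chunks of word_sz from blocks of size
               // cur_sz == 3*word_sz (k == 3), we take nn == 4 such blocks and
               // split each of them 3 ways below.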
2834           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2835           found = true;
2836           if (k > 1) {
2837             // Update split death stats for the cur_sz-size blocks list:
2838             // we increment the split death count by the number of blocks
2839             // we just took from the cur_sz-size blocks list and which
2840             // we will be splitting below.
2841             ssize_t deaths = gfl->split_deaths() +
2842                              fl_for_cur_sz.count();
2843             gfl->set_split_deaths(deaths);
2844           }
2845         }
2846       }
2847       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2848       if (found) {
2849         if (k == 1) {
2850           fl->prepend(&fl_for_cur_sz);
2851         } else {
2852           // Divide each block on fl_for_cur_sz up k ways.
2853           FreeChunk* fc;
2854           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2855             // Must do this in reverse order, so that anybody attempting to
2856             // access the main chunk sees it as a single free block until we
2857             // change it.
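                 // Carving pieces off the right end first means the header at fc
                 // keeps describing the whole cur_sz-sized block until the final
                 // iteration (i == 0), when fc itself is resized to word_sz.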
2858             size_t fc_size = fc->size();
2859             assert(fc->is_free(), "Error");
2860             for (int i = k-1; i >= 0; i--) {
2861               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2862               assert((i != 0) ||
2863                         ((fc == ffc) && ffc->is_free() &&
2864                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2865                         "Counting error");
2866               ffc->set_size(word_sz);
2867               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2868               ffc->link_next(NULL);
2869               // Above must occur before BOT is updated below.
2870               OrderAccess::storestore();
2871               // splitting from the right, fc_size == i * word_sz
2872               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2873               fc_size -= word_sz;
2874               assert(fc_size == i*word_sz, "Error");
2875               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2876               _bt.verify_single_block((HeapWord*)fc, fc_size);
2877               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2878               // Push this on "fl".
2879               fl->return_chunk_at_head(ffc);
2880             }
2881             // TRAP
2882             assert(fl->tail()->next() == NULL, "List invariant.");
2883           }
2884         }
2885         // Update birth stats for this block size.
2886         size_t num = fl->count();
2887         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2888                         Mutex::_no_safepoint_check_flag);
2889         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2890         _indexedFreeList[word_sz].set_split_births(births);
2891         return true;
2892       }
2893     }
2894     return found;
2895   }
2896 }
2897 
2898 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2899 
2900   FreeChunk* fc = NULL;
2901   FreeChunk* rem_fc = NULL;
2902   size_t rem;
2903   {
2904     MutexLockerEx x(parDictionaryAllocLock(),
2905                     Mutex::_no_safepoint_check_flag);
2906     while (n > 0) {
2907       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
2908       if (fc != NULL) {
2909         break;
2910       } else {
2911         n--;
2912       }
2913     }
2914     if (fc == NULL) return NULL;
2915     // Otherwise, split up that block.
2916     assert((ssize_t)n >= 1, "Control point invariant");
2917     assert(fc->is_free(), "Error: should be a free block");
2918     _bt.verify_single_block((HeapWord*)fc, fc->size());
2919     const size_t nn = fc->size() / word_sz;
2920     n = MIN2(nn, n);
2921     assert((ssize_t)n >= 1, "Control point invariant");
2922     rem = fc->size() - n * word_sz;
2923     // If there is a remainder, and it's too small, allocate one fewer.
2924     if (rem > 0 && rem < MinChunkSize) {
2925       n--; rem += word_sz;
2926     }
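         // E.g., with word_sz == 10, fc->size() == 103 and a request for 10 or more
         // chunks, n becomes 10 and rem == 3; if a 3-word remainder is below
         // MinChunkSize, we give back one split (n == 9) so the remainder grows to
         // a usable 13 words.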
2927     // Note that at this point we may have n == 0.
2928     assert((ssize_t)n >= 0, "Control point invariant");
2929 
2930     // If n is 0, the chunk fc that was found is not large
2931     // enough to leave a viable remainder.  We are unable to
2932     // allocate even one block.  Return fc to the
2933     // dictionary and return, leaving "fl" empty.
2934     if (n == 0) {
2935       returnChunkToDictionary(fc);
2936       return NULL;
2937     }
2938 
2939     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2940     dictionary()->dict_census_update(fc->size(),
2941                                      true /*split*/,
2942                                      false /*birth*/);
2943 
2944     // First return the remainder, if any.
2945     // Note that we hold the lock until we decide if we're going to give
2946     // back the remainder to the dictionary, since a concurrent allocation
2947     // may otherwise see the heap as empty.  (We're willing to take that
2948     // hit if the block is a small block.)
2949     if (rem > 0) {
2950       size_t prefix_size = n * word_sz;
2951       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2952       rem_fc->set_size(rem);
2953       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2954       rem_fc->link_next(NULL);
2955       // Above must occur before BOT is updated below.
2956       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2957       OrderAccess::storestore();
2958       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2959       assert(fc->is_free(), "Error");
2960       fc->set_size(prefix_size);
2961       if (rem >= IndexSetSize) {
2962         returnChunkToDictionary(rem_fc);
2963         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2964         rem_fc = NULL;
2965       }
2966       // Otherwise, return it to the small list below.
2967     }
2968   }
2969   if (rem_fc != NULL) {
2970     MutexLockerEx x(_indexedFreeListParLocks[rem],
2971                     Mutex::_no_safepoint_check_flag);
2972     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2973     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2974     smallSplitBirth(rem);
2975   }
2976   assert(n * word_sz == fc->size(),
2977          "Chunk size " SIZE_FORMAT " is not exactly splittable by "
2978          SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
2979          fc->size(), n, word_sz);
2980   return fc;
2981 }
2982 
2983 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2984 
2985   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2986 
2987   if (fc == NULL) {
2988     return;
2989   }
2990 
2991   size_t n = fc->size() / word_sz;
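       // The chunk handed back may cover fewer blocks than
       // targetted_number_of_chunks; recompute n from its actual size.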
2992 
2993   assert((ssize_t)n > 0, "Consistency");
2994   // Now do the splitting up.
2995   // Must do this in reverse order, so that anybody attempting to
2996   // access the main chunk sees it as a single free block until we
2997   // change it.
2998   size_t fc_size = n * word_sz;
2999   // All but first chunk in this loop
3000   for (ssize_t i = n-1; i > 0; i--) {
3001     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
3002     ffc->set_size(word_sz);
3003     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
3004     ffc->link_next(NULL);
3005     // Above must occur before BOT is updated below.
3006     OrderAccess::storestore();
3007     // splitting from the right, fc_size == i * word_sz
3008     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
3009     fc_size -= word_sz;
3010     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
3011     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
3012     _bt.verify_single_block((HeapWord*)fc, fc_size);
3013     // Push this on "fl".
3014     fl->return_chunk_at_head(ffc);
3015   }
3016   // First chunk
3017   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
3018   // The blocks above must show their new sizes before fc's size is changed below.
3019   fc->set_size(word_sz);
3020   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
3021   fc->link_next(NULL);
3022   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
3023   _bt.verify_single_block((HeapWord*)fc, fc->size());
3024   fl->return_chunk_at_head(fc);
3025 
3026   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
3027   {
3028     // Update the stats for this block size.
3029     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
3030                     Mutex::_no_safepoint_check_flag);
3031     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
3032     _indexedFreeList[word_sz].set_split_births(births);
3033     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
3034     // _indexedFreeList[word_sz].set_surplus(new_surplus);
3035   }
3036 
3037   // TRAP
3038   assert(fl->tail()->next() == NULL, "List invariant.");
3039 }
3040 
3041 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
3042   assert(fl->count() == 0, "Precondition.");
3043   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
3044          "Precondition");
3045 
3046   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
3047     // Got it
3048     return;
3049   }
3050 
3051   // Otherwise, we'll split a block from the dictionary.
3052   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
3053 }
3054 
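     // Ergonomic upper bound for the task-size multiplier flags: assuming a task
     // is the flag value times CardTable::card_size_in_words * BitsPerWord words,
     // this is the largest multiplier for which a single task still fits within
     // the old generation's reserved space.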
3055 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
3056   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
3057   return ergo_max;
3058 }
3059 
3060 // Set up the space's par_seq_tasks structure for work claiming
3061 // for parallel rescan. See CMSParRemarkTask where this is currently used.
3062 // XXX Need to suitably abstract and generalize this and the next
3063 // method into one.
3064 void
3065 CompactibleFreeListSpace::
3066 initialize_sequential_subtasks_for_rescan(int n_threads) {
3067   // The "size" of each task is fixed according to rescan_task_size.
3068   assert(n_threads > 0, "Unexpected n_threads argument");
3069   const size_t task_size = rescan_task_size();
3070   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
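       // i.e. the number of task_size-word strides needed to cover the used
       // region: ceil(used_region().word_size() / task_size).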
3071   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
3072   assert(n_tasks == 0 ||
3073          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
3074           (used_region().start() + n_tasks*task_size >= used_region().end())),
3075          "n_tasks calculation incorrect");
3076   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3077   assert(!pst->valid(), "Clobbering existing data?");
3078   // Sets the condition for completion of the subtask (how many threads
3079   // need to finish in order to be done).
3080   pst->set_n_threads(n_threads);
3081   pst->set_n_tasks((int)n_tasks);
3082 }
3083 
3084 // Set up the space's par_seq_tasks structure for work claiming
3085 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
3086 void
3087 CompactibleFreeListSpace::
3088 initialize_sequential_subtasks_for_marking(int n_threads,
3089                                            HeapWord* low) {
3090   // The "size" of each task is fixed according to marking_task_size.
3091   assert(n_threads > 0, "Unexpected n_threads argument");
3092   const size_t task_size = marking_task_size();
3093   assert(task_size > CardTable::card_size_in_words &&
3094          (task_size %  CardTable::card_size_in_words == 0),
3095          "Otherwise arithmetic below would be incorrect");
3096   MemRegion span = _old_gen->reserved();
3097   if (low != NULL) {
3098     if (span.contains(low)) {
3099       // Align low down to a card boundary so that
3100       // we can use block_offset_careful() on span boundaries.
3101       HeapWord* aligned_low = align_down(low, CardTable::card_size);
3102       // Clip span prefix at aligned_low
3103       span = span.intersection(MemRegion(aligned_low, span.end()));
3104     } else if (low > span.end()) {
3105       span = MemRegion(low, low);  // Null region
3106     } // else use entire span
3107   }
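       // span is now either empty or starts on a card boundary (the original
       // reservation, or its suffix beginning at aligned_low), which the assert
       // below checks.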
3108   assert(span.is_empty() ||
3109          ((uintptr_t)span.start() %  CardTable::card_size == 0),
3110         "span should start at a card boundary");
3111   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3112   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3113   assert(n_tasks == 0 ||
3114          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3115           (span.start() + n_tasks*task_size >= span.end())),
3116          "n_tasks calculation incorrect");
3117   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3118   assert(!pst->valid(), "Clobbering existing data?");
3119   // Sets the condition for completion of the subtask (how many threads
3120   // need to finish in order to be done).
3121   pst->set_n_threads(n_threads);
3122   pst->set_n_tasks((int)n_tasks);
3123 }