1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/space.inline.hpp"
  37 #include "memory/universe.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 #include "runtime/vmThread.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 /////////////////////////////////////////////////////////////////////////
  48 //// CompactibleFreeListSpace
  49 /////////////////////////////////////////////////////////////////////////
  50 
  51 // highest ranked free list lock rank
  52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  53 
  54 // Defaults are 0 so things will break badly if incorrectly initialized.
  55 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  57 
  58 size_t MinChunkSize = 0;
  59 
  60 void CompactibleFreeListSpace::set_cms_values() {
  61   // Set CMS global values
  62   assert(MinChunkSize == 0, "already set");
  63 
  64   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  65   // for chunks to contain a FreeChunk.
  66   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  67   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  68 
  69   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  70   IndexSetStart  = MinChunkSize;
  71   IndexSetStride = MinObjAlignment;
  72 }
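     // For illustration of set_cms_values() above (a sketch only; the actual
     // values are platform-dependent): on a 64-bit VM with
     // MinObjAlignmentInBytes == 8, if sizeof(FreeChunk) were 24 bytes, then
     // align_size_up(24, 8) == 24 and MinChunkSize == 24 / BytesPerWord == 3
     // words, so IndexSetStart == 3 and IndexSetStride == MinObjAlignment == 1.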
  73 
  74 // Constructor
  75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  76   MemRegion mr, bool use_adaptive_freelists,
  77   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  78   _dictionaryChoice(dictionaryChoice),
  79   _adaptive_freelists(use_adaptive_freelists),
  80   _bt(bs, mr),
  81   // free list locks are in the range of values taken by _lockRank
  82   // This range currently is [_leaf+2, _leaf+3]
  83   // Note: this requires that CFLspace c'tors
  84   // are called serially in the order in which the locks are
  85   // acquired in the program text. This is true today.
  86   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  87                 Monitor::_safepoint_check_sometimes),
  88   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  89                           "CompactibleFreeListSpace._dict_par_lock", true,
  90                           Monitor::_safepoint_check_never),
  91   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  92                     CMSRescanMultiple),
  93   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  94                     CMSConcMarkMultiple),
  95   _collector(NULL),
  96   _preconsumptionDirtyCardClosure(NULL)
  97 {
  98   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  99          "FreeChunk is larger than expected");
 100   _bt.set_space(this);
 101   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 102   // We have all of "mr", all of which we place in the dictionary
 103   // as one big chunk. We'll need to decide here which of several
 104   // possible alternative dictionary implementations to use. For
 105   // now the choice is easy, since we have only one working
 106   // implementation, namely, the simple binary tree (splaying
 107   // temporarily disabled).
 108   switch (dictionaryChoice) {
 109     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 110       _dictionary = new AFLBinaryTreeDictionary(mr);
 111       break;
 112     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 113     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 114     default:
 115       warning("dictionaryChoice: selected option not understood; using"
 116               " default BinaryTreeDictionary implementation instead.");
 117   }
 118   assert(_dictionary != NULL, "CMS dictionary initialization");
 119   // The indexed free lists are initially all empty and are lazily
 120   // filled in on demand. Initialize the array elements to NULL.
 121   initializeIndexedFreeListArray();
 122 
 123   // Not using adaptive free lists assumes that allocation is first
 124   // from the linAB's.  Also a cms perm gen which can be compacted
 125   // has to have the klass's klassKlass allocated at a lower
 126   // address in the heap than the klass so that the klassKlass is
 127   // moved to its new location before the klass is moved.
 128   // Set the _refillSize for the linear allocation blocks
 129   if (!use_adaptive_freelists) {
 130     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 131                                            FreeBlockDictionary<FreeChunk>::atLeast);
 132     // The small linAB initially has all the space and will allocate
 133     // a chunk of any size.
 134     HeapWord* addr = (HeapWord*) fc;
 135     _smallLinearAllocBlock.set(addr, fc->size() ,
 136       1024*SmallForLinearAlloc, fc->size());
 137     // Note that _unallocated_block is not updated here.
 138     // Allocations from the linear allocation block should
 139     // update it.
 140   } else {
 141     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 142                                SmallForLinearAlloc);
 143   }
 144   // CMSIndexedFreeListReplenish should be at least 1
 145   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 146   _promoInfo.setSpace(this);
 147   if (UseCMSBestFit) {
 148     _fitStrategy = FreeBlockBestFitFirst;
 149   } else {
 150     _fitStrategy = FreeBlockStrategyNone;
 151   }
 152   check_free_list_consistency();
 153 
 154   // Initialize locks for parallel case.
 155   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 156     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 157                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 158     DEBUG_ONLY(
 159       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 160     )
 161   }
 162   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 163 }
 164 
 165 // Like CompactibleSpace forward() but always calls cross_threshold() to
 166 // update the block offset table.  Removed initialize_threshold call because
 167 // CFLS does not use a block offset array for contiguous spaces.
 168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 169                                     CompactPoint* cp, HeapWord* compact_top) {
 170   // q is alive
 171   // First check if we should switch compaction space
 172   assert(this == cp->space, "'this' should be current compaction space.");
 173   size_t compaction_max_size = pointer_delta(end(), compact_top);
 174   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 175     "virtual adjustObjectSize_v() method is not correct");
 176   size_t adjusted_size = adjustObjectSize(size);
 177   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 178          "no small fragments allowed");
 179   assert(minimum_free_block_size() == MinChunkSize,
 180          "for de-virtualized reference below");
 181   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
 182   if (adjusted_size + MinChunkSize > compaction_max_size &&
 183       adjusted_size != compaction_max_size) {
 184     do {
 185       // switch to next compaction space
 186       cp->space->set_compaction_top(compact_top);
 187       cp->space = cp->space->next_compaction_space();
 188       if (cp->space == NULL) {
 189         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 190         assert(cp->gen != NULL, "compaction must succeed");
 191         cp->space = cp->gen->first_compaction_space();
 192         assert(cp->space != NULL, "generation must have a first compaction space");
 193       }
 194       compact_top = cp->space->bottom();
 195       cp->space->set_compaction_top(compact_top);
 196       // The correct adjusted_size may not be the same as that for this method
 197     // (i.e., cp->space may no longer be "this"), so adjust the size again.
 198       // Use the virtual method which is not used above to save the virtual
 199       // dispatch.
 200       adjusted_size = cp->space->adjust_object_size_v(size);
 201       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 202       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 203     } while (adjusted_size > compaction_max_size);
 204   }
 205 
 206   // store the forwarding pointer into the mark word
 207   if ((HeapWord*)q != compact_top) {
 208     q->forward_to(oop(compact_top));
 209     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 210   } else {
 211     // if the object isn't moving we can just set the mark to the default
 212     // mark and handle it specially later on.
 213     q->init_mark();
 214     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 215   }
 216 
 217   compact_top += adjusted_size;
 218 
 219   // we need to update the offset table so that the beginnings of objects can be
 220   // found during scavenge.  Note that we are updating the offset table based on
 221   // where the object will be once the compaction phase finishes.
 222 
 223   // Always call cross_threshold().  A contiguous space would call it only
 224   // when the compaction_top exceeds the current threshold, but that
 225   // restriction does not apply to a non-contiguous space.
 226   cp->threshold =
 227     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 228   return compact_top;
 229 }
 230 
 231 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 232 // and use of single_block instead of alloc_block.  The name here is not really
 233 // appropriate - maybe a more general name could be invented for both the
 234 // contiguous and noncontiguous spaces.
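     // Below, the whole [start, the_end) range is recorded in the block
     // offset table as a single block via single_block(), and end() is
     // returned as the caller's new threshold.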
 235 
 236 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 237   _bt.single_block(start, the_end);
 238   return end();
 239 }
 240 
 241 // Initialize them to NULL.
 242 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 243   for (size_t i = 0; i < IndexSetSize; i++) {
 244     // Note that on platforms where objects are double word aligned,
 245     // the odd array elements are not used.  It is convenient, however,
 246     // to map directly from the object size to the array element.
 247     _indexedFreeList[i].reset(IndexSetSize);
 248     _indexedFreeList[i].set_size(i);
 249     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 250     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 251     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 253   }
 254 }
 255 
 256 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 257   for (size_t i = 1; i < IndexSetSize; i++) {
 258     assert(_indexedFreeList[i].size() == (size_t) i,
 259       "Indexed free list sizes are incorrect");
 260     _indexedFreeList[i].reset(IndexSetSize);
 261     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 262     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 263     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 265   }
 266 }
 267 
 268 void CompactibleFreeListSpace::reset(MemRegion mr) {
 269   resetIndexedFreeListArray();
 270   dictionary()->reset();
 271   if (BlockOffsetArrayUseUnallocatedBlock) {
 272     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 273     // Everything's allocated until proven otherwise.
 274     _bt.set_unallocated_block(end());
 275   }
 276   if (!mr.is_empty()) {
 277     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 278     _bt.single_block(mr.start(), mr.word_size());
 279     FreeChunk* fc = (FreeChunk*) mr.start();
 280     fc->set_size(mr.word_size());
 281     if (mr.word_size() >= IndexSetSize ) {
 282       returnChunkToDictionary(fc);
 283     } else {
 284       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 285       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 286     }
 287     coalBirth(mr.word_size());
 288   }
 289   _promoInfo.reset();
 290   _smallLinearAllocBlock._ptr = NULL;
 291   _smallLinearAllocBlock._word_size = 0;
 292 }
 293 
 294 void CompactibleFreeListSpace::reset_after_compaction() {
 295   // Reset the space to the new reality - one free chunk.
 296   MemRegion mr(compaction_top(), end());
 297   reset(mr);
 298   // Now refill the linear allocation block(s) if possible.
 299   if (_adaptive_freelists) {
 300     refillLinearAllocBlocksIfNeeded();
 301   } else {
 302     // Place as much of mr in the linAB as we can get,
 303     // provided it was big enough to go into the dictionary.
 304     FreeChunk* fc = dictionary()->find_largest_dict();
 305     if (fc != NULL) {
 306       assert(fc->size() == mr.word_size(),
 307              "Why was the chunk broken up?");
 308       removeChunkFromDictionary(fc);
 309       HeapWord* addr = (HeapWord*) fc;
 310       _smallLinearAllocBlock.set(addr, fc->size() ,
 311         1024*SmallForLinearAlloc, fc->size());
 312       // Note that _unallocated_block is not updated here.
 313     }
 314   }
 315 }
 316 
 317 // Walks the entire dictionary, returning a coterminal
 318 // chunk, if it exists. Use with caution since it involves
 319 // a potentially complete walk of a potentially large tree.
 320 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 321 
 322   assert_lock_strong(&_freelistLock);
 323 
 324   return dictionary()->find_chunk_ends_at(end());
 325 }
 326 
 327 
 328 #ifndef PRODUCT
 329 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 330   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 331     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 332   }
 333 }
 334 
 335 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 336   size_t sum = 0;
 337   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 338     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 339   }
 340   return sum;
 341 }
 342 
 343 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 344   size_t count = 0;
 345   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 346     debug_only(
 347       ssize_t total_list_count = 0;
 348       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 349          fc = fc->next()) {
 350         total_list_count++;
 351       }
 352       assert(total_list_count ==  _indexedFreeList[i].count(),
 353         "Count in list is incorrect");
 354     )
 355     count += _indexedFreeList[i].count();
 356   }
 357   return count;
 358 }
 359 
 360 size_t CompactibleFreeListSpace::totalCount() {
 361   size_t num = totalCountInIndexedFreeLists();
 362   num +=  dictionary()->total_count();
 363   if (_smallLinearAllocBlock._word_size != 0) {
 364     num++;
 365   }
 366   return num;
 367 }
 368 #endif
 369 
 370 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 371   FreeChunk* fc = (FreeChunk*) p;
 372   return fc->is_free();
 373 }
 374 
 375 size_t CompactibleFreeListSpace::used() const {
 376   return capacity() - free();
 377 }
 378 
 379 size_t CompactibleFreeListSpace::free() const {
 380   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 381   // if you do this while the structures are in flux you
 382   // may get an approximate answer only; for instance
 383   // because there is concurrent allocation either
 384   // directly by mutators or for promotion during a GC.
 385   // It's "MT-safe", however, in the sense that you are guaranteed
 386   // not to crash and burn, for instance, because of walking
 387   // pointers that could disappear as you were walking them.
 388   // The approximation is because the various components
 389   // that are read below are not read atomically (and
 390   // further the computation of totalSizeInIndexedFreeLists()
 391   // is itself a non-atomic computation). The normal use of
 392   // this is during a resize operation at the end of GC
 393   // and at that time you are guaranteed to get the
 394   // correct actual value. However, for instance, this is
 395   // also read completely asynchronously by the "perf-sampler"
 396   // that supports jvmstat, and you are apt to see the values
 397   // flicker in such cases.
 398   assert(_dictionary != NULL, "No _dictionary?");
 399   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 400           totalSizeInIndexedFreeLists() +
 401           _smallLinearAllocBlock._word_size) * HeapWordSize;
 402 }
 403 
 404 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 405   assert(_dictionary != NULL, "No _dictionary?");
 406   assert_locked();
 407   size_t res = _dictionary->max_chunk_size();
 408   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 409                        (size_t) SmallForLinearAlloc - 1));
 410   // XXX the following could potentially be pretty slow;
 411   // should one, pessimistically for the rare cases when res
 412   // calculated above is less than IndexSetSize,
 413   // just return res calculated above? My reasoning was that
 414   // those cases will be so rare that the extra time spent doesn't
 415   // really matter....
 416   // Note: do not change the loop test i >= res + IndexSetStride
 417   // to i > res below, because i is unsigned and res may be zero.
 418   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 419        i -= IndexSetStride) {
 420     if (_indexedFreeList[i].head() != NULL) {
 421       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 422       return i;
 423     }
 424   }
 425   return res;
 426 }
 427 
 428 void LinearAllocBlock::print_on(outputStream* st) const {
 429   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 430             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 431             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 432 }
 433 
 434 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 435   st->print_cr("COMPACTIBLE FREELIST SPACE");
 436   st->print_cr(" Space:");
 437   Space::print_on(st);
 438 
 439   st->print_cr("promoInfo:");
 440   _promoInfo.print_on(st);
 441 
 442   st->print_cr("_smallLinearAllocBlock");
 443   _smallLinearAllocBlock.print_on(st);
 444 
 445   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 446 
 447   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 448                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 449 }
 450 
 451 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 452 const {
 453   reportIndexedFreeListStatistics();
 454   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 455   gclog_or_tty->print_cr("---------------------------");
 456   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 457   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 458     _indexedFreeList[i].print_on(gclog_or_tty);
 459     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 460          fc = fc->next()) {
 461       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 462                           p2i(fc), p2i((HeapWord*)fc + i),
 463                           fc->cantCoalesce() ? "\t CC" : "");
 464     }
 465   }
 466 }
 467 
 468 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 469 const {
 470   _promoInfo.print_on(st);
 471 }
 472 
 473 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 474 const {
 475   _dictionary->report_statistics();
 476   st->print_cr("Layout of Freelists in Tree");
 477   st->print_cr("---------------------------");
 478   _dictionary->print_free_lists(st);
 479 }
 480 
 481 class BlkPrintingClosure: public BlkClosure {
 482   const CMSCollector*             _collector;
 483   const CompactibleFreeListSpace* _sp;
 484   const CMSBitMap*                _live_bit_map;
 485   const bool                      _post_remark;
 486   outputStream*                   _st;
 487 public:
 488   BlkPrintingClosure(const CMSCollector* collector,
 489                      const CompactibleFreeListSpace* sp,
 490                      const CMSBitMap* live_bit_map,
 491                      outputStream* st):
 492     _collector(collector),
 493     _sp(sp),
 494     _live_bit_map(live_bit_map),
 495     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 496     _st(st) { }
 497   size_t do_blk(HeapWord* addr);
 498 };
 499 
 500 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 501   size_t sz = _sp->block_size_no_stall(addr, _collector);
 502   assert(sz != 0, "Should always be able to compute a size");
 503   if (_sp->block_is_obj(addr)) {
 504     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 505     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 506       p2i(addr),
 507       dead ? "dead" : "live",
 508       sz,
 509       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 510     if (CMSPrintObjectsInDump && !dead) {
 511       oop(addr)->print_on(_st);
 512       _st->print_cr("--------------------------------------");
 513     }
 514   } else { // free block
 515     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 516       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 517     if (CMSPrintChunksInDump) {
 518       ((FreeChunk*)addr)->print_on(_st);
 519       _st->print_cr("--------------------------------------");
 520     }
 521   }
 522   return sz;
 523 }
 524 
 525 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 526   outputStream* st) {
 527   st->print_cr("\n=========================");
 528   st->print_cr("Block layout in CMS Heap:");
 529   st->print_cr("=========================");
 530   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 531   blk_iterate(&bpcl);
 532 
 533   st->print_cr("\n=======================================");
 534   st->print_cr("Order & Layout of Promotion Info Blocks");
 535   st->print_cr("=======================================");
 536   print_promo_info_blocks(st);
 537 
 538   st->print_cr("\n===========================");
 539   st->print_cr("Order of Indexed Free Lists");
 540   st->print_cr("=========================");
 541   print_indexed_free_lists(st);
 542 
 543   st->print_cr("\n=================================");
 544   st->print_cr("Order of Free Lists in Dictionary");
 545   st->print_cr("=================================");
 546   print_dictionary_free_lists(st);
 547 }
 548 
 549 
 550 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 551   assert_lock_strong(&_freelistLock);
 552   assert(PrintFLSStatistics != 0, "Reporting error");
 553   _dictionary->report_statistics();
 554   if (PrintFLSStatistics > 1) {
 555     reportIndexedFreeListStatistics();
 556     size_t total_size = totalSizeInIndexedFreeLists() +
 557                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 558     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 559   }
 560 }
 561 
 562 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 563   assert_lock_strong(&_freelistLock);
 564   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 565                       "--------------------------------\n");
 566   size_t total_size = totalSizeInIndexedFreeLists();
 567   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 568   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 569   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 570   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 571   if (free_blocks != 0) {
 572     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 573   }
 574 }
 575 
 576 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 577   size_t res = 0;
 578   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 579     debug_only(
 580       ssize_t recount = 0;
 581       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 582          fc = fc->next()) {
 583         recount += 1;
 584       }
 585       assert(recount == _indexedFreeList[i].count(),
 586         "Incorrect count in list");
 587     )
 588     res += _indexedFreeList[i].count();
 589   }
 590   return res;
 591 }
 592 
 593 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 594   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 595     if (_indexedFreeList[i].head() != NULL) {
 596       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 597       return (size_t)i;
 598     }
 599   }
 600   return 0;
 601 }
 602 
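     // Change the end of the space.  On expansion, the words between the old
     // and the new end are either handed to the small linear allocation block
     // (when not using adaptive free lists and the linAB is empty) or added to
     // the free lists; on shrinkage, only the block offset table is resized.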
 603 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 604   HeapWord* prevEnd = end();
 605   assert(prevEnd != value, "unnecessary set_end call");
 606   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 607         "New end is below unallocated block");
 608   _end = value;
 609   if (prevEnd != NULL) {
 610     // Resize the underlying block offset table.
 611     _bt.resize(pointer_delta(value, bottom()));
 612     if (value <= prevEnd) {
 613       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 614              "New end is below unallocated block");
 615     } else {
 616       // Now, take this new chunk and add it to the free blocks.
 617       // Note that the BOT has not yet been updated for this block.
 618       size_t newFcSize = pointer_delta(value, prevEnd);
 619       // XXX This is REALLY UGLY and should be fixed up. XXX
 620       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 621         // Mark the boundary of the new block in BOT
 622         _bt.mark_block(prevEnd, value);
 623         // put it all in the linAB
 624         MutexLockerEx x(parDictionaryAllocLock(),
 625                         Mutex::_no_safepoint_check_flag);
 626         _smallLinearAllocBlock._ptr = prevEnd;
 627         _smallLinearAllocBlock._word_size = newFcSize;
 628         repairLinearAllocBlock(&_smallLinearAllocBlock);
 629         // Births of chunks put into a LinAB are not recorded.  Births
 630         // of chunks as they are allocated out of a LinAB are.
 631       } else {
 632         // Add the block to the free lists, if possible coalescing it
 633         // with the last free block, and update the BOT and census data.
 634         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 635       }
 636     }
 637   }
 638 }
 639 
 640 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 641   CompactibleFreeListSpace* _cfls;
 642   CMSCollector* _collector;
 643 protected:
 644   // Override.
 645 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 646   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 647                                        HeapWord* bottom, HeapWord* top, \
 648                                        ClosureType* cl);                \
 649       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 650                                        HeapWord* bottom, HeapWord* top, \
 651                                        ClosureType* cl);                \
 652     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 653                                        HeapWord* bottom, HeapWord* top, \
 654                                        ClosureType* cl)
 655   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 656   walk_mem_region_with_cl_DECL(FilteringClosure);
 657 
 658 public:
 659   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 660                       CMSCollector* collector,
 661                       ExtendedOopClosure* cl,
 662                       CardTableModRefBS::PrecisionStyle precision,
 663                       HeapWord* boundary) :
 664     Filtering_DCTOC(sp, cl, precision, boundary),
 665     _cfls(sp), _collector(collector) {}
 666 };
 667 
 668 // We de-virtualize the block-related calls below, since we know that our
 669 // space is a CompactibleFreeListSpace.
 670 
 671 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 672 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 673                                                  HeapWord* bottom,              \
 674                                                  HeapWord* top,                 \
 675                                                  ClosureType* cl) {             \
 676    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 677    if (is_par) {                                                                \
 678      assert(SharedHeap::heap()->n_par_threads() ==                              \
 679             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 680      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 681    } else {                                                                     \
 682      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 683    }                                                                            \
 684 }                                                                               \
 685 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 686                                                       HeapWord* bottom,         \
 687                                                       HeapWord* top,            \
 688                                                       ClosureType* cl) {        \
 689   /* Skip parts that are before "mr", in case "block_start" sent us             \
 690      back too far. */                                                           \
 691   HeapWord* mr_start = mr.start();                                              \
 692   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 693   HeapWord* next = bottom + bot_size;                                           \
 694   while (next < mr_start) {                                                     \
 695     bottom = next;                                                              \
 696     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 697     next = bottom + bot_size;                                                   \
 698   }                                                                             \
 699                                                                                 \
 700   while (bottom < top) {                                                        \
 701     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 702         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 703                     oop(bottom)) &&                                             \
 704         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 705       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 706       bottom += _cfls->adjustObjectSize(word_sz);                               \
 707     } else {                                                                    \
 708       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 709     }                                                                           \
 710   }                                                                             \
 711 }                                                                               \
 712 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 713                                                         HeapWord* bottom,       \
 714                                                         HeapWord* top,          \
 715                                                         ClosureType* cl) {      \
 716   /* Skip parts that are before "mr", in case "block_start" sent us             \
 717      back too far. */                                                           \
 718   HeapWord* mr_start = mr.start();                                              \
 719   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 720   HeapWord* next = bottom + bot_size;                                           \
 721   while (next < mr_start) {                                                     \
 722     bottom = next;                                                              \
 723     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 724     next = bottom + bot_size;                                                   \
 725   }                                                                             \
 726                                                                                 \
 727   while (bottom < top) {                                                        \
 728     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 729         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 730                     oop(bottom)) &&                                             \
 731         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 732       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 733       bottom += _cfls->adjustObjectSize(word_sz);                               \
 734     } else {                                                                    \
 735       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 736     }                                                                           \
 737   }                                                                             \
 738 }
 739 
 740 // (There are only two of these, rather than N, because the split is due
 741 // only to the introduction of the FilteringClosure, a local part of the
 742 // impl of this abstraction.)
 743 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 745 
 746 DirtyCardToOopClosure*
 747 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 748                                       CardTableModRefBS::PrecisionStyle precision,
 749                                       HeapWord* boundary) {
 750   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 751 }
 752 
 753 
 754 // Note on locking for the space iteration functions:
 755 // since the collector's iteration activities are concurrent with
 756 // allocation activities by mutators, absent a suitable mutual exclusion
 757 // mechanism the iterators may go awry. For instance a block being iterated
 758 // may suddenly be allocated or divided up and part of it allocated and
 759 // so on.
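     // The iterators below therefore require (and assert) that the free list
     // lock is held by the caller.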
 760 
 761 // Apply the given closure to each block in the space.
 762 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 763   assert_lock_strong(freelistLock());
 764   HeapWord *cur, *limit;
 765   size_t res;
 766   for (cur = bottom(), limit = end(); cur < limit; cur += res) {
 767     res = cl->do_blk_careful(cur);
 768     assert(cur + res > cur, "Not monotonically increasing ?");
 769   }
 770 }
 771 
 772 // Apply the given closure to each block in the space.
 773 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 774   assert_lock_strong(freelistLock());
 775   HeapWord *cur, *limit;
 776   for (cur = bottom(), limit = end(); cur < limit;
 777        cur += cl->do_blk(cur));
 778 }
 779 
 780 // Apply the given closure to each oop in the space.
 781 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 782   assert_lock_strong(freelistLock());
 783   HeapWord *cur, *limit;
 784   size_t curSize;
 785   for (cur = bottom(), limit = end(); cur < limit;
 786        cur += curSize) {
 787     curSize = block_size(cur);
 788     if (block_is_obj(cur)) {
 789       oop(cur)->oop_iterate(cl);
 790     }
 791   }
 792 }
 793 
 794 // NOTE: In the following methods, in order to safely be able to
 795 // apply the closure to an object, we need to be sure that the
 796 // object has been initialized. We are guaranteed that an object
 797 // is initialized if we are holding the Heap_lock with the
 798 // world stopped.
 799 void CompactibleFreeListSpace::verify_objects_initialized() const {
 800   if (is_init_completed()) {
 801     assert_locked_or_safepoint(Heap_lock);
 802     if (Universe::is_fully_initialized()) {
 803       guarantee(SafepointSynchronize::is_at_safepoint(),
 804                 "Required for objects to be initialized");
 805     }
 806   } // else make a concession at vm start-up
 807 }
 808 
 809 // Apply the given closure to each object in the space
 810 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 811   assert_lock_strong(freelistLock());
 812   NOT_PRODUCT(verify_objects_initialized());
 813   HeapWord *cur, *limit;
 814   size_t curSize;
 815   for (cur = bottom(), limit = end(); cur < limit;
 816        cur += curSize) {
 817     curSize = block_size(cur);
 818     if (block_is_obj(cur)) {
 819       blk->do_object(oop(cur));
 820     }
 821   }
 822 }
 823 
 824 // Apply the given closure to each live object in the space
 825 //   The usage of CompactibleFreeListSpace
 826 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
 827 // objects in the space to retain references to objects that are no longer
 828 // valid.  For example, an object may reference another object
 829 // that has already been swept up (collected).  This method uses
 830 // obj_is_alive() to determine whether it is safe to apply the closure to
 831 // an object.  See obj_is_alive() for details on how liveness of an
 832 // object is decided.
 833 
 834 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 835   assert_lock_strong(freelistLock());
 836   NOT_PRODUCT(verify_objects_initialized());
 837   HeapWord *cur, *limit;
 838   size_t curSize;
 839   for (cur = bottom(), limit = end(); cur < limit;
 840        cur += curSize) {
 841     curSize = block_size(cur);
 842     if (block_is_obj(cur) && obj_is_alive(cur)) {
 843       blk->do_object(oop(cur));
 844     }
 845   }
 846 }
 847 
 848 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 849                                                   UpwardsObjectClosure* cl) {
 850   assert_locked(freelistLock());
 851   NOT_PRODUCT(verify_objects_initialized());
 852   assert(!mr.is_empty(), "Should be non-empty");
 853   // We use MemRegion(bottom(), end()) rather than used_region() below
 854   // because the two are not necessarily equal for some kinds of
 855   // spaces, in particular, certain kinds of free list spaces.
 856   // We could use the more complicated but more precise:
 857   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 858   // but the slight imprecision seems acceptable in the assertion check.
 859   assert(MemRegion(bottom(), end()).contains(mr),
 860          "Should be within used space");
 861   HeapWord* prev = cl->previous();   // max address from last time
 862   if (prev >= mr.end()) { // nothing to do
 863     return;
 864   }
 865   // This assert will not work when we go from cms space to perm
 866   // space, and use same closure. Easy fix deferred for later. XXX YSR
 867   // assert(prev == NULL || contains(prev), "Should be within space");
 868 
 869   bool last_was_obj_array = false;
 870   HeapWord *blk_start_addr, *region_start_addr;
 871   if (prev > mr.start()) {
 872     region_start_addr = prev;
 873     blk_start_addr    = prev;
 874     // The previous invocation may have pushed "prev" beyond the
 875     // last allocated block yet there may still be blocks
 876     // in this region due to a particular coalescing policy.
 877     // Relax the assertion so that the case where the unallocated
 878     // block is maintained and "prev" is beyond the unallocated
 879     // block does not cause the assertion to fire.
 880     assert((BlockOffsetArrayUseUnallocatedBlock &&
 881             (!is_in(prev))) ||
 882            (blk_start_addr == block_start(region_start_addr)), "invariant");
 883   } else {
 884     region_start_addr = mr.start();
 885     blk_start_addr    = block_start(region_start_addr);
 886   }
 887   HeapWord* region_end_addr = mr.end();
 888   MemRegion derived_mr(region_start_addr, region_end_addr);
 889   while (blk_start_addr < region_end_addr) {
 890     const size_t size = block_size(blk_start_addr);
 891     if (block_is_obj(blk_start_addr)) {
 892       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 893     } else {
 894       last_was_obj_array = false;
 895     }
 896     blk_start_addr += size;
 897   }
 898   if (!last_was_obj_array) {
 899     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 900            "Should be within (closed) used space");
 901     assert(blk_start_addr > prev, "Invariant");
 902     cl->set_previous(blk_start_addr); // min address for next time
 903   }
 904 }
 905 
 906 
 907 // Callers of this iterator beware: The closure application should
 908 // be robust in the face of uninitialized objects and should (always)
 909 // return a correct size so that the next addr + size below gives us a
 910 // valid block boundary. [See for instance,
 911 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 912 // in ConcurrentMarkSweepGeneration.cpp.]
 913 HeapWord*
 914 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 915   ObjectClosureCareful* cl) {
 916   assert_lock_strong(freelistLock());
 917   // Can't use used_region() below because it may not necessarily
 918   // be the same as [bottom(),end()); although we could
 919   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 920   // that appears too cumbersome, so we just do the simpler check
 921   // in the assertion below.
 922   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 923          "mr should be non-empty and within used space");
 924   HeapWord *addr, *end;
 925   size_t size;
 926   for (addr = block_start_careful(mr.start()), end  = mr.end();
 927        addr < end; addr += size) {
 928     FreeChunk* fc = (FreeChunk*)addr;
 929     if (fc->is_free()) {
 930       // Since we hold the free list lock, which protects direct
 931       // allocation in this generation by mutators, a free object
 932       // will remain free throughout this iteration code.
 933       size = fc->size();
 934     } else {
 935       // Note that the object need not necessarily be initialized,
 936       // because (for instance) the free list lock does NOT protect
 937       // object initialization. The closure application below must
 938       // therefore be correct in the face of uninitialized objects.
 939       size = cl->do_object_careful_m(oop(addr), mr);
 940       if (size == 0) {
 941         // An unparsable object found. Signal early termination.
 942         return addr;
 943       }
 944     }
 945   }
 946   return NULL;
 947 }
 948 
 949 
 950 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 951   NOT_PRODUCT(verify_objects_initialized());
 952   return _bt.block_start(p);
 953 }
 954 
 955 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 956   return _bt.block_start_careful(p);
 957 }
 958 
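     // Return the size in words of the block starting at p.  The block may be
     // a free chunk that is concurrently being allocated, so the free bit is
     // re-checked (with an acquire) after the size is read, and the loop
     // retries until a consistent view of the block is obtained.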
 959 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 960   NOT_PRODUCT(verify_objects_initialized());
 961   // This must be volatile, or else there is a danger that the compiler
 962   // will compile the code below into a sometimes-infinite loop, by keeping
 963   // the value read the first time in a register.
 964   while (true) {
 965     // We must do this until we get a consistent view of the object.
 966     if (FreeChunk::indicatesFreeChunk(p)) {
 967       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 968       size_t res = fc->size();
 969 
 970       // Bugfix for systems with weak memory model (PPC64/IA64). The
 971       // block's free bit was set and we have read the size of the
 972       // block. Acquire and check the free bit again. If the block is
 973       // still free, the read size is correct.
 974       OrderAccess::acquire();
 975 
 976       // If the object is still a free chunk, return the size, else it
 977       // has been allocated so try again.
 978       if (FreeChunk::indicatesFreeChunk(p)) {
 979         assert(res != 0, "Block size should not be 0");
 980         return res;
 981       }
 982     } else {
 983       // must read from what 'p' points to in each loop.
 984       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 985       if (k != NULL) {
 986         assert(k->is_klass(), "Should really be klass oop.");
 987         oop o = (oop)p;
 988         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 989 
 990         // Bugfix for systems with weak memory model (PPC64/IA64).
 991         // The object o may be an array. Acquire to make sure that the array
 992         // size (third word) is consistent.
 993         OrderAccess::acquire();
 994 
 995         size_t res = o->size_given_klass(k);
 996         res = adjustObjectSize(res);
 997         assert(res != 0, "Block size should not be 0");
 998         return res;
 999       }
1000     }
1001   }
1002 }
1003 
1004 // TODO: Now that is_parsable is gone, we should combine these two functions.
1005 // A variant of the above that uses the Printezis bits for
1006 // unparsable but allocated objects. This avoids any possible
1007 // stalls waiting for mutators to initialize objects, and is
1008 // thus potentially faster than the variant above. However,
1009 // this variant may return a zero size for a block that is
1010 // under mutation and for which a consistent size cannot be
1011 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1012 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1013                                                      const CMSCollector* c)
1014 const {
1015   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1016   // This must be volatile, or else there is a danger that the compiler
1017   // will compile the code below into a sometimes-infinite loop, by keeping
1018   // the value read the first time in a register.
1019   DEBUG_ONLY(uint loops = 0;)
1020   while (true) {
1021     // We must do this until we get a consistent view of the object.
1022     if (FreeChunk::indicatesFreeChunk(p)) {
1023       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1024       size_t res = fc->size();
1025 
1026       // Bugfix for systems with weak memory model (PPC64/IA64). The
1027       // free bit of the block was set and we have read the size of
1028       // the block. Acquire and check the free bit again. If the
1029       // block is still free, the read size is correct.
1030       OrderAccess::acquire();
1031 
1032       if (FreeChunk::indicatesFreeChunk(p)) {
1033         assert(res != 0, "Block size should not be 0");
1034         assert(loops == 0, "Should be 0");
1035         return res;
1036       }
1037     } else {
1038       // must read from what 'p' points to in each loop.
1039       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1040       // We trust the size of any object that has a non-NULL
1041       // klass and (for those in the perm gen) is parsable
1042       // -- irrespective of its conc_safe-ty.
1043       if (k != NULL) {
1044         assert(k->is_klass(), "Should really be klass oop.");
1045         oop o = (oop)p;
1046         assert(o->is_oop(), "Should be an oop");
1047 
1048         // Bugfix for systems with weak memory model (PPC64/IA64).
1049         // The object o may be an array. Acquire to make sure that the array
1050         // size (third word) is consistent.
1051         OrderAccess::acquire();
1052 
1053         size_t res = o->size_given_klass(k);
1054         res = adjustObjectSize(res);
1055         assert(res != 0, "Block size should not be 0");
1056         return res;
1057       } else {
1058         // May return 0 if P-bits not present.
1059         return c->block_size_if_printezis_bits(p);
1060       }
1061     }
1062     assert(loops == 0, "Can loop at most once");
1063     DEBUG_ONLY(loops++;)
1064   }
1065 }
1066 
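     // A variant of block_size() for use on paths where the block at p cannot
     // be concurrently split, coalesced or allocated (for example, while
     // holding the free list lock at a safepoint), so a single read of the
     // free bit suffices.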
1067 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1068   NOT_PRODUCT(verify_objects_initialized());
1069   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1070   FreeChunk* fc = (FreeChunk*)p;
1071   if (fc->is_free()) {
1072     return fc->size();
1073   } else {
1074     // Ignore mark word because this may be a recently promoted
1075     // object whose mark word is used to chain together grey
1076     // objects (the last one would have a null value).
1077     assert(oop(p)->is_oop(true), "Should be an oop");
1078     return adjustObjectSize(oop(p)->size());
1079   }
1080 }
1081 
1082 // This implementation assumes that the property of "being an object" is
1083 // stable.  But being a free chunk may not be (because of parallel
1084 // promotion.)
1085 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1086   FreeChunk* fc = (FreeChunk*)p;
1087   assert(is_in_reserved(p), "Should be in space");
1088   // When doing a mark-sweep-compact of the CMS generation, this
1089   // assertion may fail because prepare_for_compaction() uses
1090   // space that is garbage to maintain information on ranges of
1091   // live objects so that these live ranges can be moved as a whole.
1092   // Comment out this assertion until that problem can be solved
1093   // (i.e., the block start calculation may look at objects
1094   // at addresses below "p" in finding the object that contains "p",
1095   // and those objects (if garbage) may have been modified to hold
1096   // live range information).
1097   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1098   //        "Should be a block boundary");
1099   if (FreeChunk::indicatesFreeChunk(p)) return false;
1100   Klass* k = oop(p)->klass_or_null();
1101   if (k != NULL) {
1102     // Ignore mark word because it may have been used to
1103     // chain together promoted objects (the last one
1104     // would have a null value).
1105     assert(oop(p)->is_oop(true), "Should be an oop");
1106     return true;
1107   } else {
1108     return false;  // Was not an object at the start of collection.
1109   }
1110 }
1111 
1112 // Check if the object is alive. This fact is checked either by consulting
1113 // the main marking bitmap in the sweeping phase or, if it's a permanent
1114 // generation and we're not in the sweeping phase, by checking the
1115 // perm_gen_verify_bit_map where we store the "deadness" information if
1116 // we did not sweep the perm gen in the most recent previous GC cycle.
1117 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1118   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1119          "Else races are possible");
1120   assert(block_is_obj(p), "The address should point to an object");
1121 
1122   // If we're sweeping, we use object liveness information from the main bit map
1123   // for both perm gen and old gen.
1124   // We don't need to lock the bitmap (live_map or dead_map below), because
1125   // EITHER we are in the middle of the sweeping phase, and the
1126   // main marking bit map (live_map below) is locked,
1127   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1128   // is stable, because it's mutated only in the sweeping phase.
1129   // NOTE: This method is also used by jmap where, if class unloading is
1130   // off, the results can return "false" for legitimate perm objects,
1131   // when we are not in the midst of a sweeping phase, which can result
1132   // in jmap not reporting certain perm gen objects. This will be moot
1133   // if/when the perm gen goes away in the future.
1134   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1135     CMSBitMap* live_map = _collector->markBitMap();
1136     return live_map->par_isMarked((HeapWord*) p);
1137   }
1138   return true;
1139 }
1140 
1141 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1142   FreeChunk* fc = (FreeChunk*)p;
1143   assert(is_in_reserved(p), "Should be in space");
1144   assert(_bt.block_start(p) == p, "Should be a block boundary");
1145   if (!fc->is_free()) {
1146     // Ignore mark word because it may have been used to
1147     // chain together promoted objects (the last one
1148     // would have a null value).
1149     assert(oop(p)->is_oop(true), "Should be an oop");
1150     return true;
1151   }
1152   return false;
1153 }
1154 
1155 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1156 // approximate answer if you don't hold the freelistLock() when you call this.
1157 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1158   size_t size = 0;
1159   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1160     debug_only(
1161       // We may be calling here without the lock in which case we
1162       // won't do this modest sanity check.
1163       if (freelistLock()->owned_by_self()) {
1164         size_t total_list_size = 0;
1165         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1166           fc = fc->next()) {
1167           total_list_size += i;
1168         }
1169         assert(total_list_size == i * _indexedFreeList[i].count(),
1170                "Count in list is incorrect");
1171       }
1172     )
1173     size += i * _indexedFreeList[i].count();
1174   }
1175   return size;
1176 }
1177 
1178 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1179   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1180   return allocate(size);
1181 }
1182 
1183 HeapWord*
1184 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1185   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1186 }
1187 
1188 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1189   assert_lock_strong(freelistLock());
1190   HeapWord* res = NULL;
1191   assert(size == adjustObjectSize(size),
1192          "use adjustObjectSize() before calling into allocate()");
1193 
1194   if (_adaptive_freelists) {
1195     res = allocate_adaptive_freelists(size);
1196   } else {  // non-adaptive free lists
1197     res = allocate_non_adaptive_freelists(size);
1198   }
1199 
1200   if (res != NULL) {
1201     // check that res does lie in this space!
1202     assert(is_in_reserved(res), "Not in this space!");
1203     assert(is_aligned((void*)res), "alignment check");
1204 
1205     FreeChunk* fc = (FreeChunk*)res;
1206     fc->markNotFree();
1207     assert(!fc->is_free(), "shouldn't be marked free");
1208     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1209     // Verify that the block offset table shows this to
1210     // be a single block, but not one which is unallocated.
1211     _bt.verify_single_block(res, size);
1212     _bt.verify_not_unallocated(res, size);
1213     // mangle a just allocated object with a distinct pattern.
1214     debug_only(fc->mangleAllocated(size));
1215   }
1216 
1217   return res;
1218 }
1219 
1220 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1221   HeapWord* res = NULL;
1222   // try and use linear allocation for smaller blocks
1223   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1224     // if successful, the following also adjusts block offset table
1225     res = getChunkFromSmallLinearAllocBlock(size);
1226   }
1227   // Else triage to indexed lists for smaller sizes
1228   if (res == NULL) {
1229     if (size < SmallForDictionary) {
1230       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1231     } else {
1232       // else get it from the big dictionary; if even this doesn't
1233       // work we are out of luck.
1234       res = (HeapWord*)getChunkFromDictionaryExact(size);
1235     }
1236   }
1237 
1238   return res;
1239 }
1240 
1241 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1242   assert_lock_strong(freelistLock());
1243   HeapWord* res = NULL;
1244   assert(size == adjustObjectSize(size),
1245          "use adjustObjectSize() before calling into allocate()");
1246 
1247   // Strategy
1248   //   if small
1249   //     exact size from small object indexed list
1250   //     small or large linear allocation block (linAB) as appropriate
1251   //     take from lists of greater sized chunks
1252   //   else
1253   //     dictionary
1254   //     small or large linear allocation block if it has the space
1255   // Try allocating exact size from indexTable first
1256   if (size < IndexSetSize) {
1257     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1258     if(res != NULL) {
1259       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1260         "Not removed from free list");
1261       // no block offset table adjustment is necessary on blocks in
1262       // the indexed lists.
1263 
1264     // Try allocating from the small LinAB
1265     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1266         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1267         // if successful, the above also adjusts block offset table
1268         // Note that this call will refill the LinAB to
1269         // satisfy the request.  This is different than
1270         // evm.
1271         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1272     } else {
1273       // Raid the exact free lists larger than size, even if they are not
1274       // overpopulated.
1275       res = (HeapWord*) getChunkFromGreater(size);
1276     }
1277   } else {
1278     // Big objects get allocated directly from the dictionary.
1279     res = (HeapWord*) getChunkFromDictionaryExact(size);
1280     if (res == NULL) {
1281       // Try hard not to fail since an allocation failure will likely
1282       // trigger a synchronous GC.  Try to get the space from the
1283       // allocation blocks.
1284       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1285     }
1286   }
1287 
1288   return res;
1289 }
1290 
1291 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1292 // when promoting obj.
1293 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1294   // Depending on the object size, expansion may require refilling either a
1295   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1296   // is added because the dictionary may over-allocate to avoid fragmentation.
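       // (Note: the code below actually adds 2 * MinChunkSize; the extra
       // MinChunkSize presumably leaves slack for a residual chunk created
       // by a split.)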
1297   size_t space = obj_size;
1298   if (!_adaptive_freelists) {
1299     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1300   }
1301   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1302   return space;
1303 }
1304 
1305 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1306   FreeChunk* ret;
1307 
1308   assert(numWords >= MinChunkSize, "Size is less than minimum");
1309   assert(linearAllocationWouldFail() || bestFitFirst(),
1310     "Should not be here");
1311 
1312   size_t i;
1313   size_t currSize = numWords + MinChunkSize;
1314   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1315   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1316     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1317     if (fl->head()) {
1318       ret = getFromListGreater(fl, numWords);
1319       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1320       return ret;
1321     }
1322   }
1323 
1324   currSize = MAX2((size_t)SmallForDictionary,
1325                   (size_t)(numWords + MinChunkSize));
1326 
1327   /* Try to get a chunk that satisfies request, while avoiding
1328      fragmentation that can't be handled. */
1329   {
1330     ret =  dictionary()->get_chunk(currSize);
1331     if (ret != NULL) {
1332       assert(ret->size() - numWords >= MinChunkSize,
1333              "Chunk is too small");
1334       _bt.allocated((HeapWord*)ret, ret->size());
1335       /* Carve returned chunk. */
1336       (void) splitChunkAndReturnRemainder(ret, numWords);
1337       /* Label this as no longer a free chunk. */
1338       assert(ret->is_free(), "This chunk should be free");
1339       ret->link_prev(NULL);
1340     }
1341     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1342     return ret;
1343   }
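       // Not reached: the block above always returns.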
1344   ShouldNotReachHere();
1345 }
1346 
1347 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1348   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1349   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1350 }
1351 
1352 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1353   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1354          (_smallLinearAllocBlock._word_size == fc->size()),
1355          "Linear allocation block shows incorrect size");
1356   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1357           (_smallLinearAllocBlock._word_size == fc->size()));
1358 }
1359 
1360 // Check if the purported free chunk is present either as a linear
1361 // allocation block, the size-indexed table of (smaller) free blocks,
1362 // or the larger free blocks kept in the binary tree dictionary.
1363 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1364   if (verify_chunk_is_linear_alloc_block(fc)) {
1365     return true;
1366   } else if (fc->size() < IndexSetSize) {
1367     return verifyChunkInIndexedFreeLists(fc);
1368   } else {
1369     return dictionary()->verify_chunk_in_free_list(fc);
1370   }
1371 }
1372 
1373 #ifndef PRODUCT
1374 void CompactibleFreeListSpace::assert_locked() const {
1375   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1376 }
1377 
1378 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1379   CMSLockVerifier::assert_locked(lock);
1380 }
1381 #endif
1382 
1383 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1384   // In the parallel case, the main thread holds the free list lock
1385   // on behalf of the parallel threads.
1386   FreeChunk* fc;
1387   {
1388     // If GC is parallel, this might be called by several threads.
1389     // This should be rare enough that the locking overhead won't affect
1390     // the sequential code.
1391     MutexLockerEx x(parDictionaryAllocLock(),
1392                     Mutex::_no_safepoint_check_flag);
1393     fc = getChunkFromDictionary(size);
1394   }
1395   if (fc != NULL) {
1396     fc->dontCoalesce();
1397     assert(fc->is_free(), "Should be free, but not coalescable");
1398     // Verify that the block offset table shows this to
1399     // be a single block, but not one which is unallocated.
1400     _bt.verify_single_block((HeapWord*)fc, fc->size());
1401     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1402   }
1403   return fc;
1404 }
1405 
1406 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1407   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1408   assert_locked();
1409 
1410   // If we are tracking promotions, then first ensure space for
1411   // promotion (including spooling space for saving the header if necessary);
1412   // then allocate and copy, and finally track promoted info if needed.
1413   // When tracking (see PromotionInfo::track()), the mark word may
1414   // be displaced and in this case restoration of the mark word
1415   // occurs in the (oop_since_save_marks_)iterate phase.
1416   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1417     return NULL;
1418   }
1419   // Call the allocate(size_t, bool) form directly to avoid the
1420   // additional call through the allocate(size_t) form.  Having
1421   // the compiler inline the call is problematic because allocate(size_t)
1422   // is a virtual method.
1423   HeapWord* res = allocate(adjustObjectSize(obj_size));
1424   if (res != NULL) {
1425     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1426     // if we should be tracking promotions, do so.
1427     if (_promoInfo.tracking()) {
1428         _promoInfo.track((PromotedObject*)res);
1429     }
1430   }
1431   return oop(res);
1432 }
1433 
1434 HeapWord*
1435 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1436   assert_locked();
1437   assert(size >= MinChunkSize, "minimum chunk size");
1438   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1439     "maximum from smallLinearAllocBlock");
1440   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1441 }
1442 
1443 HeapWord*
1444 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1445                                                        size_t size) {
1446   assert_locked();
1447   assert(size >= MinChunkSize, "too small");
1448   HeapWord* res = NULL;
1449   // Try to do linear allocation from blk, making sure the block is non-empty.
1450   if (blk->_word_size == 0) {
1451     // We have probably been unable to fill this either in the prologue or
1452     // when it was exhausted at the last linear allocation. Bail out until
1453     // next time.
1454     assert(blk->_ptr == NULL, "consistency check");
1455     return NULL;
1456   }
1457   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1458   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1459   if (res != NULL) return res;
1460 
1461   // about to exhaust this linear allocation block
1462   if (blk->_word_size == size) { // exactly satisfied
1463     res = blk->_ptr;
1464     _bt.allocated(res, blk->_word_size);
1465   } else if (size + MinChunkSize <= blk->_refillSize) {
1466     size_t sz = blk->_word_size;
1467     // Update _unallocated_block if the size is such that the chunk would be
1468     // returned to the indexed free list.  All other chunks in the indexed
1469     // free lists are allocated from the dictionary, so _unallocated_block
1470     // has already been adjusted for them.  Do it here so that the accounting
1471     // is consistent for all chunks added back to the indexed free lists.
1472     if (sz < SmallForDictionary) {
1473       _bt.allocated(blk->_ptr, sz);
1474     }
1475     // Return the chunk that isn't big enough, and then refill below.
1476     addChunkToFreeLists(blk->_ptr, sz);
1477     split_birth(sz);
1478     // Don't keep statistics on adding back a chunk from a LinAB.
1479   } else {
1480     // A refilled block would not satisfy the request.
1481     return NULL;
1482   }
1483 
1484   blk->_ptr = NULL; blk->_word_size = 0;
1485   refillLinearAllocBlock(blk);
1486   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1487          "block was replenished");
1488   if (res != NULL) {
1489     split_birth(size);
1490     repairLinearAllocBlock(blk);
1491   } else if (blk->_ptr != NULL) {
1492     res = blk->_ptr;
1493     size_t blk_size = blk->_word_size;
1494     blk->_word_size -= size;
1495     blk->_ptr  += size;
1496     split_birth(size);
1497     repairLinearAllocBlock(blk);
1498     // Update BOT last so that other (parallel) GC threads see a consistent
1499     // view of the BOT and free blocks.
1500     // Above must occur before BOT is updated below.
1501     OrderAccess::storestore();
1502     _bt.split_block(res, blk_size, size);  // adjust block offset table
1503   }
1504   return res;
1505 }
1506 
1507 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1508                                         LinearAllocBlock* blk,
1509                                         size_t size) {
1510   assert_locked();
1511   assert(size >= MinChunkSize, "too small");
1512 
1513   HeapWord* res = NULL;
1514   // This is the common case.  Keep it simple.
1515   if (blk->_word_size >= size + MinChunkSize) {
1516     assert(blk->_ptr != NULL, "consistency check");
1517     res = blk->_ptr;
1518     // Note that the BOT is up-to-date for the linAB before allocation.  It
1519     // indicates the start of the linAB.  The split_block() updates the
1520     // BOT for the linAB after the allocation (indicates the start of the
1521     // next chunk to be allocated).
1522     size_t blk_size = blk->_word_size;
1523     blk->_word_size -= size;
1524     blk->_ptr  += size;
1525     split_birth(size);
1526     repairLinearAllocBlock(blk);
1527     // Update BOT last so that other (parallel) GC threads see a consistent
1528     // view of the BOT and free blocks.
1529     // Above must occur before BOT is updated below.
1530     OrderAccess::storestore();
1531     _bt.split_block(res, blk_size, size);  // adjust block offset table
1532     _bt.allocated(res, size);
1533   }
1534   return res;
1535 }
1536 
1537 FreeChunk*
1538 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1539   assert_locked();
1540   assert(size < SmallForDictionary, "just checking");
1541   FreeChunk* res;
1542   res = _indexedFreeList[size].get_chunk_at_head();
1543   if (res == NULL) {
1544     res = getChunkFromIndexedFreeListHelper(size);
1545   }
1546   _bt.verify_not_unallocated((HeapWord*) res, size);
1547   assert(res == NULL || res->size() == size, "Incorrect block size");
1548   return res;
1549 }
1550 
1551 FreeChunk*
1552 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1553   bool replenish) {
1554   assert_locked();
1555   FreeChunk* fc = NULL;
1556   if (size < SmallForDictionary) {
1557     assert(_indexedFreeList[size].head() == NULL ||
1558       _indexedFreeList[size].surplus() <= 0,
1559       "List for this size should be empty or under populated");
1560     // Try best fit in exact lists before replenishing the list
1561     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1562       // Replenish list.
1563       //
1564       // Things tried that failed.
1565       //   Tried allocating out of the two LinAB's first before
1566       // replenishing lists.
1567       //   Tried small linAB of size 256 (size in indexed list)
1568       // and replenishing indexed lists from the small linAB.
1569       //
1570       FreeChunk* newFc = NULL;
1571       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
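           // Grab a chunk CMSIndexedFreeListReplenish times the requested size
           // and carve it into same-sized blocks below, so that a single
           // replenish can service several future requests of this size.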
1572       if (replenish_size < SmallForDictionary) {
1573         // Do not replenish from an underpopulated size.
1574         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1575             _indexedFreeList[replenish_size].head() != NULL) {
1576           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1577         } else if (bestFitFirst()) {
1578           newFc = bestFitSmall(replenish_size);
1579         }
1580       }
1581       if (newFc == NULL && replenish_size > size) {
1582         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1583         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1584       }
1585       // Note: The stats update for the split-death of the block obtained above
1586       // will be recorded below, precisely when we know we are going to
1587       // be actually splitting it into more than one piece.
1588       if (newFc != NULL) {
1589         if  (replenish || CMSReplenishIntermediate) {
1590           // Replenish this list and return one block to caller.
1591           size_t i;
1592           FreeChunk *curFc, *nextFc;
1593           size_t num_blk = newFc->size() / size;
1594           assert(num_blk >= 1, "Smaller than requested?");
1595           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1596           if (num_blk > 1) {
1597             // we are sure we will be splitting the block just obtained
1598             // into multiple pieces; record the split-death of the original
1599             splitDeath(replenish_size);
1600           }
1601           // carve up and link blocks 0, ..., num_blk - 2
1602           // The last chunk is not added to the lists but is returned as the
1603           // free chunk.
1604           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1605                i = 0;
1606                i < (num_blk - 1);
1607                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1608                i++) {
1609             curFc->set_size(size);
1610             // Don't record this as a return in order to try and
1611             // determine the "returns" from a GC.
1612             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1613             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1614             _bt.mark_block((HeapWord*)curFc, size);
1615             split_birth(size);
1616             // Don't record the initial population of the indexed list
1617             // as a split birth.
1618           }
1619 
1620           // check that the arithmetic was OK above
1621           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1622             "inconsistency in carving newFc");
1623           curFc->set_size(size);
1624           _bt.mark_block((HeapWord*)curFc, size);
1625           split_birth(size);
1626           fc = curFc;
1627         } else {
1628           // Return entire block to caller
1629           fc = newFc;
1630         }
1631       }
1632     }
1633   } else {
1634     // Get a free chunk from the free chunk dictionary to be returned to
1635     // replenish the indexed free list.
1636     fc = getChunkFromDictionaryExact(size);
1637   }
1638   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1639   return fc;
1640 }
1641 
1642 FreeChunk*
1643 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1644   assert_locked();
1645   FreeChunk* fc = _dictionary->get_chunk(size,
1646                                          FreeBlockDictionary<FreeChunk>::atLeast);
1647   if (fc == NULL) {
1648     return NULL;
1649   }
1650   _bt.allocated((HeapWord*)fc, fc->size());
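       // Split only if the remainder would itself be a valid free chunk
       // (at least MinChunkSize words); otherwise return the slightly
       // over-sized chunk as is (see the asserts below).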
1651   if (fc->size() >= size + MinChunkSize) {
1652     fc = splitChunkAndReturnRemainder(fc, size);
1653   }
1654   assert(fc->size() >= size, "chunk too small");
1655   assert(fc->size() < size + MinChunkSize, "chunk too big");
1656   _bt.verify_single_block((HeapWord*)fc, fc->size());
1657   return fc;
1658 }
1659 
1660 FreeChunk*
1661 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1662   assert_locked();
1663   FreeChunk* fc = _dictionary->get_chunk(size,
1664                                          FreeBlockDictionary<FreeChunk>::atLeast);
1665   if (fc == NULL) {
1666     return fc;
1667   }
1668   _bt.allocated((HeapWord*)fc, fc->size());
1669   if (fc->size() == size) {
1670     _bt.verify_single_block((HeapWord*)fc, size);
1671     return fc;
1672   }
1673   assert(fc->size() > size, "get_chunk() guarantee");
1674   if (fc->size() < size + MinChunkSize) {
1675     // Return the chunk to the dictionary and go get a bigger one.
1676     returnChunkToDictionary(fc);
1677     fc = _dictionary->get_chunk(size + MinChunkSize,
1678                                 FreeBlockDictionary<FreeChunk>::atLeast);
1679     if (fc == NULL) {
1680       return NULL;
1681     }
1682     _bt.allocated((HeapWord*)fc, fc->size());
1683   }
1684   assert(fc->size() >= size + MinChunkSize, "tautology");
1685   fc = splitChunkAndReturnRemainder(fc, size);
1686   assert(fc->size() == size, "chunk is wrong size");
1687   _bt.verify_single_block((HeapWord*)fc, size);
1688   return fc;
1689 }
1690 
1691 void
1692 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1693   assert_locked();
1694 
1695   size_t size = chunk->size();
1696   _bt.verify_single_block((HeapWord*)chunk, size);
1697   // adjust _unallocated_block downward, as necessary
1698   _bt.freed((HeapWord*)chunk, size);
1699   _dictionary->return_chunk(chunk);
1700 #ifndef PRODUCT
1701   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1702     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1703     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1704     tl->verify_stats();
1705   }
1706 #endif // PRODUCT
1707 }
1708 
1709 void
1710 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1711   assert_locked();
1712   size_t size = fc->size();
1713   _bt.verify_single_block((HeapWord*) fc, size);
1714   _bt.verify_not_unallocated((HeapWord*) fc, size);
1715   if (_adaptive_freelists) {
1716     _indexedFreeList[size].return_chunk_at_tail(fc);
1717   } else {
1718     _indexedFreeList[size].return_chunk_at_head(fc);
1719   }
1720 #ifndef PRODUCT
1721   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1722      _indexedFreeList[size].verify_stats();
1723   }
1724 #endif // PRODUCT
1725 }
1726 
1727 // Add chunk to end of last block -- if it's the largest
1728 // block -- and update BOT and census data. We would
1729 // of course have preferred to coalesce it with the
1730 // last block, but it's currently less expensive to find the
1731 // largest block than it is to find the last.
1732 void
1733 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1734   HeapWord* chunk, size_t     size) {
1735   // check that the chunk does lie in this space!
1736   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1737   // One of the parallel gc task threads may be here
1738   // whilst others are allocating.
1739   Mutex* lock = &_parDictionaryAllocLock;
1740   FreeChunk* ec;
1741   {
1742     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1743     ec = dictionary()->find_largest_dict();  // get largest block
1744     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1745       // It's a coterminal block - we can coalesce.
1746       size_t old_size = ec->size();
1747       coalDeath(old_size);
1748       removeChunkFromDictionary(ec);
1749       size += old_size;
1750     } else {
1751       ec = (FreeChunk*)chunk;
1752     }
1753   }
1754   ec->set_size(size);
1755   debug_only(ec->mangleFreed(size));
1756   if (size < SmallForDictionary) {
1757     lock = _indexedFreeListParLocks[size];
1758   }
1759   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1760   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1761   // record the birth under the lock since the recording involves
1762   // manipulation of the list on which the chunk lives and
1763   // if the chunk is allocated and is the last on the list,
1764   // the list can go away.
1765   coalBirth(size);
1766 }
1767 
1768 void
1769 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1770                                               size_t     size) {
1771   // check that the chunk does lie in this space!
1772   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1773   assert_locked();
1774   _bt.verify_single_block(chunk, size);
1775 
1776   FreeChunk* fc = (FreeChunk*) chunk;
1777   fc->set_size(size);
1778   debug_only(fc->mangleFreed(size));
1779   if (size < SmallForDictionary) {
1780     returnChunkToFreeList(fc);
1781   } else {
1782     returnChunkToDictionary(fc);
1783   }
1784 }
1785 
1786 void
1787 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1788   size_t size, bool coalesced) {
1789   assert_locked();
1790   assert(chunk != NULL, "null chunk");
1791   if (coalesced) {
1792     // repair BOT
1793     _bt.single_block(chunk, size);
1794   }
1795   addChunkToFreeLists(chunk, size);
1796 }
1797 
1798 // We _must_ find the purported chunk on our free lists;
1799 // we assert if we don't.
1800 void
1801 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1802   size_t size = fc->size();
1803   assert_locked();
1804   debug_only(verifyFreeLists());
1805   if (size < SmallForDictionary) {
1806     removeChunkFromIndexedFreeList(fc);
1807   } else {
1808     removeChunkFromDictionary(fc);
1809   }
1810   _bt.verify_single_block((HeapWord*)fc, size);
1811   debug_only(verifyFreeLists());
1812 }
1813 
1814 void
1815 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1816   size_t size = fc->size();
1817   assert_locked();
1818   assert(fc != NULL, "null chunk");
1819   _bt.verify_single_block((HeapWord*)fc, size);
1820   _dictionary->remove_chunk(fc);
1821   // adjust _unallocated_block upward, as necessary
1822   _bt.allocated((HeapWord*)fc, size);
1823 }
1824 
1825 void
1826 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1827   assert_locked();
1828   size_t size = fc->size();
1829   _bt.verify_single_block((HeapWord*)fc, size);
1830   NOT_PRODUCT(
1831     if (FLSVerifyIndexTable) {
1832       verifyIndexedFreeList(size);
1833     }
1834   )
1835   _indexedFreeList[size].remove_chunk(fc);
1836   NOT_PRODUCT(
1837     if (FLSVerifyIndexTable) {
1838       verifyIndexedFreeList(size);
1839     }
1840   )
1841 }
1842 
1843 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1844   /* A hint is the next larger size that has a surplus.
1845      Start the search at a size large enough to guarantee that
1846      the excess is >= MinChunkSize. */
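       // For example, a request for numWords words starts the hint walk at
       // align_object_size(numWords + MinChunkSize), so any list we land on
       // can donate a chunk and still leave a remainder of at least
       // MinChunkSize, as getFromListGreater() requires.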
1847   size_t start = align_object_size(numWords + MinChunkSize);
1848   if (start < IndexSetSize) {
1849     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1850     size_t    hint = _indexedFreeList[start].hint();
1851     while (hint < IndexSetSize) {
1852       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1853       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1854       if (fl->surplus() > 0 && fl->head() != NULL) {
1855         // Found a list with surplus, reset original hint
1856         // and split out a free chunk which is returned.
1857         _indexedFreeList[start].set_hint(hint);
1858         FreeChunk* res = getFromListGreater(fl, numWords);
1859         assert(res == NULL || res->is_free(),
1860           "Should be returning a free chunk");
1861         return res;
1862       }
1863       hint = fl->hint(); /* keep looking */
1864     }
1865     /* None found. */
1866     it[start].set_hint(IndexSetSize);
1867   }
1868   return NULL;
1869 }
1870 
1871 /* Requires fl->size >= numWords + MinChunkSize */
1872 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1873   size_t numWords) {
1874   FreeChunk *curr = fl->head();
1875   assert(curr != NULL, "List is empty");
1876   assert(numWords >= MinChunkSize, "Word size is too small");
1877   size_t oldNumWords = curr->size();
1878   assert(oldNumWords >= numWords + MinChunkSize,
1879         "Size of chunks in the list is too small");
1880 
1881   fl->remove_chunk(curr);
1882   // recorded indirectly by splitChunkAndReturnRemainder -
1883   // smallSplit(oldNumWords, numWords);
1884   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1885   // Does anything have to be done for the remainder in terms of
1886   // fixing the card table?
1887   assert(new_chunk == NULL || new_chunk->is_free(),
1888     "Should be returning a free chunk");
1889   return new_chunk;
1890 }
1891 
1892 FreeChunk*
1893 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1894   size_t new_size) {
1895   assert_locked();
1896   size_t size = chunk->size();
1897   assert(size > new_size, "Split from a smaller block?");
1898   assert(is_aligned(chunk), "alignment problem");
1899   assert(size == adjustObjectSize(size), "alignment problem");
1900   size_t rem_sz = size - new_size;
1901   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1902   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1903   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1904   assert(is_aligned(ffc), "alignment problem");
1905   ffc->set_size(rem_sz);
1906   ffc->link_next(NULL);
1907   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1908   // Above must occur before BOT is updated below.
1909   // adjust block offset table
1910   OrderAccess::storestore();
1911   assert(chunk->is_free() && ffc->is_free(), "Error");
1912   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
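       // Hand the remainder back: small remainders go to the indexed free
       // lists (under the per-size parallel lock when running multi-threaded),
       // larger ones go to the dictionary; record the split either way.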
1913   if (rem_sz < SmallForDictionary) {
1914     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1915     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1916     assert(!is_par ||
1917            (SharedHeap::heap()->n_par_threads() ==
1918             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1919     returnChunkToFreeList(ffc);
1920     split(size, rem_sz);
1921     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1922   } else {
1923     returnChunkToDictionary(ffc);
1924     split(size, rem_sz);
1925   }
1926   chunk->set_size(new_size);
1927   return chunk;
1928 }
1929 
1930 void
1931 CompactibleFreeListSpace::sweep_completed() {
1932   // Now that space is probably plentiful, refill linear
1933   // allocation blocks as needed.
1934   refillLinearAllocBlocksIfNeeded();
1935 }
1936 
1937 void
1938 CompactibleFreeListSpace::gc_prologue() {
1939   assert_locked();
1940   if (PrintFLSStatistics != 0) {
1941     gclog_or_tty->print("Before GC:\n");
1942     reportFreeListStatistics();
1943   }
1944   refillLinearAllocBlocksIfNeeded();
1945 }
1946 
1947 void
1948 CompactibleFreeListSpace::gc_epilogue() {
1949   assert_locked();
1950   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1951     if (_smallLinearAllocBlock._word_size == 0)
1952       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1953   }
1954   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1955   _promoInfo.stopTrackingPromotions();
1956   repairLinearAllocationBlocks();
1957   // Print Space's stats
1958   if (PrintFLSStatistics != 0) {
1959     gclog_or_tty->print("After GC:\n");
1960     reportFreeListStatistics();
1961   }
1962 }
1963 
1964 // Iteration support, mostly delegated from a CMS generation
1965 
1966 void CompactibleFreeListSpace::save_marks() {
1967   assert(Thread::current()->is_VM_thread(),
1968          "Global variable should only be set when single-threaded");
1969   // Mark the "end" of the used space at the time of this call;
1970   // note, however, that promoted objects from this point
1971   // on are tracked in the _promoInfo below.
1972   set_saved_mark_word(unallocated_block());
1973 #ifdef ASSERT
1974   // Check the sanity of save_marks() etc.
1975   MemRegion ur    = used_region();
1976   MemRegion urasm = used_region_at_save_marks();
1977   assert(ur.contains(urasm),
1978          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1979                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1980                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1981 #endif
1982   // inform allocator that promotions should be tracked.
1983   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1984   _promoInfo.startTrackingPromotions();
1985 }
1986 
1987 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1988   assert(_promoInfo.tracking(), "No preceding save_marks?");
1989   assert(SharedHeap::heap()->n_par_threads() == 0,
1990          "Shouldn't be called if using parallel gc.");
1991   return _promoInfo.noPromotions();
1992 }
1993 
1994 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1995                                                                             \
1996 void CompactibleFreeListSpace::                                             \
1997 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1998   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
1999          "Shouldn't be called (yet) during parallel part of gc.");          \
2000   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2001   /*                                                                        \
2002    * This also restores any displaced headers and removes the elements from \
2003    * the iteration set as they are processed, so that we have a clean slate \
2004    * at the end of the iteration. Note, thus, that if new objects are       \
2005    * promoted as a result of the iteration they are iterated over as well.  \
2006    */                                                                       \
2007   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2008 }
2009 
2010 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2011 
2012 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2013   return _smallLinearAllocBlock._word_size == 0;
2014 }
2015 
2016 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2017   // Fix up linear allocation blocks to look like free blocks
2018   repairLinearAllocBlock(&_smallLinearAllocBlock);
2019 }
2020 
2021 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2022   assert_locked();
2023   if (blk->_ptr != NULL) {
2024     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2025            "Minimum block size requirement");
2026     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2027     fc->set_size(blk->_word_size);
2028     fc->link_prev(NULL);   // mark as free
2029     fc->dontCoalesce();
2030     assert(fc->is_free(), "just marked it free");
2031     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2032   }
2033 }
2034 
2035 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2036   assert_locked();
2037   if (_smallLinearAllocBlock._ptr == NULL) {
2038     assert(_smallLinearAllocBlock._word_size == 0,
2039       "Size of linAB should be zero if the ptr is NULL");
2040     // Reset the linAB refill and allocation size limit.
2041     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2042   }
2043   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2044 }
2045 
2046 void
2047 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2048   assert_locked();
2049   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2050          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2051          "blk invariant");
2052   if (blk->_ptr == NULL) {
2053     refillLinearAllocBlock(blk);
2054   }
2055   if (PrintMiscellaneous && Verbose) {
2056     if (blk->_word_size == 0) {
2057       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2058     }
2059   }
2060 }
2061 
2062 void
2063 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2064   assert_locked();
2065   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2066          "linear allocation block should be empty");
2067   FreeChunk* fc;
2068   if (blk->_refillSize < SmallForDictionary &&
2069       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2070     // A linAB's strategy might be to use small sizes to reduce
2071     // fragmentation but still get the benefits of allocation from a
2072     // linAB.
2073   } else {
2074     fc = getChunkFromDictionary(blk->_refillSize);
2075   }
2076   if (fc != NULL) {
2077     blk->_ptr  = (HeapWord*)fc;
2078     blk->_word_size = fc->size();
2079     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2080   }
2081 }
2082 
2083 // Support for concurrent collection policy decisions.
2084 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2085   // In the future we might want to add in fragmentation stats --
2086   // including erosion of the "mountain" into this decision as well.
2087   return !adaptive_freelists() && linearAllocationWouldFail();
2088 }
2089 
2090 // Support for compaction
2091 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2092   scan_and_forward(this, cp);
2093   // prepare_for_compaction() uses the space between live objects
2094   // so that the later phases can skip dead space quickly.  As a result,
2095   // verification of the free lists doesn't work after this point.
2096 }
2097 
2098 void CompactibleFreeListSpace::adjust_pointers() {
2099   // In other versions of adjust_pointers(), a bail out
2100   // based on the amount of live data in the generation
2101   // (i.e., if 0, bail out) may be used.
2102   // Cannot test used() == 0 here because the free lists have already
2103   // been mangled by the compaction.
2104 
2105   scan_and_adjust_pointers(this);
2106   // See note about verification in prepare_for_compaction().
2107 }
2108 
2109 void CompactibleFreeListSpace::compact() {
2110   scan_and_compact(this);
2111 }
2112 
2113 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2],
2114 // where fbs ranges over the free block sizes.
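     // For example, two free blocks of 4 words each give
     // 1 - (16 + 16)/(8 * 8) = 0.5, while a single 8-word block gives
     // 1 - 64/64 = 0.0; a single large block counts as unfragmented.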
2115 double CompactibleFreeListSpace::flsFrag() const {
2116   size_t itabFree = totalSizeInIndexedFreeLists();
2117   double frag = 0.0;
2118   size_t i;
2119 
2120   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2121     double sz  = i;
2122     frag      += _indexedFreeList[i].count() * (sz * sz);
2123   }
2124 
2125   double totFree = itabFree +
2126                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2127   if (totFree > 0) {
2128     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2129             (totFree * totFree));
2130     frag = (double)1.0  - frag;
2131   } else {
2132     assert(frag == 0.0, "Follows from totFree == 0");
2133   }
2134   return frag;
2135 }
2136 
2137 void CompactibleFreeListSpace::beginSweepFLCensus(
2138   float inter_sweep_current,
2139   float inter_sweep_estimate,
2140   float intra_sweep_estimate) {
2141   assert_locked();
2142   size_t i;
2143   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2144     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2145     if (PrintFLSStatistics > 1) {
2146       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2147     }
2148     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2149     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2150     fl->set_before_sweep(fl->count());
2151     fl->set_bfr_surp(fl->surplus());
2152   }
2153   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2154                                     inter_sweep_current,
2155                                     inter_sweep_estimate,
2156                                     intra_sweep_estimate);
2157 }
2158 
2159 void CompactibleFreeListSpace::setFLSurplus() {
2160   assert_locked();
2161   size_t i;
2162   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2163     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2164     fl->set_surplus(fl->count() -
2165                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2166   }
2167 }
2168 
2169 void CompactibleFreeListSpace::setFLHints() {
2170   assert_locked();
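       // Walk the lists from the largest size down so that each list's hint
       // ends up pointing at the nearest larger size that currently has a
       // surplus (or at IndexSetSize if no larger size does).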
2171   size_t i;
2172   size_t h = IndexSetSize;
2173   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2174     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2175     fl->set_hint(h);
2176     if (fl->surplus() > 0) {
2177       h = i;
2178     }
2179   }
2180 }
2181 
2182 void CompactibleFreeListSpace::clearFLCensus() {
2183   assert_locked();
2184   size_t i;
2185   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2186     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2187     fl->set_prev_sweep(fl->count());
2188     fl->set_coal_births(0);
2189     fl->set_coal_deaths(0);
2190     fl->set_split_births(0);
2191     fl->set_split_deaths(0);
2192   }
2193 }
2194 
2195 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2196   if (PrintFLSStatistics > 0) {
2197     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2198     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2199                            p2i(largestAddr));
2200   }
2201   setFLSurplus();
2202   setFLHints();
2203   if (PrintGC && PrintFLSCensus > 0) {
2204     printFLCensus(sweep_count);
2205   }
2206   clearFLCensus();
2207   assert_locked();
2208   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2209 }
2210 
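     // A size is considered over-populated for coalescing purposes when its
     // indexed free list already holds more chunks than the census-derived
     // coal_desired() target; larger sizes defer to the dictionary's
     // coal_dict_over_populated() test.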
2211 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2212   if (size < SmallForDictionary) {
2213     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2214     return (fl->coal_desired() < 0) ||
2215            ((int)fl->count() > fl->coal_desired());
2216   } else {
2217     return dictionary()->coal_dict_over_populated(size);
2218   }
2219 }
2220 
2221 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2222   assert(size < SmallForDictionary, "Size too large for indexed list");
2223   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2224   fl->increment_coal_births();
2225   fl->increment_surplus();
2226 }
2227 
2228 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2229   assert(size < SmallForDictionary, "Size too large for indexed list");
2230   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2231   fl->increment_coal_deaths();
2232   fl->decrement_surplus();
2233 }
2234 
2235 void CompactibleFreeListSpace::coalBirth(size_t size) {
2236   if (size  < SmallForDictionary) {
2237     smallCoalBirth(size);
2238   } else {
2239     dictionary()->dict_census_update(size,
2240                                    false /* split */,
2241                                    true /* birth */);
2242   }
2243 }
2244 
2245 void CompactibleFreeListSpace::coalDeath(size_t size) {
2246   if(size  < SmallForDictionary) {
2247     smallCoalDeath(size);
2248   } else {
2249     dictionary()->dict_census_update(size,
2250                                    false /* split */,
2251                                    false /* birth */);
2252   }
2253 }
2254 
2255 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2256   assert(size < SmallForDictionary, "Size too large for indexed list");
2257   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2258   fl->increment_split_births();
2259   fl->increment_surplus();
2260 }
2261 
2262 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2263   assert(size < SmallForDictionary, "Size too large for indexed list");
2264   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2265   fl->increment_split_deaths();
2266   fl->decrement_surplus();
2267 }
2268 
2269 void CompactibleFreeListSpace::split_birth(size_t size) {
2270   if (size  < SmallForDictionary) {
2271     smallSplitBirth(size);
2272   } else {
2273     dictionary()->dict_census_update(size,
2274                                    true /* split */,
2275                                    true /* birth */);
2276   }
2277 }
2278 
2279 void CompactibleFreeListSpace::splitDeath(size_t size) {
2280   if (size  < SmallForDictionary) {
2281     smallSplitDeath(size);
2282   } else {
2283     dictionary()->dict_census_update(size,
2284                                    true /* split */,
2285                                    false /* birth */);
2286   }
2287 }
2288 
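     // Splitting a block of size 'from' into a piece of size 'to1' records a
     // split-death for 'from' and split-births for 'to1' and the remainder
     // 'from - to1'.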
2289 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2290   size_t to2 = from - to1;
2291   splitDeath(from);
2292   split_birth(to1);
2293   split_birth(to2);
2294 }
2295 
2296 void CompactibleFreeListSpace::print() const {
2297   print_on(tty);
2298 }
2299 
2300 void CompactibleFreeListSpace::prepare_for_verify() {
2301   assert_locked();
2302   repairLinearAllocationBlocks();
2303   // Verify that the SpoolBlocks look like free blocks of
2304   // appropriate sizes... To be done ...
2305 }
2306 
2307 class VerifyAllBlksClosure: public BlkClosure {
2308  private:
2309   const CompactibleFreeListSpace* _sp;
2310   const MemRegion                 _span;
2311   HeapWord*                       _last_addr;
2312   size_t                          _last_size;
2313   bool                            _last_was_obj;
2314   bool                            _last_was_live;
2315 
2316  public:
2317   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2318     MemRegion span) :  _sp(sp), _span(span),
2319                        _last_addr(NULL), _last_size(0),
2320                        _last_was_obj(false), _last_was_live(false) { }
2321 
2322   virtual size_t do_blk(HeapWord* addr) {
2323     size_t res;
2324     bool   was_obj  = false;
2325     bool   was_live = false;
2326     if (_sp->block_is_obj(addr)) {
2327       was_obj = true;
2328       oop p = oop(addr);
2329       guarantee(p->is_oop(), "Should be an oop");
2330       res = _sp->adjustObjectSize(p->size());
2331       if (_sp->obj_is_alive(addr)) {
2332         was_live = true;
2333         p->verify();
2334       }
2335     } else {
2336       FreeChunk* fc = (FreeChunk*)addr;
2337       res = fc->size();
2338       if (FLSVerifyLists && !fc->cantCoalesce()) {
2339         guarantee(_sp->verify_chunk_in_free_list(fc),
2340                   "Chunk should be on a free list");
2341       }
2342     }
2343     if (res == 0) {
2344       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2345       gclog_or_tty->print_cr(
2346         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2347         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2348         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2349         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2350       _sp->print_on(gclog_or_tty);
2351       guarantee(false, "Seppuku!");
2352     }
2353     _last_addr = addr;
2354     _last_size = res;
2355     _last_was_obj  = was_obj;
2356     _last_was_live = was_live;
2357     return res;
2358   }
2359 };
2360 
2361 class VerifyAllOopsClosure: public OopClosure {
2362  private:
2363   const CMSCollector*             _collector;
2364   const CompactibleFreeListSpace* _sp;
2365   const MemRegion                 _span;
2366   const bool                      _past_remark;
2367   const CMSBitMap*                _bit_map;
2368 
2369  protected:
2370   void do_oop(void* p, oop obj) {
2371     if (_span.contains(obj)) { // the interior oop points into CMS heap
2372       if (!_span.contains(p)) { // reference from outside CMS heap
2373         // Should be a valid object; the first disjunct below allows
2374         // us to sidestep an assertion in block_is_obj() that insists
2375         // that p be in _sp. Note that several generations (and spaces)
2376         // are spanned by _span (CMS heap) above.
2377         guarantee(!_sp->is_in_reserved(obj) ||
2378                   _sp->block_is_obj((HeapWord*)obj),
2379                   "Should be an object");
2380         guarantee(obj->is_oop(), "Should be an oop");
2381         obj->verify();
2382         if (_past_remark) {
2383           // Remark has been completed, so the object should be marked.
2384           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2385         }
2386       } else { // reference within CMS heap
2387         if (_past_remark) {
2388           // Remark has been completed -- so the referent should have
2389           // been marked, if referring object is.
2390           if (_bit_map->isMarked(_collector->block_start(p))) {
2391             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2392           }
2393         }
2394       }
2395     } else if (_sp->is_in_reserved(p)) {
2396       // the reference is from FLS, and points out of FLS
2397       guarantee(obj->is_oop(), "Should be an oop");
2398       obj->verify();
2399     }
2400   }
2401 
2402   template <class T> void do_oop_work(T* p) {
2403     T heap_oop = oopDesc::load_heap_oop(p);
2404     if (!oopDesc::is_null(heap_oop)) {
2405       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2406       do_oop(p, obj);
2407     }
2408   }
2409 
2410  public:
2411   VerifyAllOopsClosure(const CMSCollector* collector,
2412     const CompactibleFreeListSpace* sp, MemRegion span,
2413     bool past_remark, CMSBitMap* bit_map) :
2414     _collector(collector), _sp(sp), _span(span),
2415     _past_remark(past_remark), _bit_map(bit_map) { }
2416 
2417   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2418   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2419 };
2420 
2421 void CompactibleFreeListSpace::verify() const {
2422   assert_lock_strong(&_freelistLock);
2423   verify_objects_initialized();
2424   MemRegion span = _collector->_span;
2425   bool past_remark = (_collector->abstract_state() ==
2426                       CMSCollector::Sweeping);
2427 
2428   ResourceMark rm;
2429   HandleMark  hm;
2430 
2431   // Check integrity of CFL data structures
2432   _promoInfo.verify();
2433   _dictionary->verify();
2434   if (FLSVerifyIndexTable) {
2435     verifyIndexedFreeLists();
2436   }
2437   // Check integrity of all objects and free blocks in space
2438   {
2439     VerifyAllBlksClosure cl(this, span);
2440     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2441   }
2442   // Check that all references in the heap to FLS
2443   // are to valid objects in FLS or that references in
2444   // FLS are to valid objects elsewhere in the heap
2445   if (FLSVerifyAllHeapReferences)
2446   {
2447     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2448       _collector->markBitMap());
2449     CollectedHeap* ch = Universe::heap();
2450 
2451     // Iterate over all oops in the heap. Uses the _no_header version
2452     // since we are not interested in following the klass pointers.
2453     ch->oop_iterate_no_header(&cl);
2454   }
2455 
2456   if (VerifyObjectStartArray) {
2457     // Verify the block offset table
2458     _bt.verify();
2459   }
2460 }
2461 
2462 #ifndef PRODUCT
2463 void CompactibleFreeListSpace::verifyFreeLists() const {
2464   if (FLSVerifyLists) {
2465     _dictionary->verify();
2466     verifyIndexedFreeLists();
2467   } else {
2468     if (FLSVerifyDictionary) {
2469       _dictionary->verify();
2470     }
2471     if (FLSVerifyIndexTable) {
2472       verifyIndexedFreeLists();
2473     }
2474   }
2475 }
2476 #endif
2477 
2478 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2479   size_t i = 0;
2480   for (; i < IndexSetStart; i++) {
2481     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2482   }
2483   for (; i < IndexSetSize; i++) {
2484     verifyIndexedFreeList(i);
2485   }
2486 }
2487 
2488 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2489   FreeChunk* fc   =  _indexedFreeList[size].head();
2490   FreeChunk* tail =  _indexedFreeList[size].tail();
2491   size_t    num = _indexedFreeList[size].count();
2492   size_t      n = 0;
2493   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2494             "Slot should have been empty");
2495   for (; fc != NULL; fc = fc->next(), n++) {
2496     guarantee(fc->size() == size, "Size inconsistency");
2497     guarantee(fc->is_free(), "!free?");
2498     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2499     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2500   }
2501   guarantee(n == num, "Incorrect count");
2502 }
2503 
2504 #ifndef PRODUCT
2505 void CompactibleFreeListSpace::check_free_list_consistency() const {
2506   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2507     "Some sizes can't be allocated without recourse to"
2508     " linear allocation buffers");
2509   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2510     "else MIN_TREE_CHUNK_SIZE is wrong");
2511   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2512   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2513 }
2514 #endif
2515 
2516 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2517   assert_lock_strong(&_freelistLock);
2518   AdaptiveFreeList<FreeChunk> total;
2519   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2520   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2521   size_t total_free = 0;
2522   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2523     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2524     total_free += fl->count() * fl->size();
2525     if (i % (40*IndexSetStride) == 0) {
2526       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2527     }
2528     fl->print_on(gclog_or_tty);
2529     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2530     total.set_surplus(    total.surplus()     + fl->surplus()    );
2531     total.set_desired(    total.desired()     + fl->desired()    );
2532     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2533     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2534     total.set_count(      total.count()       + fl->count()      );
2535     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2536     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2537     total.set_split_births(total.split_births() + fl->split_births());
2538     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2539   }
2540   total.print_on(gclog_or_tty, "TOTAL");
2541   gclog_or_tty->print_cr("Total free in indexed lists "
2542                          SIZE_FORMAT " words", total_free);
2543   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2544     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2545             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2546     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2547   _dictionary->print_dict_census();
2548 }
2549 
2550 ///////////////////////////////////////////////////////////////////////////
2551 // CFLS_LAB
2552 ///////////////////////////////////////////////////////////////////////////
2553 
2554 #define VECTOR_257(x)                                                                                  \
2555   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2556   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2557      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2558      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2559      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2560      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2561      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2562      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2563      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2564      x }
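// For illustration: a use such as VECTOR_257(0) expands to a brace-enclosed
// initializer with exactly 257 copies of its argument (eight rows of 32 plus
// the trailing element), which is why the CFLS_LAB constructor below asserts
// that CompactibleFreeListSpace::IndexSetSize == 257.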
2565 
2566 // Initialize with the default setting for CMS, _not_ the
2567 // generic OldPLABSize, whose static default is different; if OldPLABSize is
2568 // overridden on the command line, this will be reinitialized via a call to
2569 // modify_initialization() below.
2570 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2571   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
2572 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2573 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2574 
2575 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2576   _cfls(cfls)
2577 {
2578   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2579   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2580        i < CompactibleFreeListSpace::IndexSetSize;
2581        i += CompactibleFreeListSpace::IndexSetStride) {
2582     _indexedFreeList[i].set_size(i);
2583     _num_blocks[i] = 0;
2584   }
2585 }
2586 
2587 static bool _CFLS_LAB_modified = false;
2588 
2589 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2590   assert(!_CFLS_LAB_modified, "Call only once");
2591   _CFLS_LAB_modified = true;
2592   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2593        i < CompactibleFreeListSpace::IndexSetSize;
2594        i += CompactibleFreeListSpace::IndexSetStride) {
2595     _blocks_to_claim[i].modify(n, wt, true /* force */);
2596   }
2597 }
2598 
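// Allocation splits on size: requests of IndexSetSize words or more go to the
// shared dictionary under parDictionaryAllocLock(), while smaller requests are
// served from this thread's local indexed free list, which is refilled from
// the global pool when it runs dry. For example (sizes are hypothetical), a
// 300-word request takes the dictionary path, whereas a 10-word request is
// satisfied from the local _indexedFreeList[10].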
2599 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2600   FreeChunk* res;
2601   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2602   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2603     // This locking manages sync with other large object allocations.
2604     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2605                     Mutex::_no_safepoint_check_flag);
2606     res = _cfls->getChunkFromDictionaryExact(word_sz);
2607     if (res == NULL) return NULL;
2608   } else {
2609     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2610     if (fl->count() == 0) {
2611       // Attempt to refill this local free list.
2612       get_from_global_pool(word_sz, fl);
2613       // If it didn't work, give up.
2614       if (fl->count() == 0) return NULL;
2615     }
2616     res = fl->get_chunk_at_head();
2617     assert(res != NULL, "Why was count non-zero?");
2618   }
2619   res->markNotFree();
2620   assert(!res->is_free(), "shouldn't be marked free");
2621   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2622   // Mangle a just-allocated object with a distinct pattern.
2623   debug_only(res->mangleAllocated(word_sz));
2624   return (HeapWord*)res;
2625 }
2626 
2627 // Get a chunk of blocks of the right size and update related
2628 // book-keeping stats
2629 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2630   // Get the #blocks we want to claim
2631   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2632   assert(n_blks > 0, "Error");
2633   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2634   // In some cases, when the application has a phase change,
2635   // there may be a sudden and sharp shift in the object survival
2636   // profile, and updating the counts at the end of a scavenge
2637   // may not be quick enough, giving rise to large scavenge pauses
2638   // during these phase changes. It is beneficial to detect such
2639   // changes on-the-fly during a scavenge and avoid such a phase-change
2640   // pothole. The following code is a heuristic attempt to do that.
2641   // It is protected by a product flag until we have gained
2642   // enough experience with this heuristic and fine-tuned its behavior.
2643   // WARNING: This might increase fragmentation if we overreact to
2644   // small spikes, so some kind of historical smoothing based on
2645   // previous experience with the greater reactivity might be useful.
2646   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2647   // default.
2648   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2649     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2650     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2651     n_blks = MIN2(n_blks, CMSOldPLABMax);
2652   }
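  // Worked example of the resizing step above (flag values are hypothetical):
  // with n_blks == 50, _num_blocks[word_sz] == 2000, a tolerance factor of 4,
  // 4 refills per scavenge and a reactivity factor of 2,
  //   multiple == 2000 / (4 * 4 * 50) == 2
  //   n_blks   == 50 + 2 * 2 * 50    == 250
  // i.e. unexpectedly heavy demand for this size scales the next refill up
  // 5x, subject to the CMSOldPLABMax clamp below.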
2653   assert(n_blks > 0, "Error");
2654   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2655   // Update stats table entry for this block size
2656   _num_blocks[word_sz] += fl->count();
2657 }
2658 
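// Per size class i, the sample fed into the historical average below is
//   clamp(_global_num_blocks[i] / (_global_num_workers[i] * CMSOldPLABNumRefills),
//         CMSOldPLABMin, CMSOldPLABMax)
// i.e. the blocks actually consumed last scavenge, spread over the workers and
// the expected number of refills each. For instance (numbers are hypothetical),
// 4 workers that together consumed 3200 blocks of size i, at 4 refills apiece,
// yield a sample of 3200 / (4 * 4) == 200 blocks per refill.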
2659 void CFLS_LAB::compute_desired_plab_size() {
2660   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2661        i < CompactibleFreeListSpace::IndexSetSize;
2662        i += CompactibleFreeListSpace::IndexSetStride) {
2663     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2664            "Counter inconsistency");
2665     if (_global_num_workers[i] > 0) {
2666       // Need to smooth wrt historical average
2667       if (ResizeOldPLAB) {
2668         _blocks_to_claim[i].sample(
2669           MAX2((size_t)CMSOldPLABMin,
2670           MIN2((size_t)CMSOldPLABMax,
2671                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2672       }
2673       // Reset counters for next round
2674       _global_num_workers[i] = 0;
2675       _global_num_blocks[i] = 0;
2676       if (PrintOldPLAB) {
2677         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2678                                i, (size_t)_blocks_to_claim[i].average());
2679       }
2680     }
2681   }
2682 }
2683 
2684 // If this is changed in the future to allow parallel
2685 // access, one would need to take the FL locks and,
2686 // depending on how it is used, stagger access from
2687 // parallel threads to reduce contention.
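// For example (illustrative numbers): if this worker claimed a total of
// _num_blocks[i] == 20 blocks of size i during the scavenge and 3 remain
// unused on its local list, then num_retire == 3, the 17 consumed blocks are
// added to _global_num_blocks[i] for the next PLAB-size computation, and the
// 3 leftovers are prepended back onto the space's shared _indexedFreeList[i].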
2688 void CFLS_LAB::retire(int tid) {
2689   // We run this single threaded with the world stopped;
2690   // so no need for locks and such.
2691   NOT_PRODUCT(Thread* t = Thread::current();)
2692   assert(Thread::current()->is_VM_thread(), "Error");
2693   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2694        i < CompactibleFreeListSpace::IndexSetSize;
2695        i += CompactibleFreeListSpace::IndexSetStride) {
2696     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2697            "Can't retire more than what we obtained");
2698     if (_num_blocks[i] > 0) {
2699       size_t num_retire =  _indexedFreeList[i].count();
2700       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2701       {
2702         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2703         //                Mutex::_no_safepoint_check_flag);
2704 
2705         // Update global stats for the number of blocks used
2706         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2707         _global_num_workers[i]++;
2708         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2709         if (num_retire > 0) {
2710           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2711           // Reset this list.
2712           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2713           _indexedFreeList[i].set_size(i);
2714         }
2715       }
2716       if (PrintOldPLAB) {
2717         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2718                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2719       }
2720       // Reset stats for next round
2721       _num_blocks[i]         = 0;
2722     }
2723   }
2724 }
2725 
2726 // Used by par_get_chunk_of_blocks() for the chunks from the
2727 // indexed free lists.  Looks for a chunk whose size is a multiple
2728 // of "word_sz" and, if found, splits it into "word_sz"-sized chunks,
2729 // adding them to the free list "fl".  "n" is the maximum number of chunks
2730 // to be added to "fl".
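// For example (illustrative numbers): with word_sz == 8, n == 16 and an empty
// 8-word list, the loop below may find chunks on the 16-word list (k == 2);
// it then takes up to n/k == 8 of them and splits each in two, pushing 16
// 8-word chunks onto "fl", charging 8 split deaths to the 16-word list and
// 16 split births to the 8-word list.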
2731 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2732 
2733   // We'll try all multiples of word_sz in the indexed set, starting with
2734   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples as
2735   // well; if that fails, the caller falls back to the dictionary instead.
2736   {
2737     bool found;
2738     int  k;
2739     size_t cur_sz;
2740     for (k = 1, cur_sz = k * word_sz, found = false;
2741          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2742          (CMSSplitIndexedFreeListBlocks || k <= 1);
2743          k++, cur_sz = k * word_sz) {
2744       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2745       fl_for_cur_sz.set_size(cur_sz);
2746       {
2747         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2748                         Mutex::_no_safepoint_check_flag);
2749         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2750         if (gfl->count() != 0) {
2751           // nn is the number of chunks of size cur_sz that
2752           // we'd need to split k-ways each, in order to create
2753           // "n" chunks of size word_sz each.
2754           const size_t nn = MAX2(n/k, (size_t)1);
2755           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2756           found = true;
2757           if (k > 1) {
2758             // Update split death stats for the cur_sz-size blocks list:
2759             // we increment the split death count by the number of blocks
2760             // we just took from the cur_sz-size blocks list and which
2761             // we will be splitting below.
2762             ssize_t deaths = gfl->split_deaths() +
2763                              fl_for_cur_sz.count();
2764             gfl->set_split_deaths(deaths);
2765           }
2766         }
2767       }
2768       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2769       if (found) {
2770         if (k == 1) {
2771           fl->prepend(&fl_for_cur_sz);
2772         } else {
2773           // Divide each block on fl_for_cur_sz up k ways.
2774           FreeChunk* fc;
2775           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2776             // Must do this in reverse order, so that anybody attempting to
2777             // access the main chunk sees it as a single free block until we
2778             // change it.
2779             size_t fc_size = fc->size();
2780             assert(fc->is_free(), "Error");
2781             for (int i = k-1; i >= 0; i--) {
2782               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2783               assert((i != 0) ||
2784                         ((fc == ffc) && ffc->is_free() &&
2785                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2786                         "Counting error");
2787               ffc->set_size(word_sz);
2788               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2789               ffc->link_next(NULL);
2790               // Above must occur before BOT is updated below.
2791               OrderAccess::storestore();
2792               // splitting from the right, fc_size == i * word_sz
2793               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2794               fc_size -= word_sz;
2795               assert(fc_size == i*word_sz, "Error");
2796               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2797               _bt.verify_single_block((HeapWord*)fc, fc_size);
2798               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2799               // Push this on "fl".
2800               fl->return_chunk_at_head(ffc);
2801             }
2802             // TRAP
2803             assert(fl->tail()->next() == NULL, "List invariant.");
2804           }
2805         }
2806         // Update birth stats for this block size.
2807         size_t num = fl->count();
2808         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2809                         Mutex::_no_safepoint_check_flag);
2810         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2811         _indexedFreeList[word_sz].set_split_births(births);
2812         return true;
2813       }
2814     }
2815     return found;
2816   }
2817 }
2818 
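// For example (illustrative numbers): a request for n == 50 chunks of
// word_sz == 8 asks the dictionary for at least 400 words. If the best fit is
// a 410-word chunk, the prefix keeps n * word_sz == 400 words and the 10-word
// remainder (assuming 10 >= MinChunkSize and 10 < IndexSetSize) is returned to
// _indexedFreeList[10] as a split birth; had the remainder been smaller than
// MinChunkSize, one fewer chunk would have been carved and its words folded
// into the remainder instead.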
2819 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2820 
2821   FreeChunk* fc = NULL;
2822   FreeChunk* rem_fc = NULL;
2823   size_t rem;
2824   {
2825     MutexLockerEx x(parDictionaryAllocLock(),
2826                     Mutex::_no_safepoint_check_flag);
2827     while (n > 0) {
2828       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2829                                   FreeBlockDictionary<FreeChunk>::atLeast);
2830       if (fc != NULL) {
2831         break;
2832       } else {
2833         n--;
2834       }
2835     }
2836     if (fc == NULL) return NULL;
2837     // Otherwise, split up that block.
2838     assert((ssize_t)n >= 1, "Control point invariant");
2839     assert(fc->is_free(), "Error: should be a free block");
2840     _bt.verify_single_block((HeapWord*)fc, fc->size());
2841     const size_t nn = fc->size() / word_sz;
2842     n = MIN2(nn, n);
2843     assert((ssize_t)n >= 1, "Control point invariant");
2844     rem = fc->size() - n * word_sz;
2845     // If there is a remainder, and it's too small, allocate one fewer.
2846     if (rem > 0 && rem < MinChunkSize) {
2847       n--; rem += word_sz;
2848     }
2849     // Note that at this point we may have n == 0.
2850     assert((ssize_t)n >= 0, "Control point invariant");
2851 
2852     // If n is 0, the chunk fc that was found is not large
2853     // enough to leave a viable remainder.  We are unable to
2854     // allocate even one block.  Return fc to the
2855     // dictionary and return, leaving "fl" empty.
2856     if (n == 0) {
2857       returnChunkToDictionary(fc);
2858       return NULL;
2859     }
2860 
2861     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2862     dictionary()->dict_census_update(fc->size(),
2863                                      true /*split*/,
2864                                      false /*birth*/);
2865 
2866     // First return the remainder, if any.
2867     // Note that we hold the lock until we decide if we're going to give
2868     // back the remainder to the dictionary, since a concurrent allocation
2869     // may otherwise see the heap as empty.  (We're willing to take that
2870     // hit if the block is a small block.)
2871     if (rem > 0) {
2872       size_t prefix_size = n * word_sz;
2873       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2874       rem_fc->set_size(rem);
2875       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2876       rem_fc->link_next(NULL);
2877       // Above must occur before BOT is updated below.
2878       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2879       OrderAccess::storestore();
2880       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2881       assert(fc->is_free(), "Error");
2882       fc->set_size(prefix_size);
2883       if (rem >= IndexSetSize) {
2884         returnChunkToDictionary(rem_fc);
2885         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2886         rem_fc = NULL;
2887       }
2888       // Otherwise, return it to the small list below.
2889     }
2890   }
2891   if (rem_fc != NULL) {
2892     MutexLockerEx x(_indexedFreeListParLocks[rem],
2893                     Mutex::_no_safepoint_check_flag);
2894     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2895     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2896     smallSplitBirth(rem);
2897   }
2898   assert(n * word_sz == fc->size(),
2899     err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable into "
2900     SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2901     fc->size(), n, word_sz));
2902   return fc;
2903 }
2904 
2905 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2906 
2907   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2908 
2909   if (fc == NULL) {
2910     return;
2911   }
2912 
2913   size_t n = fc->size() / word_sz;
2914 
2915   assert((ssize_t)n > 0, "Consistency");
2916   // Now do the splitting up.
2917   // Must do this in reverse order, so that anybody attempting to
2918   // access the main chunk sees it as a single free block until we
2919   // change it.
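  // Concretely: while the loop below carves chunks n-1, n-2, ..., 1 off the
  // tail, fc itself still carries its original size, so a concurrent reader
  // starting at fc sees one large free block covering the whole region; only
  // the final fc->set_size(word_sz) after the loop exposes the individual
  // word_sz-sized pieces.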
2920   size_t fc_size = n * word_sz;
2921   // All but first chunk in this loop
2922   for (ssize_t i = n-1; i > 0; i--) {
2923     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2924     ffc->set_size(word_sz);
2925     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2926     ffc->link_next(NULL);
2927     // Above must occur before BOT is updated below.
2928     OrderAccess::storestore();
2929     // Splitting from the right; after the decrement below, fc_size == i * word_sz
2930     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2931     fc_size -= word_sz;
2932     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2933     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2934     _bt.verify_single_block((HeapWord*)fc, fc_size);
2935     // Push this on "fl".
2936     fl->return_chunk_at_head(ffc);
2937   }
2938   // First chunk
2939   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2940   // The blocks above should show their new sizes before the size of the first block, fc, is changed below
2941   fc->set_size(word_sz);
2942   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2943   fc->link_next(NULL);
2944   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2945   _bt.verify_single_block((HeapWord*)fc, fc->size());
2946   fl->return_chunk_at_head(fc);
2947 
2948   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2949   {
2950     // Update the stats for this block size.
2951     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2952                     Mutex::_no_safepoint_check_flag);
2953     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2954     _indexedFreeList[word_sz].set_split_births(births);
2955     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2956     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2957   }
2958 
2959   // TRAP
2960   assert(fl->tail()->next() == NULL, "List invariant.");
2961 }
2962 
2963 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2964   assert(fl->count() == 0, "Precondition.");
2965   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2966          "Precondition");
2967 
2968   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2969     // Got it
2970     return;
2971   }
2972 
2973   // Otherwise, we'll split a block from the dictionary.
2974   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2975 }
2976 
2977 // Set up the space's par_seq_tasks structure for work claiming
2978 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2979 // XXX Need to suitably abstract and generalize this and the next
2980 // method into one.
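// The task count below is the ceiling of used_region().word_size() over
// rescan_task_size(). For example (sizes are hypothetical), a 10,000,000-word
// used region with a 4096-word task size gives
//   n_tasks == (10000000 + 4096 - 1) / 4096 == 2442
// tasks, the last of which covers the partial tail of the region.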
2981 void
2982 CompactibleFreeListSpace::
2983 initialize_sequential_subtasks_for_rescan(int n_threads) {
2984   // The "size" of each task is fixed according to rescan_task_size.
2985   assert(n_threads > 0, "Unexpected n_threads argument");
2986   const size_t task_size = rescan_task_size();
2987   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2988   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2989   assert(n_tasks == 0 ||
2990          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2991           (used_region().start() + n_tasks*task_size >= used_region().end())),
2992          "n_tasks calculation incorrect");
2993   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2994   assert(!pst->valid(), "Clobbering existing data?");
2995   // Sets the condition for completion of the subtask (how many threads
2996   // need to finish in order to be done).
2997   pst->set_n_threads(n_threads);
2998   pst->set_n_tasks((int)n_tasks);
2999 }
3000 
3001 // Set up the space's par_seq_tasks structure for work claiming
3002 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
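// The span to mark is the generation's reserved region, optionally clipped on
// the left at "low". For example (numbers are illustrative): with a 512-byte
// card, i.e. 64 words on a 64-bit VM, a "low" that sits 100 words into a
// card-aligned reserved region is aligned down to word 64, and the tasks are
// then laid out over [reserved_start + 64 words, reserved_end).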
3003 void
3004 CompactibleFreeListSpace::
3005 initialize_sequential_subtasks_for_marking(int n_threads,
3006                                            HeapWord* low) {
3007   // The "size" of each task is fixed according to rescan_task_size.
3008   assert(n_threads > 0, "Unexpected n_threads argument");
3009   const size_t task_size = marking_task_size();
3010   assert(task_size > CardTableModRefBS::card_size_in_words &&
3011          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3012          "Otherwise arithmetic below would be incorrect");
3013   MemRegion span = _gen->reserved();
3014   if (low != NULL) {
3015     if (span.contains(low)) {
3016       // Align low down to a card boundary so that
3017       // we can use block_offset_careful() on span boundaries.
3018       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3019                                  CardTableModRefBS::card_size);
3020       // Clip span prefix at aligned_low
3021       span = span.intersection(MemRegion(aligned_low, span.end()));
3022     } else if (low > span.end()) {
3023       span = MemRegion(low, low);  // Null region
3024     } // else use entire span
3025   }
3026   assert(span.is_empty() ||
3027          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3028         "span should start at a card boundary");
3029   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3030   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3031   assert(n_tasks == 0 ||
3032          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3033           (span.start() + n_tasks*task_size >= span.end())),
3034          "n_tasks calculation incorrect");
3035   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3036   assert(!pst->valid(), "Clobbering existing data?");
3037   // Sets the condition for completion of the subtask (how many threads
3038   // need to finish in order to be done).
3039   pst->set_n_threads(n_threads);
3040   pst->set_n_tasks((int)n_tasks);
3041 }