1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/gcTraceTime.hpp"
  33 #include "gc/shared/genCollectedHeap.hpp"
  34 #include "gc/shared/liveRange.hpp"
  35 #include "gc/shared/space.inline.hpp"
  36 #include "gc/shared/spaceDecorator.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.inline.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/globals.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/orderAccess.inline.hpp"
  46 #include "runtime/vmThread.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 /////////////////////////////////////////////////////////////////////////
  50 //// CompactibleFreeListSpace
  51 /////////////////////////////////////////////////////////////////////////
  52 
  53 // Highest ranked free list lock rank
  54 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  55 
  56 // Defaults are 0 so things will break badly if incorrectly initialized.
  57 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  58 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  59 
  60 size_t MinChunkSize = 0;
  61 
  62 void CompactibleFreeListSpace::set_cms_values() {
  63   // Set CMS global values
  64   assert(MinChunkSize == 0, "already set");
  65 
  66   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  67   // for chunks to contain a FreeChunk.
  68   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  69   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
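  // Illustrative arithmetic only (actual values are platform-dependent): with
  // 8-byte heap words and MinObjAlignmentInBytes == 8, a 24-byte FreeChunk
  // would give MinChunkSize == 3 heap words; IndexSetStart and IndexSetStride
  // below would then be 3 and 1 respectively.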
  70 
  71   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  72   IndexSetStart  = MinChunkSize;
  73   IndexSetStride = MinObjAlignment;
  74 }
  75 
  76 // Constructor
  77 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  78   MemRegion mr, bool use_adaptive_freelists,
  79   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  80   _dictionaryChoice(dictionaryChoice),
  81   _adaptive_freelists(use_adaptive_freelists),
  82   _bt(bs, mr),
  83   // Free list locks are in the range of values taken by _lockRank.
  84   // This range currently is [_leaf+2, _leaf+3].
  85   // Note: this requires that CFLspace c'tors
  86   // are called serially in the order in which the locks
  87   // are acquired in the program text. This is true today.
  88   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  89                 Monitor::_safepoint_check_sometimes),
  90   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  91                           "CompactibleFreeListSpace._dict_par_lock", true,
  92                           Monitor::_safepoint_check_never),
  93   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  94                     CMSRescanMultiple),
  95   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  96                     CMSConcMarkMultiple),
  97   _collector(NULL),
  98   _preconsumptionDirtyCardClosure(NULL)
  99 {
 100   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
 101          "FreeChunk is larger than expected");
 102   _bt.set_space(this);
 103   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 104   // We have all of "mr", all of which we place in the dictionary
 105   // as one big chunk. We'll need to decide here which of several
 106   // possible alternative dictionary implementations to use. For
 107   // now the choice is easy, since we have only one working
 108   // implementation, namely, the simple binary tree (splaying
 109   // temporarily disabled).
 110   switch (dictionaryChoice) {
 111     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 112       _dictionary = new AFLBinaryTreeDictionary(mr);
 113       break;
 114     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 115     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 116     default:
 117       warning("dictionaryChoice: selected option not understood; using"
 118               " default BinaryTreeDictionary implementation instead.");
 119   }
 120   assert(_dictionary != NULL, "CMS dictionary initialization");
 121   // The indexed free lists are initially all empty and are lazily
 122   // filled in on demand. Initialize the array elements to NULL.
 123   initializeIndexedFreeListArray();
 124 
 125   // Not using adaptive free lists assumes that allocation is first
 126   // from the linAB's.  Also, a CMS perm gen that can be compacted
 127   // has to have the klass's klassKlass allocated at a lower
 128   // address in the heap than the klass, so that the klassKlass is
 129   // moved to its new location before the klass is moved.
 130   // Set the _refillSize for the linear allocation blocks.
 131   if (!use_adaptive_freelists) {
 132     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 133                                            FreeBlockDictionary<FreeChunk>::atLeast);
 134     // The small linAB initially has all the space and will allocate
 135     // a chunk of any size.
 136     HeapWord* addr = (HeapWord*) fc;
 137     _smallLinearAllocBlock.set(addr, fc->size(),
 138       1024*SmallForLinearAlloc, fc->size());
 139     // Note that _unallocated_block is not updated here.
 140     // Allocations from the linear allocation block should
 141     // update it.
 142   } else {
 143     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 144                                SmallForLinearAlloc);
 145   }
 146   // CMSIndexedFreeListReplenish should be at least 1
 147   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 148   _promoInfo.setSpace(this);
 149   if (UseCMSBestFit) {
 150     _fitStrategy = FreeBlockBestFitFirst;
 151   } else {
 152     _fitStrategy = FreeBlockStrategyNone;
 153   }
 154   check_free_list_consistency();
 155 
 156   // Initialize locks for parallel case.
 157   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 158     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 159                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 160     DEBUG_ONLY(
 161       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 162     )
 163   }
 164   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 165 }
 166 
 167 // Like CompactibleSpace forward() but always calls cross_threshold() to
 168 // update the block offset table.  Removed initialize_threshold call because
 169 // CFLS does not use a block offset array for contiguous spaces.
 170 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 171                                     CompactPoint* cp, HeapWord* compact_top) {
 172   // q is alive
 173   // First check if we should switch compaction space
 174   assert(this == cp->space, "'this' should be current compaction space.");
 175   size_t compaction_max_size = pointer_delta(end(), compact_top);
 176   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 177     "virtual adjust_object_size_v() method is not correct");
 178   size_t adjusted_size = adjustObjectSize(size);
 179   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 180          "no small fragments allowed");
 181   assert(minimum_free_block_size() == MinChunkSize,
 182          "for de-virtualized reference below");
 183   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 184   if (adjusted_size + MinChunkSize > compaction_max_size &&
 185       adjusted_size != compaction_max_size) {
 186     do {
 187       // switch to next compaction space
 188       cp->space->set_compaction_top(compact_top);
 189       cp->space = cp->space->next_compaction_space();
 190       if (cp->space == NULL) {
 191         cp->gen = GenCollectedHeap::heap()->young_gen();
 192         assert(cp->gen != NULL, "compaction must succeed");
 193         cp->space = cp->gen->first_compaction_space();
 194         assert(cp->space != NULL, "generation must have a first compaction space");
 195       }
 196       compact_top = cp->space->bottom();
 197       cp->space->set_compaction_top(compact_top);
 198       // The correct adjusted_size may not be the same as that for this method
 199       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 200       // Use the virtual method here; it was avoided above to save the virtual
 201       // dispatch.
 202       adjusted_size = cp->space->adjust_object_size_v(size);
 203       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 204       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 205     } while (adjusted_size > compaction_max_size);
 206   }
 207 
 208   // store the forwarding pointer into the mark word
 209   if ((HeapWord*)q != compact_top) {
 210     q->forward_to(oop(compact_top));
 211     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 212   } else {
 213     // if the object isn't moving we can just set the mark to the default
 214     // mark and handle it specially later on.
 215     q->init_mark();
 216     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 217   }
 218 
 219   compact_top += adjusted_size;
 220 
 221   // we need to update the offset table so that the beginnings of objects can be
 222   // found during scavenge.  Note that we are updating the offset table based on
 223   // where the object will be once the compaction phase finishes.
 224 
 225   // Always call cross_threshold().  A contiguous space can call it only when
 226   // compaction_top exceeds the current threshold, but that shortcut does not
 227   // apply to a non-contiguous space.
 228   cp->threshold =
 229     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 230   return compact_top;
 231 }
 232 
 233 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 234 // and use of single_block instead of alloc_block.  The name here is not really
 235 // appropriate - maybe a more general name could be invented for both the
 236 // contiguous and noncontiguous spaces.
 237 
 238 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 239   _bt.single_block(start, the_end);
 240   return end();
 241 }
 242 
 243 // Initialize the indexed free lists so that each starts out empty.
 244 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 245   for (size_t i = 0; i < IndexSetSize; i++) {
 246     // Note that on platforms where objects are double word aligned,
 247     // the odd array elements are not used.  It is convenient, however,
 248     // to map directly from the object size to the array element.
 249     _indexedFreeList[i].reset(IndexSetSize);
 250     _indexedFreeList[i].set_size(i);
 251     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 252     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 253     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 254     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 255   }
 256 }
 257 
 258 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 259   for (size_t i = 1; i < IndexSetSize; i++) {
 260     assert(_indexedFreeList[i].size() == (size_t) i,
 261       "Indexed free list sizes are incorrect");
 262     _indexedFreeList[i].reset(IndexSetSize);
 263     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 264     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 265     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 266     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 267   }
 268 }
 269 
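// Re-install "mr" as the single free chunk of this space: clear the indexed
// free lists and the dictionary, reset the BOT's notion of the unallocated
// block (when in use), and then return all of "mr" either to the dictionary
// or to the appropriate indexed free list, depending on its size.  The
// promotion info and the small linear allocation block are reset as well.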
 270 void CompactibleFreeListSpace::reset(MemRegion mr) {
 271   resetIndexedFreeListArray();
 272   dictionary()->reset();
 273   if (BlockOffsetArrayUseUnallocatedBlock) {
 274     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 275     // Everything's allocated until proven otherwise.
 276     _bt.set_unallocated_block(end());
 277   }
 278   if (!mr.is_empty()) {
 279     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 280     _bt.single_block(mr.start(), mr.word_size());
 281     FreeChunk* fc = (FreeChunk*) mr.start();
 282     fc->set_size(mr.word_size());
 283     if (mr.word_size() >= IndexSetSize ) {
 284       returnChunkToDictionary(fc);
 285     } else {
 286       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 287       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 288     }
 289     coalBirth(mr.word_size());
 290   }
 291   _promoInfo.reset();
 292   _smallLinearAllocBlock._ptr = NULL;
 293   _smallLinearAllocBlock._word_size = 0;
 294 }
 295 
 296 void CompactibleFreeListSpace::reset_after_compaction() {
 297   // Reset the space to the new reality - one free chunk.
 298   MemRegion mr(compaction_top(), end());
 299   reset(mr);
 300   // Now refill the linear allocation block(s) if possible.
 301   if (_adaptive_freelists) {
 302     refillLinearAllocBlocksIfNeeded();
 303   } else {
 304     // Place as much of mr in the linAB as we can get,
 305     // provided it was big enough to go into the dictionary.
 306     FreeChunk* fc = dictionary()->find_largest_dict();
 307     if (fc != NULL) {
 308       assert(fc->size() == mr.word_size(),
 309              "Why was the chunk broken up?");
 310       removeChunkFromDictionary(fc);
 311       HeapWord* addr = (HeapWord*) fc;
 312       _smallLinearAllocBlock.set(addr, fc->size(),
 313         1024*SmallForLinearAlloc, fc->size());
 314       // Note that _unallocated_block is not updated here.
 315     }
 316   }
 317 }
 318 
 319 // Walks the entire dictionary, returning a coterminal
 320 // chunk, if it exists. Use with caution since it involves
 321 // a potentially complete walk of a potentially large tree.
 322 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 323 
 324   assert_lock_strong(&_freelistLock);
 325 
 326   return dictionary()->find_chunk_ends_at(end());
 327 }
 328 
 329 
 330 #ifndef PRODUCT
 331 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 332   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 333     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 334   }
 335 }
 336 
 337 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 338   size_t sum = 0;
 339   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 340     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 341   }
 342   return sum;
 343 }
 344 
 345 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 346   size_t count = 0;
 347   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 348     debug_only(
 349       ssize_t total_list_count = 0;
 350       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 351          fc = fc->next()) {
 352         total_list_count++;
 353       }
 354       assert(total_list_count ==  _indexedFreeList[i].count(),
 355         "Count in list is incorrect");
 356     )
 357     count += _indexedFreeList[i].count();
 358   }
 359   return count;
 360 }
 361 
 362 size_t CompactibleFreeListSpace::totalCount() {
 363   size_t num = totalCountInIndexedFreeLists();
 364   num +=  dictionary()->total_count();
 365   if (_smallLinearAllocBlock._word_size != 0) {
 366     num++;
 367   }
 368   return num;
 369 }
 370 #endif
 371 
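// A block is considered free if the FreeChunk overlaid at "p" has its free
// bit set.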
 372 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 373   FreeChunk* fc = (FreeChunk*) p;
 374   return fc->is_free();
 375 }
 376 
 377 size_t CompactibleFreeListSpace::used() const {
 378   return capacity() - free();
 379 }
 380 
 381 size_t CompactibleFreeListSpace::free() const {
 382   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 383   // if you do this while the structures are in flux you
 384   // may get an approximate answer only; for instance
 385   // because there is concurrent allocation either
 386   // directly by mutators or for promotion during a GC.
 387   // It's "MT-safe", however, in the sense that you are guaranteed
 388   // not to crash and burn, for instance, because of walking
 389   // pointers that could disappear as you were walking them.
 390   // The approximation is because the various components
 391   // that are read below are not read atomically (and
 392   // further the computation of totalSizeInIndexedFreeLists()
 393   // is itself a non-atomic computation). The normal use of
 394   // this is during a resize operation at the end of GC
 395   // and at that time you are guaranteed to get the
 396   // correct actual value. However, for instance, this is
 397   // also read completely asynchronously by the "perf-sampler"
 398   // that supports jvmstat, and you are apt to see the values
 399   // flicker in such cases.
 400   assert(_dictionary != NULL, "No _dictionary?");
 401   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 402           totalSizeInIndexedFreeLists() +
 403           _smallLinearAllocBlock._word_size) * HeapWordSize;
 404 }
 405 
 406 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 407   assert(_dictionary != NULL, "No _dictionary?");
 408   assert_locked();
 409   size_t res = _dictionary->max_chunk_size();
 410   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 411                        (size_t) SmallForLinearAlloc - 1));
 412   // XXX the following could potentially be pretty slow;
 413   // should one, pessimistically for the rare cases when res
 414   // calculated above is less than IndexSetSize,
 415   // just return res calculated above? My reasoning was that
 416   // those cases will be so rare that the extra time spent doesn't
 417   // really matter....
 418   // Note: do not change the loop test i >= res + IndexSetStride
 419   // to i > res below, because i is unsigned and res may be zero.
 420   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 421        i -= IndexSetStride) {
 422     if (_indexedFreeList[i].head() != NULL) {
 423       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 424       return i;
 425     }
 426   }
 427   return res;
 428 }
 429 
 430 void LinearAllocBlock::print_on(outputStream* st) const {
 431   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 432             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 433             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 434 }
 435 
 436 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 437   st->print_cr("COMPACTIBLE FREELIST SPACE");
 438   st->print_cr(" Space:");
 439   Space::print_on(st);
 440 
 441   st->print_cr("promoInfo:");
 442   _promoInfo.print_on(st);
 443 
 444   st->print_cr("_smallLinearAllocBlock");
 445   _smallLinearAllocBlock.print_on(st);
 446 
 447   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 448 
 449   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 450                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 451 }
 452 
 453 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 454 const {
 455   reportIndexedFreeListStatistics();
 456   st->print_cr("Layout of Indexed Freelists");
 457   st->print_cr("---------------------------");
 458   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 459   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 460     _indexedFreeList[i].print_on(st);
 461     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 462          fc = fc->next()) {
 463       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 464                    p2i(fc), p2i((HeapWord*)fc + i),
 465                    fc->cantCoalesce() ? "\t CC" : "");
 466     }
 467   }
 468 }
 469 
 470 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 471 const {
 472   _promoInfo.print_on(st);
 473 }
 474 
 475 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 476 const {
 477   _dictionary->report_statistics();
 478   st->print_cr("Layout of Freelists in Tree");
 479   st->print_cr("---------------------------");
 480   _dictionary->print_free_lists(st);
 481 }
 482 
 483 class BlkPrintingClosure: public BlkClosure {
 484   const CMSCollector*             _collector;
 485   const CompactibleFreeListSpace* _sp;
 486   const CMSBitMap*                _live_bit_map;
 487   const bool                      _post_remark;
 488   outputStream*                   _st;
 489 public:
 490   BlkPrintingClosure(const CMSCollector* collector,
 491                      const CompactibleFreeListSpace* sp,
 492                      const CMSBitMap* live_bit_map,
 493                      outputStream* st):
 494     _collector(collector),
 495     _sp(sp),
 496     _live_bit_map(live_bit_map),
 497     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 498     _st(st) { }
 499   size_t do_blk(HeapWord* addr);
 500 };
 501 
 502 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 503   size_t sz = _sp->block_size_no_stall(addr, _collector);
 504   assert(sz != 0, "Should always be able to compute a size");
 505   if (_sp->block_is_obj(addr)) {
 506     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 507     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 508       p2i(addr),
 509       dead ? "dead" : "live",
 510       sz,
 511       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 512     if (CMSPrintObjectsInDump && !dead) {
 513       oop(addr)->print_on(_st);
 514       _st->print_cr("--------------------------------------");
 515     }
 516   } else { // free block
 517     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 518       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 519     if (CMSPrintChunksInDump) {
 520       ((FreeChunk*)addr)->print_on(_st);
 521       _st->print_cr("--------------------------------------");
 522     }
 523   }
 524   return sz;
 525 }
 526 
 527 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 528   outputStream* st) {
 529   st->print_cr("\n=========================");
 530   st->print_cr("Block layout in CMS Heap:");
 531   st->print_cr("=========================");
 532   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 533   blk_iterate(&bpcl);
 534 
 535   st->print_cr("\n=======================================");
 536   st->print_cr("Order & Layout of Promotion Info Blocks");
 537   st->print_cr("=======================================");
 538   print_promo_info_blocks(st);
 539 
 540   st->print_cr("\n===========================");
 541   st->print_cr("Order of Indexed Free Lists");
 542   st->print_cr("===========================");
 543   print_indexed_free_lists(st);
 544 
 545   st->print_cr("\n=================================");
 546   st->print_cr("Order of Free Lists in Dictionary");
 547   st->print_cr("=================================");
 548   print_dictionary_free_lists(st);
 549 }
 550 
 551 
 552 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 553   assert_lock_strong(&_freelistLock);
 554   assert(PrintFLSStatistics != 0, "Reporting error");
 555   _dictionary->report_statistics();
 556   if (PrintFLSStatistics > 1) {
 557     reportIndexedFreeListStatistics();
 558     size_t total_size = totalSizeInIndexedFreeLists() +
 559                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 560     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 561   }
 562 }
 563 
 564 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 565   assert_lock_strong(&_freelistLock);
 566   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 567                       "--------------------------------\n");
 568   size_t total_size = totalSizeInIndexedFreeLists();
 569   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 570   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 571   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 572   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 573   if (free_blocks != 0) {
 574     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 575   }
 576 }
 577 
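// Count the free blocks held in the indexed free lists.  In debug builds each
// list is re-walked to cross-check its cached count.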
 578 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 579   size_t res = 0;
 580   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 581     debug_only(
 582       ssize_t recount = 0;
 583       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 584          fc = fc->next()) {
 585         recount += 1;
 586       }
 587       assert(recount == _indexedFreeList[i].count(),
 588         "Incorrect count in list");
 589     )
 590     res += _indexedFreeList[i].count();
 591   }
 592   return res;
 593 }
 594 
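// Walk the indexed free lists from the largest size downwards and return the
// first size whose list is non-empty, or 0 if all of them are empty.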
 595 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 596   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 597     if (_indexedFreeList[i].head() != NULL) {
 598       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 599       return (size_t)i;
 600     }
 601   }
 602   return 0;
 603 }
 604 
 605 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 606   HeapWord* prevEnd = end();
 607   assert(prevEnd != value, "unnecessary set_end call");
 608   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 609         "New end is below unallocated block");
 610   _end = value;
 611   if (prevEnd != NULL) {
 612     // Resize the underlying block offset table.
 613     _bt.resize(pointer_delta(value, bottom()));
 614     if (value <= prevEnd) {
 615       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 616              "New end is below unallocated block");
 617     } else {
 618       // Now, take this new chunk and add it to the free blocks.
 619       // Note that the BOT has not yet been updated for this block.
 620       size_t newFcSize = pointer_delta(value, prevEnd);
 621       // XXX This is REALLY UGLY and should be fixed up. XXX
 622       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 623         // Mark the boundary of the new block in BOT
 624         _bt.mark_block(prevEnd, value);
 625         // put it all in the linAB
 626         MutexLockerEx x(parDictionaryAllocLock(),
 627                         Mutex::_no_safepoint_check_flag);
 628         _smallLinearAllocBlock._ptr = prevEnd;
 629         _smallLinearAllocBlock._word_size = newFcSize;
 630         repairLinearAllocBlock(&_smallLinearAllocBlock);
 631         // Births of chunks put into a LinAB are not recorded.  Births
 632         // of chunks as they are allocated out of a LinAB are.
 633       } else {
 634         // Add the block to the free lists, if possible coalescing it
 635         // with the last free block, and update the BOT and census data.
 636         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 637       }
 638     }
 639   }
 640 }
 641 
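// A Filtering_DCTOC specialized for CompactibleFreeListSpace.  Its
// walk_mem_region_with_cl() overrides (defined via the macro below) dispatch
// to either a parallel or a non-parallel walker, depending on the _parallel
// flag captured at construction.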
 642 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 643   CompactibleFreeListSpace* _cfls;
 644   CMSCollector* _collector;
 645   bool _parallel;
 646 protected:
 647   // Override.
 648 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 649   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 650                                        HeapWord* bottom, HeapWord* top, \
 651                                        ClosureType* cl);                \
 652       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 653                                        HeapWord* bottom, HeapWord* top, \
 654                                        ClosureType* cl);                \
 655     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 656                                        HeapWord* bottom, HeapWord* top, \
 657                                        ClosureType* cl)
 658   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 659   walk_mem_region_with_cl_DECL(FilteringClosure);
 660 
 661 public:
 662   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 663                       CMSCollector* collector,
 664                       ExtendedOopClosure* cl,
 665                       CardTableModRefBS::PrecisionStyle precision,
 666                       HeapWord* boundary,
 667                       bool parallel) :
 668     Filtering_DCTOC(sp, cl, precision, boundary),
 669     _cfls(sp), _collector(collector), _parallel(parallel) {}
 670 };
 671 
 672 // We de-virtualize the block-related calls below, since we know that our
 673 // space is a CompactibleFreeListSpace.
 674 
 675 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 676 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 677                                                  HeapWord* bottom,              \
 678                                                  HeapWord* top,                 \
 679                                                  ClosureType* cl) {             \
 680    if (_parallel) {                                                             \
 681      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 682    } else {                                                                     \
 683      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 684    }                                                                            \
 685 }                                                                               \
 686 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 687                                                       HeapWord* bottom,         \
 688                                                       HeapWord* top,            \
 689                                                       ClosureType* cl) {        \
 690   /* Skip parts that are before "mr", in case "block_start" sent us             \
 691      back too far. */                                                           \
 692   HeapWord* mr_start = mr.start();                                              \
 693   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 694   HeapWord* next = bottom + bot_size;                                           \
 695   while (next < mr_start) {                                                     \
 696     bottom = next;                                                              \
 697     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 698     next = bottom + bot_size;                                                   \
 699   }                                                                             \
 700                                                                                 \
 701   while (bottom < top) {                                                        \
 702     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 703         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 704                     oop(bottom)) &&                                             \
 705         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 706       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 707       bottom += _cfls->adjustObjectSize(word_sz);                               \
 708     } else {                                                                    \
 709       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 710     }                                                                           \
 711   }                                                                             \
 712 }                                                                               \
 713 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 714                                                         HeapWord* bottom,       \
 715                                                         HeapWord* top,          \
 716                                                         ClosureType* cl) {      \
 717   /* Skip parts that are before "mr", in case "block_start" sent us             \
 718      back too far. */                                                           \
 719   HeapWord* mr_start = mr.start();                                              \
 720   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 721   HeapWord* next = bottom + bot_size;                                           \
 722   while (next < mr_start) {                                                     \
 723     bottom = next;                                                              \
 724     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 725     next = bottom + bot_size;                                                   \
 726   }                                                                             \
 727                                                                                 \
 728   while (bottom < top) {                                                        \
 729     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 730         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 731                     oop(bottom)) &&                                             \
 732         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 733       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 734       bottom += _cfls->adjustObjectSize(word_sz);                               \
 735     } else {                                                                    \
 736       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 737     }                                                                           \
 738   }                                                                             \
 739 }
 740 
 741 // (There are only two of these, rather than N, because the split is due
 742 // only to the introduction of the FilteringClosure, a local part of the
 743 // impl of this abstraction.)
 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 745 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 746 
 747 DirtyCardToOopClosure*
 748 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 749                                       CardTableModRefBS::PrecisionStyle precision,
 750                                       HeapWord* boundary,
 751                                       bool parallel) {
 752   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
 753 }
 754 
 755 
 756 // Note on locking for the space iteration functions:
 757 // since the collector's iteration activities are concurrent with
 758 // allocation activities by mutators, absent a suitable mutual exclusion
 759 // mechanism the iterators may go awry. For instance a block being iterated
 760 // may suddenly be allocated or divided up and part of it allocated and
 761 // so on.
 762 
 763 // Apply the given closure to each block in the space.
 764 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 765   assert_lock_strong(freelistLock());
 766   HeapWord *cur, *limit;
 767   for (cur = bottom(), limit = end(); cur < limit;
 768        cur += cl->do_blk_careful(cur));
 769 }
 770 
 771 // Apply the given closure to each block in the space.
 772 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 773   assert_lock_strong(freelistLock());
 774   HeapWord *cur, *limit;
 775   for (cur = bottom(), limit = end(); cur < limit;
 776        cur += cl->do_blk(cur));
 777 }
 778 
 779 // Apply the given closure to each oop in the space.
 780 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 781   assert_lock_strong(freelistLock());
 782   HeapWord *cur, *limit;
 783   size_t curSize;
 784   for (cur = bottom(), limit = end(); cur < limit;
 785        cur += curSize) {
 786     curSize = block_size(cur);
 787     if (block_is_obj(cur)) {
 788       oop(cur)->oop_iterate(cl);
 789     }
 790   }
 791 }
 792 
 793 // NOTE: In the following methods, in order to safely be able to
 794 // apply the closure to an object, we need to be sure that the
 795 // object has been initialized. We are guaranteed that an object
 796 // is initialized if we are holding the Heap_lock with the
 797 // world stopped.
 798 void CompactibleFreeListSpace::verify_objects_initialized() const {
 799   if (is_init_completed()) {
 800     assert_locked_or_safepoint(Heap_lock);
 801     if (Universe::is_fully_initialized()) {
 802       guarantee(SafepointSynchronize::is_at_safepoint(),
 803                 "Required for objects to be initialized");
 804     }
 805   } // else make a concession at vm start-up
 806 }
 807 
 808 // Apply the given closure to each object in the space
 809 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 810   assert_lock_strong(freelistLock());
 811   NOT_PRODUCT(verify_objects_initialized());
 812   HeapWord *cur, *limit;
 813   size_t curSize;
 814   for (cur = bottom(), limit = end(); cur < limit;
 815        cur += curSize) {
 816     curSize = block_size(cur);
 817     if (block_is_obj(cur)) {
 818       blk->do_object(oop(cur));
 819     }
 820   }
 821 }
 822 
 823 // Apply the given closure to each live object in the space.
 824 //   The use of CompactibleFreeListSpace
 825 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
 826 // objects in the space to hold references to objects that are no longer
 827 // valid.  For example, an object may reference another object
 828 // that has already been swept up (collected).  This method uses
 829 // obj_is_alive() to determine whether it is safe to apply the closure to
 830 // an object.  See obj_is_alive() for details on how liveness of an
 831 // object is decided.
 832 
 833 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 834   assert_lock_strong(freelistLock());
 835   NOT_PRODUCT(verify_objects_initialized());
 836   HeapWord *cur, *limit;
 837   size_t curSize;
 838   for (cur = bottom(), limit = end(); cur < limit;
 839        cur += curSize) {
 840     curSize = block_size(cur);
 841     if (block_is_obj(cur) && obj_is_alive(cur)) {
 842       blk->do_object(oop(cur));
 843     }
 844   }
 845 }
 846 
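// Apply "cl" to each object in "mr", resuming from the closure's previously
// recorded high-water mark ("previous") where possible, and advancing that
// mark afterwards unless the last block visited was an object array.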
 847 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 848                                                   UpwardsObjectClosure* cl) {
 849   assert_locked(freelistLock());
 850   NOT_PRODUCT(verify_objects_initialized());
 851   assert(!mr.is_empty(), "Should be non-empty");
 852   // We use MemRegion(bottom(), end()) rather than used_region() below
 853   // because the two are not necessarily equal for some kinds of
 854   // spaces, in particular, certain kinds of free list spaces.
 855   // We could use the more complicated but more precise:
 856   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 857   // but the slight imprecision seems acceptable in the assertion check.
 858   assert(MemRegion(bottom(), end()).contains(mr),
 859          "Should be within used space");
 860   HeapWord* prev = cl->previous();   // max address from last time
 861   if (prev >= mr.end()) { // nothing to do
 862     return;
 863   }
 864   // This assert will not work when we go from cms space to perm
 865   // space, and use same closure. Easy fix deferred for later. XXX YSR
 866   // assert(prev == NULL || contains(prev), "Should be within space");
 867 
 868   bool last_was_obj_array = false;
 869   HeapWord *blk_start_addr, *region_start_addr;
 870   if (prev > mr.start()) {
 871     region_start_addr = prev;
 872     blk_start_addr    = prev;
 873     // The previous invocation may have pushed "prev" beyond the
 874     // last allocated block, yet there may still be blocks
 875     // in this region due to a particular coalescing policy.
 876     // Relax the assertion so that the case where the unallocated
 877     // block is maintained and "prev" is beyond the unallocated
 878     // block does not cause the assertion to fire.
 879     assert((BlockOffsetArrayUseUnallocatedBlock &&
 880             (!is_in(prev))) ||
 881            (blk_start_addr == block_start(region_start_addr)), "invariant");
 882   } else {
 883     region_start_addr = mr.start();
 884     blk_start_addr    = block_start(region_start_addr);
 885   }
 886   HeapWord* region_end_addr = mr.end();
 887   MemRegion derived_mr(region_start_addr, region_end_addr);
 888   while (blk_start_addr < region_end_addr) {
 889     const size_t size = block_size(blk_start_addr);
 890     if (block_is_obj(blk_start_addr)) {
 891       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 892     } else {
 893       last_was_obj_array = false;
 894     }
 895     blk_start_addr += size;
 896   }
 897   if (!last_was_obj_array) {
 898     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 899            "Should be within (closed) used space");
 900     assert(blk_start_addr > prev, "Invariant");
 901     cl->set_previous(blk_start_addr); // min address for next time
 902   }
 903 }
 904 
 905 // Callers of this iterator beware: The closure application should
 906 // be robust in the face of uninitialized objects and should (always)
 907 // return a correct size so that the next addr + size below gives us a
 908 // valid block boundary. [See for instance,
 909 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 910 // in ConcurrentMarkSweepGeneration.cpp.]
 911 HeapWord*
 912 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 913   ObjectClosureCareful* cl) {
 914   assert_lock_strong(freelistLock());
 915   // Can't use used_region() below because it may not necessarily
 916   // be the same as [bottom(),end()); although we could
 917   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 918   // that appears too cumbersome, so we just do the simpler check
 919   // in the assertion below.
 920   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 921          "mr should be non-empty and within used space");
 922   HeapWord *addr, *end;
 923   size_t size;
 924   for (addr = block_start_careful(mr.start()), end  = mr.end();
 925        addr < end; addr += size) {
 926     FreeChunk* fc = (FreeChunk*)addr;
 927     if (fc->is_free()) {
 928       // Since we hold the free list lock, which protects direct
 929       // allocation in this generation by mutators, a free object
 930       // will remain free throughout this iteration code.
 931       size = fc->size();
 932     } else {
 933       // Note that the object need not necessarily be initialized,
 934       // because (for instance) the free list lock does NOT protect
 935       // object initialization. The closure application below must
 936       // therefore be correct in the face of uninitialized objects.
 937       size = cl->do_object_careful_m(oop(addr), mr);
 938       if (size == 0) {
 939         // An unparsable object found. Signal early termination.
 940         return addr;
 941       }
 942     }
 943   }
 944   return NULL;
 945 }
 946 
 947 
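// Block-start queries are answered by the block offset table (_bt).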
 948 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 949   NOT_PRODUCT(verify_objects_initialized());
 950   return _bt.block_start(p);
 951 }
 952 
 953 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 954   return _bt.block_start_careful(p);
 955 }
 956 
 957 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 958   NOT_PRODUCT(verify_objects_initialized());
 959   // This must be volatile, or else there is a danger that the compiler
 960   // will compile the code below into a sometimes-infinite loop, by keeping
 961   // the value read the first time in a register.
 962   while (true) {
 963     // We must do this until we get a consistent view of the object.
 964     if (FreeChunk::indicatesFreeChunk(p)) {
 965       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 966       size_t res = fc->size();
 967 
 968       // Bugfix for systems with weak memory model (PPC64/IA64). The
 969       // block's free bit was set and we have read the size of the
 970       // block. Acquire and check the free bit again. If the block is
 971       // still free, the read size is correct.
 972       OrderAccess::acquire();
 973 
 974       // If the object is still a free chunk, return the size, else it
 975       // has been allocated so try again.
 976       if (FreeChunk::indicatesFreeChunk(p)) {
 977         assert(res != 0, "Block size should not be 0");
 978         return res;
 979       }
 980     } else {
 981       // must read from what 'p' points to in each loop.
 982       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 983       if (k != NULL) {
 984         assert(k->is_klass(), "Should really be klass oop.");
 985         oop o = (oop)p;
 986         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 987 
 988         // Bugfix for systems with weak memory model (PPC64/IA64).
 989         // The object o may be an array. Acquire to make sure that the array
 990         // size (third word) is consistent.
 991         OrderAccess::acquire();
 992 
 993         size_t res = o->size_given_klass(k);
 994         res = adjustObjectSize(res);
 995         assert(res != 0, "Block size should not be 0");
 996         return res;
 997       }
 998     }
 999   }
1000 }
1001 
1002 // TODO: Now that is_parsable is gone, we should combine these two functions.
1003 // A variant of the above that uses the Printezis bits for
1004 // unparsable but allocated objects. This avoids any possible
1005 // stalls waiting for mutators to initialize objects, and is
1006 // thus potentially faster than the variant above. However,
1007 // this variant may return a zero size for a block that is
1008 // under mutation and for which a consistent size cannot be
1009 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1010 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1011                                                      const CMSCollector* c)
1012 const {
1013   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1014   // This must be volatile, or else there is a danger that the compiler
1015   // will compile the code below into a sometimes-infinite loop, by keeping
1016   // the value read the first time in a register.
1017   DEBUG_ONLY(uint loops = 0;)
1018   while (true) {
1019     // We must do this until we get a consistent view of the object.
1020     if (FreeChunk::indicatesFreeChunk(p)) {
1021       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1022       size_t res = fc->size();
1023 
1024       // Bugfix for systems with weak memory model (PPC64/IA64). The
1025       // free bit of the block was set and we have read the size of
1026       // the block. Acquire and check the free bit again. If the
1027       // block is still free, the read size is correct.
1028       OrderAccess::acquire();
1029 
1030       if (FreeChunk::indicatesFreeChunk(p)) {
1031         assert(res != 0, "Block size should not be 0");
1032         assert(loops == 0, "Should be 0");
1033         return res;
1034       }
1035     } else {
1036       // must read from what 'p' points to in each loop.
1037       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1038       // We trust the size of any object that has a non-NULL
1039       // klass and (for those in the perm gen) is parsable
1040       // -- irrespective of its conc_safe-ty.
1041       if (k != NULL) {
1042         assert(k->is_klass(), "Should really be klass oop.");
1043         oop o = (oop)p;
1044         assert(o->is_oop(), "Should be an oop");
1045 
1046         // Bugfix for systems with weak memory model (PPC64/IA64).
1047         // The object o may be an array. Acquire to make sure that the array
1048         // size (third word) is consistent.
1049         OrderAccess::acquire();
1050 
1051         size_t res = o->size_given_klass(k);
1052         res = adjustObjectSize(res);
1053         assert(res != 0, "Block size should not be 0");
1054         return res;
1055       } else {
1056         // May return 0 if P-bits not present.
1057         return c->block_size_if_printezis_bits(p);
1058       }
1059     }
1060     assert(loops == 0, "Can loop at most once");
1061     DEBUG_ONLY(loops++;)
1062   }
1063 }
1064 
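// Variant of block_size() for use when the block at "p" is not subject to
// concurrent mutation, so no retry loop or acquire barriers are needed.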
1065 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1066   NOT_PRODUCT(verify_objects_initialized());
1067   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1068   FreeChunk* fc = (FreeChunk*)p;
1069   if (fc->is_free()) {
1070     return fc->size();
1071   } else {
1072     // Ignore mark word because this may be a recently promoted
1073     // object whose mark word is used to chain together grey
1074     // objects (the last one would have a null value).
1075     assert(oop(p)->is_oop(true), "Should be an oop");
1076     return adjustObjectSize(oop(p)->size());
1077   }
1078 }
1079 
1080 // This implementation assumes that the property of "being an object" is
1081 // stable.  But being a free chunk may not be (because of parallel
1082 // promotion.)
1083 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1084   FreeChunk* fc = (FreeChunk*)p;
1085   assert(is_in_reserved(p), "Should be in space");
1086   if (FreeChunk::indicatesFreeChunk(p)) return false;
1087   Klass* k = oop(p)->klass_or_null();
1088   if (k != NULL) {
1089     // Ignore mark word because it may have been used to
1090     // chain together promoted objects (the last one
1091     // would have a null value).
1092     assert(oop(p)->is_oop(true), "Should be an oop");
1093     return true;
1094   } else {
1095     return false;  // Was not an object at the start of collection.
1096   }
1097 }
1098 
1099 // Check if the object is alive. This fact is checked either by consulting
1100 // the main marking bitmap in the sweeping phase or, if it's a permanent
1101 // generation and we're not in the sweeping phase, by checking the
1102 // perm_gen_verify_bit_map where we store the "deadness" information if
1103 // we did not sweep the perm gen in the most recent previous GC cycle.
1104 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1105   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1106          "Else races are possible");
1107   assert(block_is_obj(p), "The address should point to an object");
1108 
1109   // If we're sweeping, we use object liveness information from the main bit map
1110   // for both perm gen and old gen.
1111   // We don't need to lock the bitmap (live_map or dead_map below), because
1112   // EITHER we are in the middle of the sweeping phase, and the
1113   // main marking bit map (live_map below) is locked,
1114   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1115   // is stable, because it's mutated only in the sweeping phase.
1116   // NOTE: This method is also used by jmap where, if class unloading is
1117   // off, the results can return "false" for legitimate perm objects,
1118   // when we are not in the midst of a sweeping phase, which can result
1119   // in jmap not reporting certain perm gen objects. This will be moot
1120   // if/when the perm gen goes away in the future.
1121   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1122     CMSBitMap* live_map = _collector->markBitMap();
1123     return live_map->par_isMarked((HeapWord*) p);
1124   }
1125   return true;
1126 }
1127 
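// Non-parallel variant of block_is_obj(): with no concurrent mutation of the
// block's state, the free bit can be read directly.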
1128 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1129   FreeChunk* fc = (FreeChunk*)p;
1130   assert(is_in_reserved(p), "Should be in space");
1131   assert(_bt.block_start(p) == p, "Should be a block boundary");
1132   if (!fc->is_free()) {
1133     // Ignore mark word because it may have been used to
1134     // chain together promoted objects (the last one
1135     // would have a null value).
1136     assert(oop(p)->is_oop(true), "Should be an oop");
1137     return true;
1138   }
1139   return false;
1140 }
1141 
1142 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1143 // approximate answer if you don't hold the freelistLock when you call this.
1144 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1145   size_t size = 0;
1146   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1147     debug_only(
1148       // We may be calling here without the lock in which case we
1149       // won't do this modest sanity check.
1150       if (freelistLock()->owned_by_self()) {
1151         size_t total_list_size = 0;
1152         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1153           fc = fc->next()) {
1154           total_list_size += i;
1155         }
1156         assert(total_list_size == i * _indexedFreeList[i].count(),
1157                "Count in list is incorrect");
1158       }
1159     )
1160     size += i * _indexedFreeList[i].count();
1161   }
1162   return size;
1163 }
1164 
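// MT-safe allocation: take the free list lock (no safepoint check) and
// delegate to allocate(), which expects the lock to be held.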
1165 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1166   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1167   return allocate(size);
1168 }
1169 
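// Allocate "size" words from the remainder of the small linear allocation
// block by delegating to getChunkFromLinearAllocBlockRemainder().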
1170 HeapWord*
1171 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1172   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1173 }
1174 
1175 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1176   assert_lock_strong(freelistLock());
1177   HeapWord* res = NULL;
1178   assert(size == adjustObjectSize(size),
1179          "use adjustObjectSize() before calling into allocate()");
1180 
1181   if (_adaptive_freelists) {
1182     res = allocate_adaptive_freelists(size);
1183   } else {  // non-adaptive free lists
1184     res = allocate_non_adaptive_freelists(size);
1185   }
1186 
1187   if (res != NULL) {
1188     // check that res does lie in this space!
1189     assert(is_in_reserved(res), "Not in this space!");
1190     assert(is_aligned((void*)res), "alignment check");
1191 
1192     FreeChunk* fc = (FreeChunk*)res;
1193     fc->markNotFree();
1194     assert(!fc->is_free(), "shouldn't be marked free");
1195     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1196     // Verify that the block offset table shows this to
1197     // be a single block, but not one which is unallocated.
1198     _bt.verify_single_block(res, size);
1199     _bt.verify_not_unallocated(res, size);
1200     // mangle a just allocated object with a distinct pattern.
1201     debug_only(fc->mangleAllocated(size));
1202   }
1203 
1204   return res;
1205 }
1206 
1207 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1208   HeapWord* res = NULL;
1209   // try and use linear allocation for smaller blocks
1210   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1211     // if successful, the following also adjusts block offset table
1212     res = getChunkFromSmallLinearAllocBlock(size);
1213   }
1214   // Else triage to indexed lists for smaller sizes
1215   if (res == NULL) {
1216     if (size < SmallForDictionary) {
1217       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1218     } else {
1219       // else get it from the big dictionary; if even this doesn't
1220       // work we are out of luck.
1221       res = (HeapWord*)getChunkFromDictionaryExact(size);
1222     }
1223   }
1224 
1225   return res;
1226 }
1227 
1228 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1229   assert_lock_strong(freelistLock());
1230   HeapWord* res = NULL;
1231   assert(size == adjustObjectSize(size),
1232          "use adjustObjectSize() before calling into allocate()");
1233 
1234   // Strategy
1235   //   if small
1236   //     exact size from small object indexed list if small
1237   //     small or large linear allocation block (linAB) as appropriate
1238   //     take from lists of greater sized chunks
1239   //   else
1240   //     dictionary
1241   //     small or large linear allocation block if it has the space
1242   // Try allocating exact size from indexTable first
1243   if (size < IndexSetSize) {
1244     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1245     if (res != NULL) {
1246       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1247         "Not removed from free list");
1248       // no block offset table adjustment is necessary on blocks in
1249       // the indexed lists.
1250 
1251     // Try allocating from the small LinAB
1252     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1253         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1254         // if successful, the above also adjusts block offset table
1255         // Note that this call will refill the LinAB to
1256         // satisfy the request.  This is different than
1257         // evm.
1258         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1259     } else {
1260       // Raid the exact free lists larger than size, even if they are not
1261       // overpopulated.
1262       res = (HeapWord*) getChunkFromGreater(size);
1263     }
1264   } else {
1265     // Big objects get allocated directly from the dictionary.
1266     res = (HeapWord*) getChunkFromDictionaryExact(size);
1267     if (res == NULL) {
1268       // Try hard not to fail since an allocation failure will likely
1269       // trigger a synchronous GC.  Try to get the space from the
1270       // allocation blocks.
1271       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1272     }
1273   }
1274 
1275   return res;
1276 }
1277 
1278 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1279 // when promoting obj.
1280 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1281   // Depending on the object size, expansion may require refilling either a
1282   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2 * MinChunkSize
1283   // is added because the dictionary may over-allocate to avoid fragmentation.
1284   size_t space = obj_size;
1285   if (!_adaptive_freelists) {
1286     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1287   }
1288   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1289   return space;
1290 }
1291 
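     // Satisfy a request for numWords by splitting a strictly larger chunk:
     // first scan the indexed free lists of sizes >= numWords + MinChunkSize,
     // then fall back to the dictionary.  The remainder of the split is
     // returned to the appropriate free list.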
1292 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1293   FreeChunk* ret;
1294 
1295   assert(numWords >= MinChunkSize, "Size is less than minimum");
1296   assert(linearAllocationWouldFail() || bestFitFirst(),
1297     "Should not be here");
1298 
1299   size_t i;
1300   size_t currSize = numWords + MinChunkSize;
1301   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1302   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1303     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1304     if (fl->head()) {
1305       ret = getFromListGreater(fl, numWords);
1306       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1307       return ret;
1308     }
1309   }
1310 
1311   currSize = MAX2((size_t)SmallForDictionary,
1312                   (size_t)(numWords + MinChunkSize));
1313 
1314   /* Try to get a chunk that satisfies request, while avoiding
1315      fragmentation that can't be handled. */
1316   {
1317     ret =  dictionary()->get_chunk(currSize);
1318     if (ret != NULL) {
1319       assert(ret->size() - numWords >= MinChunkSize,
1320              "Chunk is too small");
1321       _bt.allocated((HeapWord*)ret, ret->size());
1322       /* Carve returned chunk. */
1323       (void) splitChunkAndReturnRemainder(ret, numWords);
1324       /* Label this as no longer a free chunk. */
1325       assert(ret->is_free(), "This chunk should be free");
1326       ret->link_prev(NULL);
1327     }
1328     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1329     return ret;
1330   }
1331   ShouldNotReachHere();
1332 }
1333 
1334 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1335   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1336   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1337 }
1338 
1339 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1340   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1341          (_smallLinearAllocBlock._word_size == fc->size()),
1342          "Linear allocation block shows incorrect size");
1343   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1344           (_smallLinearAllocBlock._word_size == fc->size()));
1345 }
1346 
1347 // Check if the purported free chunk is present either as a linear
1348 // allocation block, the size-indexed table of (smaller) free blocks,
1349 // or the larger free blocks kept in the binary tree dictionary.
1350 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1351   if (verify_chunk_is_linear_alloc_block(fc)) {
1352     return true;
1353   } else if (fc->size() < IndexSetSize) {
1354     return verifyChunkInIndexedFreeLists(fc);
1355   } else {
1356     return dictionary()->verify_chunk_in_free_list(fc);
1357   }
1358 }
1359 
1360 #ifndef PRODUCT
1361 void CompactibleFreeListSpace::assert_locked() const {
1362   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1363 }
1364 
1365 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1366   CMSLockVerifier::assert_locked(lock);
1367 }
1368 #endif
1369 
1370 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1371   // In the parallel case, the main thread holds the free list lock
1372   // on behalf of the parallel threads.
1373   FreeChunk* fc;
1374   {
1375     // If GC is parallel, this might be called by several threads.
1376     // This should be rare enough that the locking overhead won't affect
1377     // the sequential code.
1378     MutexLockerEx x(parDictionaryAllocLock(),
1379                     Mutex::_no_safepoint_check_flag);
1380     fc = getChunkFromDictionary(size);
1381   }
1382   if (fc != NULL) {
1383     fc->dontCoalesce();
1384     assert(fc->is_free(), "Should be free, but not coalescable");
1385     // Verify that the block offset table shows this to
1386     // be a single block, but not one which is unallocated.
1387     _bt.verify_single_block((HeapWord*)fc, fc->size());
1388     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1389   }
1390   return fc;
1391 }
1392 
1393 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1394   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1395   assert_locked();
1396 
1397   // if we are tracking promotions, then first ensure space for
1398   // promotion (including spooling space for saving header if necessary).
1399   // then allocate and copy, then track promoted info if needed.
1400   // When tracking (see PromotionInfo::track()), the mark word may
1401   // be displaced and in this case restoration of the mark word
1402   // occurs in the (oop_since_save_marks_)iterate phase.
1403   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1404     return NULL;
1405   }
1406   // Call the allocate(size_t, bool) form directly to avoid the
1407   // additional call through the allocate(size_t) form.  Having
1408   // the compiler inline the call is problematic because allocate(size_t)
1409   // is a virtual method.
1410   HeapWord* res = allocate(adjustObjectSize(obj_size));
1411   if (res != NULL) {
1412     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1413     // if we should be tracking promotions, do so.
1414     if (_promoInfo.tracking()) {
1415         _promoInfo.track((PromotedObject*)res);
1416     }
1417   }
1418   return oop(res);
1419 }
1420 
1421 HeapWord*
1422 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1423   assert_locked();
1424   assert(size >= MinChunkSize, "minimum chunk size");
1425   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1426     "maximum from smallLinearAllocBlock");
1427   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1428 }
1429 
1430 HeapWord*
1431 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1432                                                        size_t size) {
1433   assert_locked();
1434   assert(size >= MinChunkSize, "too small");
1435   HeapWord* res = NULL;
1436   // Try to do linear allocation from blk, making sure it has not been exhausted.
1437   if (blk->_word_size == 0) {
1438     // We have probably been unable to fill this either in the prologue or
1439     // when it was exhausted at the last linear allocation. Bail out until
1440     // next time.
1441     assert(blk->_ptr == NULL, "consistency check");
1442     return NULL;
1443   }
1444   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1445   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1446   if (res != NULL) return res;
1447 
1448   // about to exhaust this linear allocation block
1449   if (blk->_word_size == size) { // exactly satisfied
1450     res = blk->_ptr;
1451     _bt.allocated(res, blk->_word_size);
1452   } else if (size + MinChunkSize <= blk->_refillSize) {
1453     size_t sz = blk->_word_size;
1454     // Update _unallocated_block if the size is such that the chunk would be
1455     // returned to the indexed free list.  All other chunks in the indexed
1456     // free lists are allocated from the dictionary, so _unallocated_block
1457     // has already been adjusted for them.  Do it here so that the adjustment
1458     // covers all chunks added back to the indexed free lists.
1459     if (sz < SmallForDictionary) {
1460       _bt.allocated(blk->_ptr, sz);
1461     }
1462     // Return the chunk that isn't big enough, and then refill below.
1463     addChunkToFreeLists(blk->_ptr, sz);
1464     split_birth(sz);
1465     // Don't keep statistics on adding back chunk from a LinAB.
1466   } else {
1467     // A refilled block would not satisfy the request.
1468     return NULL;
1469   }
1470 
1471   blk->_ptr = NULL; blk->_word_size = 0;
1472   refillLinearAllocBlock(blk);
1473   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1474          "block was replenished");
1475   if (res != NULL) {
1476     split_birth(size);
1477     repairLinearAllocBlock(blk);
1478   } else if (blk->_ptr != NULL) {
1479     res = blk->_ptr;
1480     size_t blk_size = blk->_word_size;
1481     blk->_word_size -= size;
1482     blk->_ptr  += size;
1483     split_birth(size);
1484     repairLinearAllocBlock(blk);
1485     // Update BOT last so that other (parallel) GC threads see a consistent
1486     // view of the BOT and free blocks.
1487     // Above must occur before BOT is updated below.
1488     OrderAccess::storestore();
1489     _bt.split_block(res, blk_size, size);  // adjust block offset table
1490   }
1491   return res;
1492 }
1493 
1494 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1495                                         LinearAllocBlock* blk,
1496                                         size_t size) {
1497   assert_locked();
1498   assert(size >= MinChunkSize, "too small");
1499 
1500   HeapWord* res = NULL;
1501   // This is the common case.  Keep it simple.
1502   if (blk->_word_size >= size + MinChunkSize) {
1503     assert(blk->_ptr != NULL, "consistency check");
1504     res = blk->_ptr;
1505     // Note that the BOT is up-to-date for the linAB before allocation.  It
1506     // indicates the start of the linAB.  The split_block() updates the
1507     // BOT for the linAB after the allocation (indicates the start of the
1508     // next chunk to be allocated).
1509     size_t blk_size = blk->_word_size;
1510     blk->_word_size -= size;
1511     blk->_ptr  += size;
1512     split_birth(size);
1513     repairLinearAllocBlock(blk);
1514     // Update BOT last so that other (parallel) GC threads see a consistent
1515     // view of the BOT and free blocks.
1516     // Above must occur before BOT is updated below.
1517     OrderAccess::storestore();
1518     _bt.split_block(res, blk_size, size);  // adjust block offset table
1519     _bt.allocated(res, size);
1520   }
1521   return res;
1522 }
1523 
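     // Return a free chunk of exactly "size" words from the indexed free
     // list of that size, replenishing the list via the helper below if it
     // is currently empty.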
1524 FreeChunk*
1525 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1526   assert_locked();
1527   assert(size < SmallForDictionary, "just checking");
1528   FreeChunk* res;
1529   res = _indexedFreeList[size].get_chunk_at_head();
1530   if (res == NULL) {
1531     res = getChunkFromIndexedFreeListHelper(size);
1532   }
1533   _bt.verify_not_unallocated((HeapWord*) res, size);
1534   assert(res == NULL || res->size() == size, "Incorrect block size");
1535   return res;
1536 }
1537 
1538 FreeChunk*
1539 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1540   bool replenish) {
1541   assert_locked();
1542   FreeChunk* fc = NULL;
1543   if (size < SmallForDictionary) {
1544     assert(_indexedFreeList[size].head() == NULL ||
1545       _indexedFreeList[size].surplus() <= 0,
1546       "List for this size should be empty or under populated");
1547     // Try best fit in exact lists before replenishing the list
1548     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1549       // Replenish list.
1550       //
1551       // Things tried that failed.
1552       //   Tried allocating out of the two LinAB's first before
1553       // replenishing lists.
1554       //   Tried small linAB of size 256 (size in indexed list)
1555       // and replenishing indexed lists from the small linAB.
1556       //
1557       FreeChunk* newFc = NULL;
1558       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1559       if (replenish_size < SmallForDictionary) {
1560         // Do not replenish from an underpopulated size.
1561         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1562             _indexedFreeList[replenish_size].head() != NULL) {
1563           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1564         } else if (bestFitFirst()) {
1565           newFc = bestFitSmall(replenish_size);
1566         }
1567       }
1568       if (newFc == NULL && replenish_size > size) {
1569         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1570         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1571       }
1572       // Note: The split-death stats update for the block obtained above
1573       // will be recorded below, precisely when we know we are going
1574       // to actually split it into more than one piece.
1575       if (newFc != NULL) {
1576         if  (replenish || CMSReplenishIntermediate) {
1577           // Replenish this list and return one block to caller.
1578           size_t i;
1579           FreeChunk *curFc, *nextFc;
1580           size_t num_blk = newFc->size() / size;
1581           assert(num_blk >= 1, "Smaller than requested?");
1582           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1583           if (num_blk > 1) {
1584             // we are sure we will be splitting the block just obtained
1585             // into multiple pieces; record the split-death of the original
1586             splitDeath(replenish_size);
1587           }
1588           // carve up and link blocks 0, ..., num_blk - 2
1589           // The last chunk is not added to the lists but is returned as the
1590           // free chunk.
1591           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1592                i = 0;
1593                i < (num_blk - 1);
1594                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1595                i++) {
1596             curFc->set_size(size);
1597             // Don't record this as a return in order to try and
1598             // determine the "returns" from a GC.
1599             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1600             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1601             _bt.mark_block((HeapWord*)curFc, size);
1602             split_birth(size);
1603             // Don't record the initial population of the indexed list
1604             // as a split birth.
1605           }
1606 
1607           // check that the arithmetic was OK above
1608           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1609             "inconsistency in carving newFc");
1610           curFc->set_size(size);
1611           _bt.mark_block((HeapWord*)curFc, size);
1612           split_birth(size);
1613           fc = curFc;
1614         } else {
1615           // Return entire block to caller
1616           fc = newFc;
1617         }
1618       }
1619     }
1620   } else {
1621     // Get a free chunk from the free chunk dictionary to be returned to
1622     // replenish the indexed free list.
1623     fc = getChunkFromDictionaryExact(size);
1624   }
1625   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1626   return fc;
1627 }
1628 
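     // Get a chunk of at least "size" words from the dictionary; if the
     // chunk found is large enough, the excess (itself at least MinChunkSize
     // words) is split off and returned to the free lists.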
1629 FreeChunk*
1630 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1631   assert_locked();
1632   FreeChunk* fc = _dictionary->get_chunk(size,
1633                                          FreeBlockDictionary<FreeChunk>::atLeast);
1634   if (fc == NULL) {
1635     return NULL;
1636   }
1637   _bt.allocated((HeapWord*)fc, fc->size());
1638   if (fc->size() >= size + MinChunkSize) {
1639     fc = splitChunkAndReturnRemainder(fc, size);
1640   }
1641   assert(fc->size() >= size, "chunk too small");
1642   assert(fc->size() < size + MinChunkSize, "chunk too big");
1643   _bt.verify_single_block((HeapWord*)fc, fc->size());
1644   return fc;
1645 }
1646 
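     // Like getChunkFromDictionary(), but the chunk returned is exactly
     // "size" words; over-sized chunks are split, and if the remainder would
     // be smaller than MinChunkSize a larger chunk is requested instead.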
1647 FreeChunk*
1648 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1649   assert_locked();
1650   FreeChunk* fc = _dictionary->get_chunk(size,
1651                                          FreeBlockDictionary<FreeChunk>::atLeast);
1652   if (fc == NULL) {
1653     return fc;
1654   }
1655   _bt.allocated((HeapWord*)fc, fc->size());
1656   if (fc->size() == size) {
1657     _bt.verify_single_block((HeapWord*)fc, size);
1658     return fc;
1659   }
1660   assert(fc->size() > size, "get_chunk() guarantee");
1661   if (fc->size() < size + MinChunkSize) {
1662     // Return the chunk to the dictionary and go get a bigger one.
1663     returnChunkToDictionary(fc);
1664     fc = _dictionary->get_chunk(size + MinChunkSize,
1665                                 FreeBlockDictionary<FreeChunk>::atLeast);
1666     if (fc == NULL) {
1667       return NULL;
1668     }
1669     _bt.allocated((HeapWord*)fc, fc->size());
1670   }
1671   assert(fc->size() >= size + MinChunkSize, "tautology");
1672   fc = splitChunkAndReturnRemainder(fc, size);
1673   assert(fc->size() == size, "chunk is wrong size");
1674   _bt.verify_single_block((HeapWord*)fc, size);
1675   return fc;
1676 }
1677 
1678 void
1679 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1680   assert_locked();
1681 
1682   size_t size = chunk->size();
1683   _bt.verify_single_block((HeapWord*)chunk, size);
1684   // adjust _unallocated_block downward, as necessary
1685   _bt.freed((HeapWord*)chunk, size);
1686   _dictionary->return_chunk(chunk);
1687 #ifndef PRODUCT
1688   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1689     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1690     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1691     tl->verify_stats();
1692   }
1693 #endif // PRODUCT
1694 }
1695 
1696 void
1697 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1698   assert_locked();
1699   size_t size = fc->size();
1700   _bt.verify_single_block((HeapWord*) fc, size);
1701   _bt.verify_not_unallocated((HeapWord*) fc, size);
1702   if (_adaptive_freelists) {
1703     _indexedFreeList[size].return_chunk_at_tail(fc);
1704   } else {
1705     _indexedFreeList[size].return_chunk_at_head(fc);
1706   }
1707 #ifndef PRODUCT
1708   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1709      _indexedFreeList[size].verify_stats();
1710   }
1711 #endif // PRODUCT
1712 }
1713 
1714 // Add chunk to end of last block -- if it's the largest
1715 // block -- and update BOT and census data. We would
1716 // of course have preferred to coalesce it with the
1717 // last block, but it's currently less expensive to find the
1718 // largest block than it is to find the last.
1719 void
1720 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1721   HeapWord* chunk, size_t     size) {
1722   // check that the chunk does lie in this space!
1723   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1724   // One of the parallel gc task threads may be here
1725   // whilst others are allocating.
1726   Mutex* lock = &_parDictionaryAllocLock;
1727   FreeChunk* ec;
1728   {
1729     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1730     ec = dictionary()->find_largest_dict();  // get largest block
1731     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1732       // It's a coterminal block - we can coalesce.
1733       size_t old_size = ec->size();
1734       coalDeath(old_size);
1735       removeChunkFromDictionary(ec);
1736       size += old_size;
1737     } else {
1738       ec = (FreeChunk*)chunk;
1739     }
1740   }
1741   ec->set_size(size);
1742   debug_only(ec->mangleFreed(size));
1743   if (size < SmallForDictionary) {
1744     lock = _indexedFreeListParLocks[size];
1745   }
1746   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1747   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1748   // record the birth under the lock since the recording involves
1749   // manipulation of the list on which the chunk lives and
1750   // if the chunk is allocated and is the last on the list,
1751   // the list can go away.
1752   coalBirth(size);
1753 }
1754 
1755 void
1756 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1757                                               size_t     size) {
1758   // check that the chunk does lie in this space!
1759   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1760   assert_locked();
1761   _bt.verify_single_block(chunk, size);
1762 
1763   FreeChunk* fc = (FreeChunk*) chunk;
1764   fc->set_size(size);
1765   debug_only(fc->mangleFreed(size));
1766   if (size < SmallForDictionary) {
1767     returnChunkToFreeList(fc);
1768   } else {
1769     returnChunkToDictionary(fc);
1770   }
1771 }
1772 
1773 void
1774 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1775   size_t size, bool coalesced) {
1776   assert_locked();
1777   assert(chunk != NULL, "null chunk");
1778   if (coalesced) {
1779     // repair BOT
1780     _bt.single_block(chunk, size);
1781   }
1782   addChunkToFreeLists(chunk, size);
1783 }
1784 
1785 // We _must_ find the purported chunk on our free lists;
1786 // we assert if we don't.
1787 void
1788 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1789   size_t size = fc->size();
1790   assert_locked();
1791   debug_only(verifyFreeLists());
1792   if (size < SmallForDictionary) {
1793     removeChunkFromIndexedFreeList(fc);
1794   } else {
1795     removeChunkFromDictionary(fc);
1796   }
1797   _bt.verify_single_block((HeapWord*)fc, size);
1798   debug_only(verifyFreeLists());
1799 }
1800 
1801 void
1802 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1803   size_t size = fc->size();
1804   assert_locked();
1805   assert(fc != NULL, "null chunk");
1806   _bt.verify_single_block((HeapWord*)fc, size);
1807   _dictionary->remove_chunk(fc);
1808   // adjust _unallocated_block upward, as necessary
1809   _bt.allocated((HeapWord*)fc, size);
1810 }
1811 
1812 void
1813 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1814   assert_locked();
1815   size_t size = fc->size();
1816   _bt.verify_single_block((HeapWord*)fc, size);
1817   NOT_PRODUCT(
1818     if (FLSVerifyIndexTable) {
1819       verifyIndexedFreeList(size);
1820     }
1821   )
1822   _indexedFreeList[size].remove_chunk(fc);
1823   NOT_PRODUCT(
1824     if (FLSVerifyIndexTable) {
1825       verifyIndexedFreeList(size);
1826     }
1827   )
1828 }
1829 
1830 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1831   /* A hint is the next larger size that has a surplus.
1832      Start search at a size large enough to guarantee that
1833      the excess is >= MinChunkSize. */
1834   size_t start = align_object_size(numWords + MinChunkSize);
1835   if (start < IndexSetSize) {
1836     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1837     size_t    hint = _indexedFreeList[start].hint();
1838     while (hint < IndexSetSize) {
1839       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1840       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1841       if (fl->surplus() > 0 && fl->head() != NULL) {
1842         // Found a list with surplus, reset original hint
1843         // and split out a free chunk which is returned.
1844         _indexedFreeList[start].set_hint(hint);
1845         FreeChunk* res = getFromListGreater(fl, numWords);
1846         assert(res == NULL || res->is_free(),
1847           "Should be returning a free chunk");
1848         return res;
1849       }
1850       hint = fl->hint(); /* keep looking */
1851     }
1852     /* None found. */
1853     it[start].set_hint(IndexSetSize);
1854   }
1855   return NULL;
1856 }
1857 
1858 /* Requires fl->size >= numWords + MinChunkSize */
1859 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1860   size_t numWords) {
1861   FreeChunk *curr = fl->head();
1862   assert(curr != NULL, "List is empty");
1863   size_t oldNumWords = curr->size();
1864   assert(numWords >= MinChunkSize, "Word size is too small");
1865   assert(oldNumWords >= numWords + MinChunkSize,
1866         "Size of chunks in the list is too small");
1867 
1868   fl->remove_chunk(curr);
1869   // recorded indirectly by splitChunkAndReturnRemainder -
1870   // smallSplit(oldNumWords, numWords);
1871   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1872   // Does anything have to be done for the remainder in terms of
1873   // fixing the card table?
1874   assert(new_chunk == NULL || new_chunk->is_free(),
1875     "Should be returning a free chunk");
1876   return new_chunk;
1877 }
1878 
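     // Split "chunk" into a front piece of new_size words, which is returned
     // to the caller, and a remainder, which is marked free and added back to
     // the indexed free lists or the dictionary (hence "ReturnRemainder").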
1879 FreeChunk*
1880 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1881   size_t new_size) {
1882   assert_locked();
1883   size_t size = chunk->size();
1884   assert(size > new_size, "Split from a smaller block?");
1885   assert(is_aligned(chunk), "alignment problem");
1886   assert(size == adjustObjectSize(size), "alignment problem");
1887   size_t rem_sz = size - new_size;
1888   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1889   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1890   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1891   assert(is_aligned(ffc), "alignment problem");
1892   ffc->set_size(rem_sz);
1893   ffc->link_next(NULL);
1894   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1895   // Above must occur before BOT is updated below.
1896   // adjust block offset table
1897   OrderAccess::storestore();
1898   assert(chunk->is_free() && ffc->is_free(), "Error");
1899   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1900   if (rem_sz < SmallForDictionary) {
1901     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1902     bool is_par = Thread::current()->is_GC_task_thread();
1903     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1904     returnChunkToFreeList(ffc);
1905     split(size, rem_sz);
1906     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1907   } else {
1908     returnChunkToDictionary(ffc);
1909     split(size, rem_sz);
1910   }
1911   chunk->set_size(new_size);
1912   return chunk;
1913 }
1914 
1915 void
1916 CompactibleFreeListSpace::sweep_completed() {
1917   // Now that space is probably plentiful, refill linear
1918   // allocation blocks as needed.
1919   refillLinearAllocBlocksIfNeeded();
1920 }
1921 
1922 void
1923 CompactibleFreeListSpace::gc_prologue() {
1924   assert_locked();
1925   if (PrintFLSStatistics != 0) {
1926     gclog_or_tty->print("Before GC:\n");
1927     reportFreeListStatistics();
1928   }
1929   refillLinearAllocBlocksIfNeeded();
1930 }
1931 
1932 void
1933 CompactibleFreeListSpace::gc_epilogue() {
1934   assert_locked();
1935   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1936     if (_smallLinearAllocBlock._word_size == 0)
1937       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1938   }
1939   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1940   _promoInfo.stopTrackingPromotions();
1941   repairLinearAllocationBlocks();
1942   // Print Space's stats
1943   if (PrintFLSStatistics != 0) {
1944     gclog_or_tty->print("After GC:\n");
1945     reportFreeListStatistics();
1946   }
1947 }
1948 
1949 // Iteration support, mostly delegated from a CMS generation
1950 
1951 void CompactibleFreeListSpace::save_marks() {
1952   assert(Thread::current()->is_VM_thread(),
1953          "Global variable should only be set when single-threaded");
1954   // Mark the "end" of the used space at the time of this call;
1955   // note, however, that promoted objects from this point
1956   // on are tracked in the _promoInfo below.
1957   set_saved_mark_word(unallocated_block());
1958 #ifdef ASSERT
1959   // Check the sanity of save_marks() etc.
1960   MemRegion ur    = used_region();
1961   MemRegion urasm = used_region_at_save_marks();
1962   assert(ur.contains(urasm),
1963          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1964                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1965                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1966 #endif
1967   // inform allocator that promotions should be tracked.
1968   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1969   _promoInfo.startTrackingPromotions();
1970 }
1971 
1972 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1973   assert(_promoInfo.tracking(), "No preceding save_marks?");
1974   return _promoInfo.noPromotions();
1975 }
1976 
1977 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1978                                                                             \
1979 void CompactibleFreeListSpace::                                             \
1980 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1981   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1982   /*                                                                        \
1983    * This also restores any displaced headers and removes the elements from \
1984    * the iteration set as they are processed, so that we have a clean slate \
1985    * at the end of the iteration. Note, thus, that if new objects are       \
1986    * promoted as a result of the iteration they are iterated over as well.  \
1987    */                                                                       \
1988   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1989 }
1990 
1991 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1992 
1993 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1994   return _smallLinearAllocBlock._word_size == 0;
1995 }
1996 
1997 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1998   // Fix up linear allocation blocks to look like free blocks
1999   repairLinearAllocBlock(&_smallLinearAllocBlock);
2000 }
2001 
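     // Make the unused portion of the linear allocation block look like a
     // free, uncoalescable chunk (cf. repairLinearAllocationBlocks() above).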
2002 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2003   assert_locked();
2004   if (blk->_ptr != NULL) {
2005     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2006            "Minimum block size requirement");
2007     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2008     fc->set_size(blk->_word_size);
2009     fc->link_prev(NULL);   // mark as free
2010     fc->dontCoalesce();
2011     assert(fc->is_free(), "just marked it free");
2012     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2013   }
2014 }
2015 
2016 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2017   assert_locked();
2018   if (_smallLinearAllocBlock._ptr == NULL) {
2019     assert(_smallLinearAllocBlock._word_size == 0,
2020       "Size of linAB should be zero if the ptr is NULL");
2021     // Reset the linAB refill and allocation size limit.
2022     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2023   }
2024   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2025 }
2026 
2027 void
2028 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2029   assert_locked();
2030   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2031          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2032          "blk invariant");
2033   if (blk->_ptr == NULL) {
2034     refillLinearAllocBlock(blk);
2035   }
2036   if (PrintMiscellaneous && Verbose) {
2037     if (blk->_word_size == 0) {
2038       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2039     }
2040   }
2041 }
2042 
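     // Refill an empty linear allocation block with a chunk of
     // blk->_refillSize words, taken from the indexed free lists when the
     // refill size is small enough, otherwise from the dictionary.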
2043 void
2044 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2045   assert_locked();
2046   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2047          "linear allocation block should be empty");
2048   FreeChunk* fc;
2049   if (blk->_refillSize < SmallForDictionary &&
2050       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2051     // A linAB's strategy might be to use small sizes to reduce
2052     // fragmentation but still get the benefits of allocation from a
2053     // linAB.
2054   } else {
2055     fc = getChunkFromDictionary(blk->_refillSize);
2056   }
2057   if (fc != NULL) {
2058     blk->_ptr  = (HeapWord*)fc;
2059     blk->_word_size = fc->size();
2060     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2061   }
2062 }
2063 
2064 // Support for concurrent collection policy decisions.
2065 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2066   // In the future we might want to add in fragmentation stats --
2067   // including erosion of the "mountain" into this decision as well.
2068   return !adaptive_freelists() && linearAllocationWouldFail();
2069 }
2070 
2071 #define cfls_obj_size(q) CompactibleFreeListSpace::adjustObjectSize(oop(q)->size())
2072 DECLARE_PMS_SPECIALIZED_CODE(CompactibleFreeListSpace, cfls_obj_size);
2073 
2074 // Support for compaction
2075 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2076   if (CMSParallelFullGC) {
2077     pms_prepare_for_compaction_work(cp);
2078   } else {
2079     scan_and_forward(this, cp);
2080   }
2081   // prepare_for_compaction() uses the space between live objects,
2082   // so that later phases can skip dead space quickly.  As a result,
2083   // verification of the free lists doesn't work afterwards.
2084 }
2085 
2086 void CompactibleFreeListSpace::adjust_pointers() {
2087   if (CMSParallelFullGC) {
2088     pms_adjust_pointers_work();
2089   } else {
2090     // In other versions of adjust_pointers(), a bail out
2091     // based on the amount of live data in the generation
2092     // (i.e., if 0, bail out) may be used.
2093     // Cannot test used() == 0 here because the free lists have already
2094     // been mangled by the compaction.
2095     scan_and_adjust_pointers(this);
2096   }
2097   // See note about verification in prepare_for_compaction().
2098 }
2099 
2100 void CompactibleFreeListSpace::compact() {
2101   if (CMSParallelFullGC) {
2102     pms_compact_work();
2103   } else {
2104     scan_and_compact(this);
2105   }
2106 }
2107 
2108 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2109 // where fbs are the free block sizes.
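     // For example (illustrative numbers, not taken from the code): with free
     // blocks of sizes {2, 2, 4} words, sum of fbs = 8 and sum of (fbs**2) = 24,
     // giving frag = 1 - 24/64 = 0.625; a single free block of 8 words would
     // instead give frag = 1 - 64/64 = 0, i.e. no fragmentation.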
2110 double CompactibleFreeListSpace::flsFrag() const {
2111   size_t itabFree = totalSizeInIndexedFreeLists();
2112   double frag = 0.0;
2113   size_t i;
2114 
2115   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2116     double sz  = i;
2117     frag      += _indexedFreeList[i].count() * (sz * sz);
2118   }
2119 
2120   double totFree = itabFree +
2121                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2122   if (totFree > 0) {
2123     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2124             (totFree * totFree));
2125     frag = (double)1.0  - frag;
2126   } else {
2127     assert(frag == 0.0, "Follows from totFree == 0");
2128   }
2129   return frag;
2130 }
2131 
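     // At the start of a sweep, compute the desired length of each indexed
     // free list and snapshot the current counts and surpluses; these values
     // drive the coalescing and splitting decisions made during the sweep.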
2132 void CompactibleFreeListSpace::beginSweepFLCensus(
2133   float inter_sweep_current,
2134   float inter_sweep_estimate,
2135   float intra_sweep_estimate) {
2136   assert_locked();
2137   size_t i;
2138   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2139     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2140     if (PrintFLSStatistics > 1) {
2141       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2142     }
2143     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2144     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2145     fl->set_before_sweep(fl->count());
2146     fl->set_bfr_surp(fl->surplus());
2147   }
2148   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2149                                     inter_sweep_current,
2150                                     inter_sweep_estimate,
2151                                     intra_sweep_estimate);
2152 }
2153 
2154 void CompactibleFreeListSpace::setFLSurplus() {
2155   assert_locked();
2156   size_t i;
2157   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2158     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2159     fl->set_surplus(fl->count() -
2160                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2161   }
2162 }
2163 
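     // For each list size, record as a "hint" the next larger size that
     // currently has a surplus; bestFitSmall() follows these hints when
     // looking for a larger chunk to split.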
2164 void CompactibleFreeListSpace::setFLHints() {
2165   assert_locked();
2166   size_t i;
2167   size_t h = IndexSetSize;
2168   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2169     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2170     fl->set_hint(h);
2171     if (fl->surplus() > 0) {
2172       h = i;
2173     }
2174   }
2175 }
2176 
2177 void CompactibleFreeListSpace::clearFLCensus() {
2178   assert_locked();
2179   size_t i;
2180   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2181     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2182     fl->set_prev_sweep(fl->count());
2183     fl->set_coal_births(0);
2184     fl->set_coal_deaths(0);
2185     fl->set_split_births(0);
2186     fl->set_split_deaths(0);
2187   }
2188 }
2189 
2190 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2191   if (PrintFLSStatistics > 0) {
2192     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2193     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2194                            p2i(largestAddr));
2195   }
2196   setFLSurplus();
2197   setFLHints();
2198   if (PrintGC && PrintFLSCensus > 0) {
2199     printFLCensus(sweep_count);
2200   }
2201   clearFLCensus();
2202   assert_locked();
2203   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2204 }
2205 
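     // A small size is considered over-populated for coalescing purposes when
     // its indexed free list holds more chunks than its coal_desired() target
     // (a negative target also counts as over-populated); larger sizes defer
     // to the dictionary's census.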
2206 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2207   if (size < SmallForDictionary) {
2208     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2209     return (fl->coal_desired() < 0) ||
2210            ((int)fl->count() > fl->coal_desired());
2211   } else {
2212     return dictionary()->coal_dict_over_populated(size);
2213   }
2214 }
2215 
2216 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2217   assert(size < SmallForDictionary, "Size too large for indexed list");
2218   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2219   fl->increment_coal_births();
2220   fl->increment_surplus();
2221 }
2222 
2223 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2224   assert(size < SmallForDictionary, "Size too large for indexed list");
2225   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2226   fl->increment_coal_deaths();
2227   fl->decrement_surplus();
2228 }
2229 
2230 void CompactibleFreeListSpace::coalBirth(size_t size) {
2231   if (size  < SmallForDictionary) {
2232     smallCoalBirth(size);
2233   } else {
2234     dictionary()->dict_census_update(size,
2235                                    false /* split */,
2236                                    true /* birth */);
2237   }
2238 }
2239 
2240 void CompactibleFreeListSpace::coalDeath(size_t size) {
2241   if (size < SmallForDictionary) {
2242     smallCoalDeath(size);
2243   } else {
2244     dictionary()->dict_census_update(size,
2245                                    false /* split */,
2246                                    false /* birth */);
2247   }
2248 }
2249 
2250 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2251   assert(size < SmallForDictionary, "Size too large for indexed list");
2252   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2253   fl->increment_split_births();
2254   fl->increment_surplus();
2255 }
2256 
2257 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2258   assert(size < SmallForDictionary, "Size too large for indexed list");
2259   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2260   fl->increment_split_deaths();
2261   fl->decrement_surplus();
2262 }
2263 
2264 void CompactibleFreeListSpace::split_birth(size_t size) {
2265   if (size  < SmallForDictionary) {
2266     smallSplitBirth(size);
2267   } else {
2268     dictionary()->dict_census_update(size,
2269                                    true /* split */,
2270                                    true /* birth */);
2271   }
2272 }
2273 
2274 void CompactibleFreeListSpace::splitDeath(size_t size) {
2275   if (size  < SmallForDictionary) {
2276     smallSplitDeath(size);
2277   } else {
2278     dictionary()->dict_census_update(size,
2279                                    true /* split */,
2280                                    false /* birth */);
2281   }
2282 }
2283 
2284 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2285   size_t to2 = from - to1;
2286   splitDeath(from);
2287   split_birth(to1);
2288   split_birth(to2);
2289 }
2290 
2291 void CompactibleFreeListSpace::print() const {
2292   print_on(tty);
2293 }
2294 
2295 void CompactibleFreeListSpace::prepare_for_verify() {
2296   assert_locked();
2297   repairLinearAllocationBlocks();
2298   // Verify that the SpoolBlocks look like free blocks of
2299   // appropriate sizes... To be done ...
2300 }
2301 
2302 class VerifyAllBlksClosure: public BlkClosure {
2303  private:
2304   const CompactibleFreeListSpace* _sp;
2305   const MemRegion                 _span;
2306   HeapWord*                       _last_addr;
2307   size_t                          _last_size;
2308   bool                            _last_was_obj;
2309   bool                            _last_was_live;
2310 
2311  public:
2312   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2313     MemRegion span) :  _sp(sp), _span(span),
2314                        _last_addr(NULL), _last_size(0),
2315                        _last_was_obj(false), _last_was_live(false) { }
2316 
2317   virtual size_t do_blk(HeapWord* addr) {
2318     size_t res;
2319     bool   was_obj  = false;
2320     bool   was_live = false;
2321     if (_sp->block_is_obj(addr)) {
2322       was_obj = true;
2323       oop p = oop(addr);
2324       guarantee(p->is_oop(), "Should be an oop");
2325       res = _sp->adjustObjectSize(p->size());
2326       if (_sp->obj_is_alive(addr)) {
2327         was_live = true;
2328         p->verify();
2329       }
2330     } else {
2331       FreeChunk* fc = (FreeChunk*)addr;
2332       res = fc->size();
2333       if (FLSVerifyLists && !fc->cantCoalesce()) {
2334         guarantee(_sp->verify_chunk_in_free_list(fc),
2335                   "Chunk should be on a free list");
2336       }
2337     }
2338     if (res == 0) {
2339       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2340       gclog_or_tty->print_cr(
2341         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2342         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2343         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2344         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2345       _sp->print_on(gclog_or_tty);
2346       guarantee(false, "Seppuku!");
2347     }
2348     _last_addr = addr;
2349     _last_size = res;
2350     _last_was_obj  = was_obj;
2351     _last_was_live = was_live;
2352     return res;
2353   }
2354 };
2355 
2356 class VerifyAllOopsClosure: public OopClosure {
2357  private:
2358   const CMSCollector*             _collector;
2359   const CompactibleFreeListSpace* _sp;
2360   const MemRegion                 _span;
2361   const bool                      _past_remark;
2362   const CMSBitMap*                _bit_map;
2363 
2364  protected:
2365   void do_oop(void* p, oop obj) {
2366     if (_span.contains(obj)) { // the interior oop points into CMS heap
2367       if (!_span.contains(p)) { // reference from outside CMS heap
2368         // Should be a valid object; the first disjunct below allows
2369         // us to sidestep an assertion in block_is_obj() that insists
2370         // that p be in _sp. Note that several generations (and spaces)
2371         // are spanned by _span (CMS heap) above.
2372         guarantee(!_sp->is_in_reserved(obj) ||
2373                   _sp->block_is_obj((HeapWord*)obj),
2374                   "Should be an object");
2375         guarantee(obj->is_oop(), "Should be an oop");
2376         obj->verify();
2377         if (_past_remark) {
2378           // Remark has been completed, the object should be marked
2379           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2380         }
2381       } else { // reference within CMS heap
2382         if (_past_remark) {
2383           // Remark has been completed -- so the referent should have
2384           // been marked, if referring object is.
2385           if (_bit_map->isMarked(_collector->block_start(p))) {
2386             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2387           }
2388         }
2389       }
2390     } else if (_sp->is_in_reserved(p)) {
2391       // the reference is from FLS, and points out of FLS
2392       guarantee(obj->is_oop(), "Should be an oop");
2393       obj->verify();
2394     }
2395   }
2396 
2397   template <class T> void do_oop_work(T* p) {
2398     T heap_oop = oopDesc::load_heap_oop(p);
2399     if (!oopDesc::is_null(heap_oop)) {
2400       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2401       do_oop(p, obj);
2402     }
2403   }
2404 
2405  public:
2406   VerifyAllOopsClosure(const CMSCollector* collector,
2407     const CompactibleFreeListSpace* sp, MemRegion span,
2408     bool past_remark, CMSBitMap* bit_map) :
2409     _collector(collector), _sp(sp), _span(span),
2410     _past_remark(past_remark), _bit_map(bit_map) { }
2411 
2412   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2413   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2414 };
2415 
2416 void CompactibleFreeListSpace::verify() const {
2417   assert_lock_strong(&_freelistLock);
2418   verify_objects_initialized();
2419   MemRegion span = _collector->_span;
2420   bool past_remark = (_collector->abstract_state() ==
2421                       CMSCollector::Sweeping);
2422 
2423   ResourceMark rm;
2424   HandleMark  hm;
2425 
2426   // Check integrity of CFL data structures
2427   _promoInfo.verify();
2428   _dictionary->verify();
2429   if (FLSVerifyIndexTable) {
2430     verifyIndexedFreeLists();
2431   }
2432   // Check integrity of all objects and free blocks in space
2433   {
2434     VerifyAllBlksClosure cl(this, span);
2435     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2436   }
2437   // Check that all references in the heap to FLS
2438   // are to valid objects in FLS or that references in
2439   // FLS are to valid objects elsewhere in the heap
2440   if (FLSVerifyAllHeapReferences)
2441   {
2442     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2443       _collector->markBitMap());
2444 
2445     // Iterate over all oops in the heap. Uses the _no_header version
2446     // since we are not interested in following the klass pointers.
2447     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2448   }
2449 
2450   if (VerifyObjectStartArray) {
2451     // Verify the block offset table
2452     _bt.verify();
2453   }
2454 }
2455 
2456 #ifndef PRODUCT
2457 void CompactibleFreeListSpace::verifyFreeLists() const {
2458   if (FLSVerifyLists) {
2459     _dictionary->verify();
2460     verifyIndexedFreeLists();
2461   } else {
2462     if (FLSVerifyDictionary) {
2463       _dictionary->verify();
2464     }
2465     if (FLSVerifyIndexTable) {
2466       verifyIndexedFreeLists();
2467     }
2468   }
2469 }
2470 #endif
2471 
2472 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2473   size_t i = 0;
2474   for (; i < IndexSetStart; i++) {
2475     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2476   }
2477   for (; i < IndexSetSize; i++) {
2478     verifyIndexedFreeList(i);
2479   }
2480 }
2481 
2482 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2483   FreeChunk* fc   =  _indexedFreeList[size].head();
2484   FreeChunk* tail =  _indexedFreeList[size].tail();
2485   size_t    num = _indexedFreeList[size].count();
2486   size_t      n = 0;
2487   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2488             "Slot should have been empty");
2489   for (; fc != NULL; fc = fc->next(), n++) {
2490     guarantee(fc->size() == size, "Size inconsistency");
2491     guarantee(fc->is_free(), "!free?");
2492     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2493     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2494   }
2495   guarantee(n == num, "Incorrect count");
2496 }
2497 
2498 #ifndef PRODUCT
2499 void CompactibleFreeListSpace::check_free_list_consistency() const {
2500   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2501     "Some sizes can't be allocated without recourse to"
2502     " linear allocation buffers");
2503   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2504     "else MIN_TREE_CHUNK_SIZE is wrong");
2505   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2506   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2507 }
2508 #endif
2509 
2510 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2511   assert_lock_strong(&_freelistLock);
2512   AdaptiveFreeList<FreeChunk> total;
2513   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2514   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2515   size_t total_free = 0;
2516   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2517     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2518     total_free += fl->count() * fl->size();
2519     if (i % (40*IndexSetStride) == 0) {
2520       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2521     }
2522     fl->print_on(gclog_or_tty);
2523     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2524     total.set_surplus(    total.surplus()     + fl->surplus()    );
2525     total.set_desired(    total.desired()     + fl->desired()    );
2526     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2527     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2528     total.set_count(      total.count()       + fl->count()      );
2529     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2530     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2531     total.set_split_births(total.split_births() + fl->split_births());
2532     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2533   }
2534   total.print_on(gclog_or_tty, "TOTAL");
2535   gclog_or_tty->print_cr("Total free in indexed lists "
2536                          SIZE_FORMAT " words", total_free);
2537   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2538     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2539             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2540     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2541   _dictionary->print_dict_census();
2542 }
2543 
2544 ///////////////////////////////////////////////////////////////////////////
2545 // CFLS_LAB
2546 ///////////////////////////////////////////////////////////////////////////
2547 
2548 #define VECTOR_257(x)                                                                                  \
2549   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2550   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2551      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2552      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2553      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2554      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2555      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2556      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2557      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2558      x }
2559 
2560 // Initialize with the default setting for CMS, _not_ the
2561 // generic OldPLABSize, whose static default is different; if overridden on the
2562 // command line, this will be reinitialized via a call to
2563 // modify_initialization() below.
2564 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2565   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
2566 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2567 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
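     // The two arrays just above accumulate, per block size, the number of
     // blocks consumed and the number of workers that claimed blocks of that
     // size during a scavenge; compute_desired_plab_size() consumes and then
     // resets them.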
2568 
2569 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2570   _cfls(cfls)
2571 {
2572   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2573   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2574        i < CompactibleFreeListSpace::IndexSetSize;
2575        i += CompactibleFreeListSpace::IndexSetStride) {
2576     _indexedFreeList[i].set_size(i);
2577     _num_blocks[i] = 0;
2578   }
2579 }
2580 
2581 static bool _CFLS_LAB_modified = false;
2582 
2583 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2584   assert(!_CFLS_LAB_modified, "Call only once");
2585   _CFLS_LAB_modified = true;
2586   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2587        i < CompactibleFreeListSpace::IndexSetSize;
2588        i += CompactibleFreeListSpace::IndexSetStride) {
2589     _blocks_to_claim[i].modify(n, wt, true /* force */);
2590   }
2591 }
2592 
2593 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2594   FreeChunk* res;
2595   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
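       // Requests of IndexSetSize words or more are satisfied from the shared
       // dictionary; smaller ones come from this thread's local indexed free
       // lists, which are refilled from the global pool as needed.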
2596   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2597     // This lock synchronizes with other large-object allocations.
2598     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2599                     Mutex::_no_safepoint_check_flag);
2600     res = _cfls->getChunkFromDictionaryExact(word_sz);
2601     if (res == NULL) return NULL;
2602   } else {
2603     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2604     if (fl->count() == 0) {
2605       // Attempt to refill this local free list.
2606       get_from_global_pool(word_sz, fl);
2607       // If it didn't work, give up.
2608       if (fl->count() == 0) return NULL;
2609     }
2610     res = fl->get_chunk_at_head();
2611     assert(res != NULL, "Why was count non-zero?");
2612   }
2613   res->markNotFree();
2614   assert(!res->is_free(), "shouldn't be marked free");
2615   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2616   // Mangle the just-allocated object with a distinct pattern.
2617   debug_only(res->mangleAllocated(word_sz));
2618   return (HeapWord*)res;
2619 }
2620 
2621 // Get a chunk of blocks of the right size and update the related
2622 // bookkeeping stats.
2623 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2624   // Get the #blocks we want to claim
2625   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2626   assert(n_blks > 0, "Error");
2627   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2628   // In some cases, when the application has a phase change,
2629   // there may be a sudden and sharp shift in the object survival
2630   // profile, and updating the counts at the end of a scavenge
2631   // may not be quick enough, giving rise to large scavenge pauses
2632   // during these phase changes. It is beneficial to detect such
2633   // changes on-the-fly during a scavenge and avoid such a phase-change
2634   // pothole. The following code is a heuristic attempt to do that.
2635   // It is protected by a product flag until we have gained
2636   // enough experience with this heuristic and fine-tuned its behavior.
2637   // WARNING: This might increase fragmentation if we overreact to
2638   // small spikes, so some form of historical smoothing, informed by
2639   // experience with this increased reactivity, might be useful.
2640   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2641   // default.
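       // Purely illustrative example (hypothetical flag values): if n_blks == 16,
       // CMSOldPLABNumRefills == 4, CMSOldPLABToleranceFactor == 4 and this thread
       // has already used 512 blocks of this size, then multiple == 512/(4*4*16) == 2,
       // and with CMSOldPLABReactivityFactor == 2 the request below grows to
       // 16 + 2*2*16 == 80 blocks, subject to the CMSOldPLABMax cap.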
2642   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2643     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2644     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2645     n_blks = MIN2(n_blks, CMSOldPLABMax);
2646   }
2647   assert(n_blks > 0, "Error");
2648   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2649   // Update stats table entry for this block size
2650   _num_blocks[word_sz] += fl->count();
2651 }
2652 
2653 void CFLS_LAB::compute_desired_plab_size() {
2654   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2655        i < CompactibleFreeListSpace::IndexSetSize;
2656        i += CompactibleFreeListSpace::IndexSetStride) {
2657     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2658            "Counter inconsistency");
2659     if (_global_num_workers[i] > 0) {
2660       // Need to smooth wrt historical average
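           // The new sample is the number of blocks of this size used per
           // worker per refill during the last scavenge, clamped to the
           // range [CMSOldPLABMin, CMSOldPLABMax].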
2661       if (ResizeOldPLAB) {
2662         _blocks_to_claim[i].sample(
2663           MAX2(CMSOldPLABMin,
2664           MIN2(CMSOldPLABMax,
2665                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2666       }
2667       // Reset counters for next round
2668       _global_num_workers[i] = 0;
2669       _global_num_blocks[i] = 0;
2670       if (PrintOldPLAB) {
2671         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2672                                i, (size_t)_blocks_to_claim[i].average());
2673       }
2674     }
2675   }
2676 }
2677 
2678 // If this is changed in the future to allow parallel
2679 // access, one would need to take the FL locks and,
2680 // depending on how it is used, stagger access from
2681 // parallel threads to reduce contention.
2682 void CFLS_LAB::retire(int tid) {
2683   // We run this single-threaded with the world stopped,
2684   // so there is no need for locks and such.
2685   NOT_PRODUCT(Thread* t = Thread::current();)
2686   assert(Thread::current()->is_VM_thread(), "Error");
2687   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2688        i < CompactibleFreeListSpace::IndexSetSize;
2689        i += CompactibleFreeListSpace::IndexSetStride) {
2690     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2691            "Can't retire more than what we obtained");
2692     if (_num_blocks[i] > 0) {
2693       size_t num_retire =  _indexedFreeList[i].count();
2694       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2695       {
2696         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2697         //                Mutex::_no_safepoint_check_flag);
2698 
2699         // Update global stats for the number of blocks used
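             // (_num_blocks[i] - num_retire) is the number of blocks this
             // worker actually allocated from its local list; the num_retire
             // unused chunks are handed back to the shared indexed list below.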
2700         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2701         _global_num_workers[i]++;
2702         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2703         if (num_retire > 0) {
2704           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2705           // Reset this list.
2706           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2707           _indexedFreeList[i].set_size(i);
2708         }
2709       }
2710       if (PrintOldPLAB) {
2711         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2712                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2713       }
2714       // Reset stats for next round
2715       _num_blocks[i]         = 0;
2716     }
2717   }
2718 }
2719 
2720 // Used by par_get_chunk_of_blocks() for chunks taken from the
2721 // indexed_free_lists.  Looks for chunks whose size is a multiple
2722 // of "word_sz" and, if found, splits them into "word_sz"-sized chunks
2723 // which are added to the free list "fl".  "n" is the maximum number
2724 // of chunks to be added to "fl".
2725 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2726 
2727   // We'll try all multiples of word_sz in the indexed set, starting with
2728   // word_sz itself and, if CMSSplitIndexedFreeListBlocks is set, larger
2729   // multiples; failing that, the caller falls back to the dictionary.
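       // Illustrative example: with word_sz == 8 and CMSSplitIndexedFreeListBlocks
       // enabled, we consult the size-8 list first, then the size-16 list (splitting
       // each chunk taken from it two ways), then size-24 (three ways), and so on,
       // stopping at the first list that yields chunks.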
2730   {
2731     bool found;
2732     int  k;
2733     size_t cur_sz;
2734     for (k = 1, cur_sz = k * word_sz, found = false;
2735          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2736          (CMSSplitIndexedFreeListBlocks || k <= 1);
2737          k++, cur_sz = k * word_sz) {
2738       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2739       fl_for_cur_sz.set_size(cur_sz);
2740       {
2741         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2742                         Mutex::_no_safepoint_check_flag);
2743         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2744         if (gfl->count() != 0) {
2745           // nn is the number of chunks of size cur_sz that
2746           // we'd need to split k-ways each, in order to create
2747           // "n" chunks of size word_sz each.
2748           const size_t nn = MAX2(n/k, (size_t)1);
2749           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2750           found = true;
2751           if (k > 1) {
2752             // Update split death stats for the cur_sz-size blocks list:
2753             // we increment the split death count by the number of blocks
2754             // we just took from the cur_sz-size blocks list and which
2755             // we will be splitting below.
2756             ssize_t deaths = gfl->split_deaths() +
2757                              fl_for_cur_sz.count();
2758             gfl->set_split_deaths(deaths);
2759           }
2760         }
2761       }
2762       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2763       if (found) {
2764         if (k == 1) {
2765           fl->prepend(&fl_for_cur_sz);
2766         } else {
2767           // Divide each block on fl_for_cur_sz up k ways.
2768           FreeChunk* fc;
2769           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2770             // Must do this in reverse order, so that anybody attempting to
2771             // access the main chunk sees it as a single free block until we
2772             // change it.
2773             size_t fc_size = fc->size();
2774             assert(fc->is_free(), "Error");
2775             for (int i = k-1; i >= 0; i--) {
2776               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2777               assert((i != 0) ||
2778                         ((fc == ffc) && ffc->is_free() &&
2779                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2780                         "Counting error");
2781               ffc->set_size(word_sz);
2782               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2783               ffc->link_next(NULL);
2784               // Above must occur before BOT is updated below.
2785               OrderAccess::storestore();
2786               // Splitting from the right; after the decrement below, fc_size == i * word_sz
2787               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2788               fc_size -= word_sz;
2789               assert(fc_size == i*word_sz, "Error");
2790               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2791               _bt.verify_single_block((HeapWord*)fc, fc_size);
2792               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2793               // Push this on "fl".
2794               fl->return_chunk_at_head(ffc);
2795             }
2796             // TRAP
2797             assert(fl->tail()->next() == NULL, "List invariant.");
2798           }
2799         }
2800         // Update birth stats for this block size.
2801         size_t num = fl->count();
2802         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2803                         Mutex::_no_safepoint_check_flag);
2804         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2805         _indexedFreeList[word_sz].set_split_births(births);
2806         return true;
2807       }
2808     }
2809     return found;
2810   }
2811 }
2812 
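     // Finds a chunk in the dictionary that can be split into up to "n" chunks of
     // "word_sz" words each.  The chunk returned has been trimmed to an exact
     // multiple of "word_sz" (any leftover remainder has already been returned to
     // the dictionary or to the indexed free lists); returns NULL if no suitable
     // chunk is found.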
2813 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2814 
2815   FreeChunk* fc = NULL;
2816   FreeChunk* rem_fc = NULL;
2817   size_t rem;
2818   {
2819     MutexLockerEx x(parDictionaryAllocLock(),
2820                     Mutex::_no_safepoint_check_flag);
2821     while (n > 0) {
2822       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2823                                   FreeBlockDictionary<FreeChunk>::atLeast);
2824       if (fc != NULL) {
2825         break;
2826       } else {
2827         n--;
2828       }
2829     }
2830     if (fc == NULL) return NULL;
2831     // Otherwise, split up that block.
2832     assert((ssize_t)n >= 1, "Control point invariant");
2833     assert(fc->is_free(), "Error: should be a free block");
2834     _bt.verify_single_block((HeapWord*)fc, fc->size());
2835     const size_t nn = fc->size() / word_sz;
2836     n = MIN2(nn, n);
2837     assert((ssize_t)n >= 1, "Control point invariant");
2838     rem = fc->size() - n * word_sz;
2839     // If there is a remainder, and it's too small, allocate one fewer.
2840     if (rem > 0 && rem < MinChunkSize) {
2841       n--; rem += word_sz;
2842     }
2843     // Note that at this point we may have n == 0.
2844     assert((ssize_t)n >= 0, "Control point invariant");
2845 
2846     // If n is 0, the chunk fc that was found is not large
2847     // enough to leave a viable remainder.  We are unable to
2848     // allocate even one block.  Return fc to the
2849     // dictionary and return, leaving "fl" empty.
2850     if (n == 0) {
2851       returnChunkToDictionary(fc);
2852       return NULL;
2853     }
2854 
2855     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2856     dictionary()->dict_census_update(fc->size(),
2857                                      true /*split*/,
2858                                      false /*birth*/);
2859 
2860     // First return the remainder, if any.
2861     // Note that we hold the lock until we decide if we're going to give
2862     // back the remainder to the dictionary, since a concurrent allocation
2863     // may otherwise see the heap as empty.  (We're willing to take that
2864     // hit if the block is a small block.)
2865     if (rem > 0) {
2866       size_t prefix_size = n * word_sz;
2867       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2868       rem_fc->set_size(rem);
2869       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2870       rem_fc->link_next(NULL);
2871       // Above must occur before BOT is updated below.
2872       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2873       OrderAccess::storestore();
2874       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2875       assert(fc->is_free(), "Error");
2876       fc->set_size(prefix_size);
2877       if (rem >= IndexSetSize) {
2878         returnChunkToDictionary(rem_fc);
2879         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2880         rem_fc = NULL;
2881       }
2882       // Otherwise, return it to the small list below.
2883     }
2884   }
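       // A small remainder, if any, is returned to its indexed free list here,
       // outside the dictionary lock; only the per-size list lock is needed.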
2885   if (rem_fc != NULL) {
2886     MutexLockerEx x(_indexedFreeListParLocks[rem],
2887                     Mutex::_no_safepoint_check_flag);
2888     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2889     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2890     smallSplitBirth(rem);
2891   }
2892   assert(n * word_sz == fc->size(),
2893     err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable into "
2894     SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2895     fc->size(), n, word_sz));
2896   return fc;
2897 }
2898 
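     // Used by par_get_chunk_of_blocks() when the indexed free lists cannot
     // satisfy the request: splits a chunk obtained from
     // get_n_way_chunk_to_split() into "word_sz"-sized pieces and adds them to "fl".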
2899 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2900 
2901   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2902 
2903   if (fc == NULL) {
2904     return;
2905   }
2906 
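       // fc was trimmed by get_n_way_chunk_to_split() to an exact multiple of
       // word_sz, so the division below is exact.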
2907   size_t n = fc->size() / word_sz;
2908 
2909   assert((ssize_t)n > 0, "Consistency");
2910   // Now do the splitting up.
2911   // Must do this in reverse order, so that anybody attempting to
2912   // access the main chunk sees it as a single free block until we
2913   // change it.
2914   size_t fc_size = n * word_sz;
2915   // All but first chunk in this loop
2916   for (ssize_t i = n-1; i > 0; i--) {
2917     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2918     ffc->set_size(word_sz);
2919     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2920     ffc->link_next(NULL);
2921     // Above must occur before BOT is updated below.
2922     OrderAccess::storestore();
2923     // Splitting from the right; after the decrement below, fc_size == i * word_sz
2924     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2925     fc_size -= word_sz;
2926     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2927     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2928     _bt.verify_single_block((HeapWord*)fc, fc_size);
2929     // Push this on "fl".
2930     fl->return_chunk_at_head(ffc);
2931   }
2932   // First chunk
2933   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2934   // The blocks above should show their new sizes before the first block below
2935   fc->set_size(word_sz);
2936   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2937   fc->link_next(NULL);
2938   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2939   _bt.verify_single_block((HeapWord*)fc, fc->size());
2940   fl->return_chunk_at_head(fc);
2941 
2942   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2943   {
2944     // Update the stats for this block size.
2945     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2946                     Mutex::_no_safepoint_check_flag);
2947     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2948     _indexedFreeList[word_sz].set_split_births(births);
2949     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2950     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2951   }
2952 
2953   // TRAP
2954   assert(fl->tail()->next() == NULL, "List invariant.");
2955 }
2956 
2957 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2958   assert(fl->count() == 0, "Precondition.");
2959   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2960          "Precondition");
2961 
2962   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2963     // Got it
2964     return;
2965   }
2966 
2967   // Otherwise, we'll split a block from the dictionary.
2968   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2969 }
2970 
2971 // Set up the space's par_seq_tasks structure for work claiming
2972 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2973 // XXX Need to suitably abstract and generalize this and the next
2974 // method into one.
2975 void
2976 CompactibleFreeListSpace::
2977 initialize_sequential_subtasks_for_rescan(int n_threads) {
2978   // The "size" of each task is fixed according to rescan_task_size.
2979   assert(n_threads > 0, "Unexpected n_threads argument");
2980   const size_t task_size = rescan_task_size();
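       // Number of tasks needed to cover the used region, rounding up so that
       // every word is covered by some task.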
2981   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2982   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2983   assert(n_tasks == 0 ||
2984          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2985           (used_region().start() + n_tasks*task_size >= used_region().end())),
2986          "n_tasks calculation incorrect");
2987   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2988   assert(!pst->valid(), "Clobbering existing data?");
2989   // Sets the condition for completion of the subtask (how many threads
2990   // need to finish in order to be done).
2991   pst->set_n_threads(n_threads);
2992   pst->set_n_tasks((int)n_tasks);
2993 }
2994 
2995 // Set up the space's par_seq_tasks structure for work claiming
2996 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2997 void
2998 CompactibleFreeListSpace::
2999 initialize_sequential_subtasks_for_marking(int n_threads,
3000                                            HeapWord* low) {
3001   // The "size" of each task is fixed according to marking_task_size().
3002   assert(n_threads > 0, "Unexpected n_threads argument");
3003   const size_t task_size = marking_task_size();
3004   assert(task_size > CardTableModRefBS::card_size_in_words &&
3005          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3006          "Otherwise arithmetic below would be incorrect");
3007   MemRegion span = _gen->reserved();
3008   if (low != NULL) {
3009     if (span.contains(low)) {
3010       // Align low down to a card boundary so that
3011       // we can use block_offset_careful() on span boundaries.
3012       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3013                                  CardTableModRefBS::card_size);
3014       // Clip span prefix at aligned_low
3015       span = span.intersection(MemRegion(aligned_low, span.end()));
3016     } else if (low > span.end()) {
3017       span = MemRegion(low, low);  // Null region
3018     } // else use entire span
3019   }
3020   assert(span.is_empty() ||
3021          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3022         "span should start at a card boundary");
3023   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3024   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3025   assert(n_tasks == 0 ||
3026          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3027           (span.start() + n_tasks*task_size >= span.end())),
3028          "n_tasks calculation incorrect");
3029   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3030   assert(!pst->valid(), "Clobbering existing data?");
3031   // Sets the condition for completion of the subtask (how many threads
3032   // need to finish in order to be done).
3033   pst->set_n_threads(n_threads);
3034   pst->set_n_tasks((int)n_tasks);
3035 }