1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/genCollectedHeap.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/space.inline.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
  52 // Highest rank taken by the free list locks
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
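  // A hypothetical worked example (the concrete values are not asserted here):
  // if sizeof(FreeChunk) were 24 bytes and MinObjAlignmentInBytes were 8,
  // min_chunk_size_in_bytes would stay 24 and, on a 64-bit VM with
  // BytesPerWord == 8, MinChunkSize would become 3 words.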
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  77   MemRegion mr, bool use_adaptive_freelists,
  78   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  79   _dictionaryChoice(dictionaryChoice),
  80   _adaptive_freelists(use_adaptive_freelists),
  81   _bt(bs, mr),
  82   // free list locks are in the range of values taken by _lockRank
  83   // This range currently is [_leaf+2, _leaf+3]
  84   // Note: this requires that CFLspace c'tors
  85   // are called serially in the order in which the locks
  86   // are acquired in the program text. This is true today.
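  // Because _lockRank is post-decremented by each constructor (see the
  // _freelistLock initializer below), successively constructed CFLS
  // instances receive successively lower ranks within this range.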
  87   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  88                 Monitor::_safepoint_check_sometimes),
  89   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  90                           "CompactibleFreeListSpace._dict_par_lock", true,
  91                           Monitor::_safepoint_check_never),
  92   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  93                     CMSRescanMultiple),
  94   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  95                     CMSConcMarkMultiple),
  96   _collector(NULL),
  97   _preconsumptionDirtyCardClosure(NULL)
  98 {
  99   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
 100          "FreeChunk is larger than expected");
 101   _bt.set_space(this);
 102   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 103   // We have all of "mr", all of which we place in the dictionary
 104   // as one big chunk. We'll need to decide here which of several
 105   // possible alternative dictionary implementations to use. For
 106   // now the choice is easy, since we have only one working
 107   // implementation, namely, the simple binary tree (splaying
 108   // temporarily disabled).
 109   switch (dictionaryChoice) {
 110     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 111       _dictionary = new AFLBinaryTreeDictionary(mr);
 112       break;
 113     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 114     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 115     default:
 116       warning("dictionaryChoice: selected option not understood; using"
 117               " default BinaryTreeDictionary implementation instead.");
 118   }
 119   assert(_dictionary != NULL, "CMS dictionary initialization");
 120   // The indexed free lists are initially all empty and are lazily
 121   // filled in on demand. Initialize the array elements to NULL.
 122   initializeIndexedFreeListArray();
 123 
 124   // Not using adaptive free lists assumes that allocation is first
 125   // from the linAB's.  Also a cms perm gen which can be compacted
 126   // has to have the klass's klassKlass allocated at a lower
 127   // address in the heap than the klass so that the klassKlass is
 128   // moved to its new location before the klass is moved.
 129   // Set the _refillSize for the linear allocation blocks
 130   if (!use_adaptive_freelists) {
 131     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 132                                            FreeBlockDictionary<FreeChunk>::atLeast);
 133     // The small linAB initially has all the space and will allocate
 134     // a chunk of any size.
 135     HeapWord* addr = (HeapWord*) fc;
 136     _smallLinearAllocBlock.set(addr, fc->size(),
 137       1024*SmallForLinearAlloc, fc->size());
 138     // Note that _unallocated_block is not updated here.
 139     // Allocations from the linear allocation block should
 140     // update it.
 141   } else {
 142     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 143                                SmallForLinearAlloc);
 144   }
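  // At this point the space's free memory is either still recorded as one
  // big chunk in the dictionary (adaptive case) or has been staged in the
  // small linear allocation block (non-adaptive case).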
 145   // CMSIndexedFreeListReplenish should be at least 1
 146   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 147   _promoInfo.setSpace(this);
 148   if (UseCMSBestFit) {
 149     _fitStrategy = FreeBlockBestFitFirst;
 150   } else {
 151     _fitStrategy = FreeBlockStrategyNone;
 152   }
 153   check_free_list_consistency();
 154 
 155   // Initialize locks for parallel case.
 156   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 157     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 158                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 159     DEBUG_ONLY(
 160       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 161     )
 162   }
 163   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 164 }
 165 
 166 // Like CompactibleSpace::forward() but always calls cross_threshold() to
 167 // update the block offset table.  Removed initialize_threshold call because
 168 // CFLS does not use a block offset array for contiguous spaces.
 169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 170                                     CompactPoint* cp, HeapWord* compact_top) {
 171   // q is alive
 172   // First check if we should switch compaction space
 173   assert(this == cp->space, "'this' should be current compaction space.");
 174   size_t compaction_max_size = pointer_delta(end(), compact_top);
 175   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 176     "virtual adjustObjectSize_v() method is not correct");
 177   size_t adjusted_size = adjustObjectSize(size);
 178   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 179          "no small fragments allowed");
 180   assert(minimum_free_block_size() == MinChunkSize,
 181          "for de-virtualized reference below");
 182   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
 183   if (adjusted_size + MinChunkSize > compaction_max_size &&
 184       adjusted_size != compaction_max_size) {
 185     do {
 186       // switch to next compaction space
 187       cp->space->set_compaction_top(compact_top);
 188       cp->space = cp->space->next_compaction_space();
 189       if (cp->space == NULL) {
 190         cp->gen = GenCollectedHeap::heap()->young_gen();
 191         assert(cp->gen != NULL, "compaction must succeed");
 192         cp->space = cp->gen->first_compaction_space();
 193         assert(cp->space != NULL, "generation must have a first compaction space");
 194       }
 195       compact_top = cp->space->bottom();
 196       cp->space->set_compaction_top(compact_top);
 197       // The correct adjusted_size may not be the same as that for this method
 198       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 199       // Use the virtual method, which was avoided above to save the cost of
 200       // virtual dispatch.
 201       adjusted_size = cp->space->adjust_object_size_v(size);
 202       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 203       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 204     } while (adjusted_size > compaction_max_size);
 205   }
 206 
 207   // store the forwarding pointer into the mark word
 208   if ((HeapWord*)q != compact_top) {
 209     q->forward_to(oop(compact_top));
 210     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 211   } else {
 212     // if the object isn't moving we can just set the mark to the default
 213     // mark and handle it specially later on.
 214     q->init_mark();
 215     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 216   }
 217 
 218   compact_top += adjusted_size;
 219 
 220   // we need to update the offset table so that the beginnings of objects can be
 221   // found during scavenge.  Note that we are updating the offset table based on
 222   // where the object will be once the compaction phase finishes.
 223 
 224   // Always call cross_threshold().  A contiguous space would call it only
 225   // when compaction_top exceeds the current threshold, but that short-cut
 226   // does not apply to a non-contiguous space.
 227   cp->threshold =
 228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 229   return compact_top;
 230 }
 231 
 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 233 // and use of single_block instead of alloc_block.  The name here is not really
 234 // appropriate - maybe a more general name could be invented for both the
 235 // contiguous and noncontiguous spaces.
 236 
 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 238   _bt.single_block(start, the_end);
 239   return end();
 240 }
 241 
 242 // Initialize them to NULL.
 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 244   for (size_t i = 0; i < IndexSetSize; i++) {
 245     // Note that on platforms where objects are double word aligned,
 246     // the odd array elements are not used.  It is convenient, however,
 247     // to map directly from the object size to the array element.
 248     _indexedFreeList[i].reset(IndexSetSize);
 249     _indexedFreeList[i].set_size(i);
 250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 254   }
 255 }
 256 
 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 258   for (size_t i = 1; i < IndexSetSize; i++) {
 259     assert(_indexedFreeList[i].size() == (size_t) i,
 260       "Indexed free list sizes are incorrect");
 261     _indexedFreeList[i].reset(IndexSetSize);
 262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 266   }
 267 }
 268 
 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
 270   resetIndexedFreeListArray();
 271   dictionary()->reset();
 272   if (BlockOffsetArrayUseUnallocatedBlock) {
 273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 274     // Everything's allocated until proven otherwise.
 275     _bt.set_unallocated_block(end());
 276   }
 277   if (!mr.is_empty()) {
 278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 279     _bt.single_block(mr.start(), mr.word_size());
 280     FreeChunk* fc = (FreeChunk*) mr.start();
 281     fc->set_size(mr.word_size());
 282     if (mr.word_size() >= IndexSetSize) {
 283       returnChunkToDictionary(fc);
 284     } else {
 285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 287     }
 288     coalBirth(mr.word_size());
 289   }
 290   _promoInfo.reset();
 291   _smallLinearAllocBlock._ptr = NULL;
 292   _smallLinearAllocBlock._word_size = 0;
 293 }
 294 
 295 void CompactibleFreeListSpace::reset_after_compaction() {
 296   // Reset the space to the new reality - one free chunk.
 297   MemRegion mr(compaction_top(), end());
 298   reset(mr);
 299   // Now refill the linear allocation block(s) if possible.
 300   if (_adaptive_freelists) {
 301     refillLinearAllocBlocksIfNeeded();
 302   } else {
 303     // Place as much of mr in the linAB as we can get,
 304     // provided it was big enough to go into the dictionary.
 305     FreeChunk* fc = dictionary()->find_largest_dict();
 306     if (fc != NULL) {
 307       assert(fc->size() == mr.word_size(),
 308              "Why was the chunk broken up?");
 309       removeChunkFromDictionary(fc);
 310       HeapWord* addr = (HeapWord*) fc;
 311       _smallLinearAllocBlock.set(addr, fc->size(),
 312         1024*SmallForLinearAlloc, fc->size());
 313       // Note that _unallocated_block is not updated here.
 314     }
 315   }
 316 }
 317 
 318 // Walks the entire dictionary, returning a coterminal
 319 // chunk, if it exists. Use with caution since it involves
 320 // a potentially complete walk of a potentially large tree.
 321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 322 
 323   assert_lock_strong(&_freelistLock);
 324 
 325   return dictionary()->find_chunk_ends_at(end());
 326 }
 327 
 328 
 329 #ifndef PRODUCT
 330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 331   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 332     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 333   }
 334 }
 335 
 336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 337   size_t sum = 0;
 338   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 339     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 340   }
 341   return sum;
 342 }
 343 
 344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 345   size_t count = 0;
 346   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 347     debug_only(
 348       ssize_t total_list_count = 0;
 349       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 350          fc = fc->next()) {
 351         total_list_count++;
 352       }
 353       assert(total_list_count ==  _indexedFreeList[i].count(),
 354         "Count in list is incorrect");
 355     )
 356     count += _indexedFreeList[i].count();
 357   }
 358   return count;
 359 }
 360 
 361 size_t CompactibleFreeListSpace::totalCount() {
 362   size_t num = totalCountInIndexedFreeLists();
 363   num +=  dictionary()->total_count();
 364   if (_smallLinearAllocBlock._word_size != 0) {
 365     num++;
 366   }
 367   return num;
 368 }
 369 #endif
 370 
 371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 372   FreeChunk* fc = (FreeChunk*) p;
 373   return fc->is_free();
 374 }
 375 
 376 size_t CompactibleFreeListSpace::used() const {
 377   return capacity() - free();
 378 }
 379 
 380 size_t CompactibleFreeListSpace::free() const {
 381   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 382   // if you do this while the structures are in flux you
 383   // may get an approximate answer only; for instance
 384   // because there is concurrent allocation either
 385   // directly by mutators or for promotion during a GC.
 386   // It's "MT-safe", however, in the sense that you are guaranteed
 387   // not to crash and burn, for instance, because of walking
 388   // pointers that could disappear as you were walking them.
 389   // The approximation is because the various components
 390   // that are read below are not read atomically (and
 391   // further the computation of totalSizeInIndexedFreeLists()
 392   // is itself a non-atomic computation). The normal use of
 393   // this is during a resize operation at the end of GC
 394   // and at that time you are guaranteed to get the
 395   // correct actual value. However, for instance, this is
 396   // also read completely asynchronously by the "perf-sampler"
 397   // that supports jvmstat, and you are apt to see the values
 398   // flicker in such cases.
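  // The value computed below is the sum of three independently sampled
  // components: the dictionary, the indexed free lists and the small
  // linear allocation block, converted from words to bytes.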
 399   assert(_dictionary != NULL, "No _dictionary?");
 400   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 401           totalSizeInIndexedFreeLists() +
 402           _smallLinearAllocBlock._word_size) * HeapWordSize;
 403 }
 404 
 405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 406   assert(_dictionary != NULL, "No _dictionary?");
 407   assert_locked();
 408   size_t res = _dictionary->max_chunk_size();
 409   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 410                        (size_t) SmallForLinearAlloc - 1));
 411   // XXX the following could potentially be pretty slow;
 412   // should one, pessimistically for the rare cases when res
 413   // calculated above is less than IndexSetSize,
 414   // just return res calculated above? My reasoning was that
 415   // those cases will be so rare that the extra time spent doesn't
 416   // really matter....
 417   // Note: do not change the loop test i >= res + IndexSetStride
 418   // to i > res below, because i is unsigned and res may be zero.
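  // (With i unsigned, decrementing it below IndexSetStride would wrap around
  // to a huge value; the loop test below guarantees i - IndexSetStride >= res
  // before each decrement.)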
 419   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 420        i -= IndexSetStride) {
 421     if (_indexedFreeList[i].head() != NULL) {
 422       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 423       return i;
 424     }
 425   }
 426   return res;
 427 }
 428 
 429 void LinearAllocBlock::print_on(outputStream* st) const {
 430   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 431             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 432             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 433 }
 434 
 435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 436   st->print_cr("COMPACTIBLE FREELIST SPACE");
 437   st->print_cr(" Space:");
 438   Space::print_on(st);
 439 
 440   st->print_cr("promoInfo:");
 441   _promoInfo.print_on(st);
 442 
 443   st->print_cr("_smallLinearAllocBlock");
 444   _smallLinearAllocBlock.print_on(st);
 445 
 446   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 447 
 448   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 449                _fitStrategy ? "true" : "false", _adaptive_freelists ? "true" : "false");
 450 }
 451 
 452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 453 const {
 454   reportIndexedFreeListStatistics();
 455   st->print_cr("Layout of Indexed Freelists");
 456   st->print_cr("---------------------------");
 457   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 458   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 459     _indexedFreeList[i].print_on(st);
 460     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 461          fc = fc->next()) {
 462       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 463                    p2i(fc), p2i((HeapWord*)fc + i),
 464                    fc->cantCoalesce() ? "\t CC" : "");
 465     }
 466   }
 467 }
 468 
 469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 470 const {
 471   _promoInfo.print_on(st);
 472 }
 473 
 474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 475 const {
 476   _dictionary->report_statistics();
 477   st->print_cr("Layout of Freelists in Tree");
 478   st->print_cr("---------------------------");
 479   _dictionary->print_free_lists(st);
 480 }
 481 
 482 class BlkPrintingClosure: public BlkClosure {
 483   const CMSCollector*             _collector;
 484   const CompactibleFreeListSpace* _sp;
 485   const CMSBitMap*                _live_bit_map;
 486   const bool                      _post_remark;
 487   outputStream*                   _st;
 488 public:
 489   BlkPrintingClosure(const CMSCollector* collector,
 490                      const CompactibleFreeListSpace* sp,
 491                      const CMSBitMap* live_bit_map,
 492                      outputStream* st):
 493     _collector(collector),
 494     _sp(sp),
 495     _live_bit_map(live_bit_map),
 496     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 497     _st(st) { }
 498   size_t do_blk(HeapWord* addr);
 499 };
 500 
 501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 502   size_t sz = _sp->block_size_no_stall(addr, _collector);
 503   assert(sz != 0, "Should always be able to compute a size");
 504   if (_sp->block_is_obj(addr)) {
 505     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 506     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 507       p2i(addr),
 508       dead ? "dead" : "live",
 509       sz,
 510       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 511     if (CMSPrintObjectsInDump && !dead) {
 512       oop(addr)->print_on(_st);
 513       _st->print_cr("--------------------------------------");
 514     }
 515   } else { // free block
 516     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 517       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 518     if (CMSPrintChunksInDump) {
 519       ((FreeChunk*)addr)->print_on(_st);
 520       _st->print_cr("--------------------------------------");
 521     }
 522   }
 523   return sz;
 524 }
 525 
 526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 527   outputStream* st) {
 528   st->print_cr("\n=========================");
 529   st->print_cr("Block layout in CMS Heap:");
 530   st->print_cr("=========================");
 531   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 532   blk_iterate(&bpcl);
 533 
 534   st->print_cr("\n=======================================");
 535   st->print_cr("Order & Layout of Promotion Info Blocks");
 536   st->print_cr("=======================================");
 537   print_promo_info_blocks(st);
 538 
 539   st->print_cr("\n===========================");
 540   st->print_cr("Order of Indexed Free Lists");
 541   st->print_cr("===========================");
 542   print_indexed_free_lists(st);
 543 
 544   st->print_cr("\n=================================");
 545   st->print_cr("Order of Free Lists in Dictionary");
 546   st->print_cr("=================================");
 547   print_dictionary_free_lists(st);
 548 }
 549 
 550 
 551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 552   assert_lock_strong(&_freelistLock);
 553   assert(PrintFLSStatistics != 0, "Reporting error");
 554   _dictionary->report_statistics();
 555   if (PrintFLSStatistics > 1) {
 556     reportIndexedFreeListStatistics();
 557     size_t total_size = totalSizeInIndexedFreeLists() +
 558                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 559     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 560   }
 561 }
 562 
 563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 564   assert_lock_strong(&_freelistLock);
 565   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 566                       "--------------------------------\n");
 567   size_t total_size = totalSizeInIndexedFreeLists();
 568   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 569   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 570   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 571   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 572   if (free_blocks != 0) {
 573     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 574   }
 575 }
 576 
 577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 578   size_t res = 0;
 579   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 580     debug_only(
 581       ssize_t recount = 0;
 582       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 583          fc = fc->next()) {
 584         recount += 1;
 585       }
 586       assert(recount == _indexedFreeList[i].count(),
 587         "Incorrect count in list");
 588     )
 589     res += _indexedFreeList[i].count();
 590   }
 591   return res;
 592 }
 593 
 594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 595   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 596     if (_indexedFreeList[i].head() != NULL) {
 597       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 598       return (size_t)i;
 599     }
 600   }
 601   return 0;
 602 }
 603 
 604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 605   HeapWord* prevEnd = end();
 606   assert(prevEnd != value, "unnecessary set_end call");
 607   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 608         "New end is below unallocated block");
 609   _end = value;
 610   if (prevEnd != NULL) {
 611     // Resize the underlying block offset table.
 612     _bt.resize(pointer_delta(value, bottom()));
 613     if (value <= prevEnd) {
 614       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 615              "New end is below unallocated block");
 616     } else {
 617       // Now, take this new chunk and add it to the free blocks.
 618       // Note that the BOT has not yet been updated for this block.
 619       size_t newFcSize = pointer_delta(value, prevEnd);
 620       // XXX This is REALLY UGLY and should be fixed up. XXX
 621       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 622         // Mark the boundary of the new block in BOT
 623         _bt.mark_block(prevEnd, value);
 624         // put it all in the linAB
 625         MutexLockerEx x(parDictionaryAllocLock(),
 626                         Mutex::_no_safepoint_check_flag);
 627         _smallLinearAllocBlock._ptr = prevEnd;
 628         _smallLinearAllocBlock._word_size = newFcSize;
 629         repairLinearAllocBlock(&_smallLinearAllocBlock);
 630         // Births of chunks put into a LinAB are not recorded.  Births
 631         // of chunks as they are allocated out of a LinAB are.
 632       } else {
 633         // Add the block to the free lists, if possible coalescing it
 634         // with the last free block, and update the BOT and census data.
 635         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 636       }
 637     }
 638   }
 639 }
 640 
 641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 642   CompactibleFreeListSpace* _cfls;
 643   CMSCollector* _collector;
 644 protected:
 645   // Override.
 646 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 647   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 648                                        HeapWord* bottom, HeapWord* top, \
 649                                        ClosureType* cl);                \
 650       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 651                                        HeapWord* bottom, HeapWord* top, \
 652                                        ClosureType* cl);                \
 653     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 654                                        HeapWord* bottom, HeapWord* top, \
 655                                        ClosureType* cl)
 656   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 657   walk_mem_region_with_cl_DECL(FilteringClosure);
 658 
 659 public:
 660   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 661                       CMSCollector* collector,
 662                       ExtendedOopClosure* cl,
 663                       CardTableModRefBS::PrecisionStyle precision,
 664                       HeapWord* boundary) :
 665     Filtering_DCTOC(sp, cl, precision, boundary),
 666     _cfls(sp), _collector(collector) {}
 667 };
 668 
 669 // We de-virtualize the block-related calls below, since we know that our
 670 // space is a CompactibleFreeListSpace.
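// Spelling the calls as _cfls->CompactibleFreeListSpace::block_size(...) with an
// explicit class qualifier binds them statically, so the hot loops below avoid
// virtual dispatch for these block-related queries.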
 671 
 672 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 673 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 674                                                  HeapWord* bottom,              \
 675                                                  HeapWord* top,                 \
 676                                                  ClosureType* cl) {             \
 677    bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;                 \
 678    if (is_par) {                                                                \
 679      assert(GenCollectedHeap::heap()->n_par_threads() ==                        \
 680             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
 681      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 682    } else {                                                                     \
 683      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 684    }                                                                            \
 685 }                                                                               \
 686 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 687                                                       HeapWord* bottom,         \
 688                                                       HeapWord* top,            \
 689                                                       ClosureType* cl) {        \
 690   /* Skip parts that are before "mr", in case "block_start" sent us             \
 691      back too far. */                                                           \
 692   HeapWord* mr_start = mr.start();                                              \
 693   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 694   HeapWord* next = bottom + bot_size;                                           \
 695   while (next < mr_start) {                                                     \
 696     bottom = next;                                                              \
 697     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 698     next = bottom + bot_size;                                                   \
 699   }                                                                             \
 700                                                                                 \
 701   while (bottom < top) {                                                        \
 702     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 703         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 704                     oop(bottom)) &&                                             \
 705         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 706       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 707       bottom += _cfls->adjustObjectSize(word_sz);                               \
 708     } else {                                                                    \
 709       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 710     }                                                                           \
 711   }                                                                             \
 712 }                                                                               \
 713 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 714                                                         HeapWord* bottom,       \
 715                                                         HeapWord* top,          \
 716                                                         ClosureType* cl) {      \
 717   /* Skip parts that are before "mr", in case "block_start" sent us             \
 718      back too far. */                                                           \
 719   HeapWord* mr_start = mr.start();                                              \
 720   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 721   HeapWord* next = bottom + bot_size;                                           \
 722   while (next < mr_start) {                                                     \
 723     bottom = next;                                                              \
 724     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 725     next = bottom + bot_size;                                                   \
 726   }                                                                             \
 727                                                                                 \
 728   while (bottom < top) {                                                        \
 729     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 730         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 731                     oop(bottom)) &&                                             \
 732         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 733       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 734       bottom += _cfls->adjustObjectSize(word_sz);                               \
 735     } else {                                                                    \
 736       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 737     }                                                                           \
 738   }                                                                             \
 739 }
 740 
 741 // (There are only two of these, rather than N, because the split is due
 742 // only to the introduction of the FilteringClosure, a local part of the
 743 // impl of this abstraction.)
 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 745 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 746 
 747 DirtyCardToOopClosure*
 748 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 749                                       CardTableModRefBS::PrecisionStyle precision,
 750                                       HeapWord* boundary) {
 751   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 752 }
 753 
 754 
 755 // Note on locking for the space iteration functions:
 756 // since the collector's iteration activities are concurrent with
 757 // allocation activities by mutators, absent a suitable mutual exclusion
 758 // mechanism the iterators may go awry. For instance a block being iterated
 759 // may suddenly be allocated or divided up and part of it allocated and
 760 // so on.
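// The iterators below therefore require the free list lock to be held (see the
// assert_lock_strong() checks), which keeps the block structure stable while
// it is being walked.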
 761 
 762 // Apply the given closure to each block in the space.
 763 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 764   assert_lock_strong(freelistLock());
 765   HeapWord *cur, *limit;
 766   for (cur = bottom(), limit = end(); cur < limit;
 767        cur += cl->do_blk_careful(cur));
 768 }
 769 
 770 // Apply the given closure to each block in the space.
 771 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 772   assert_lock_strong(freelistLock());
 773   HeapWord *cur, *limit;
 774   for (cur = bottom(), limit = end(); cur < limit;
 775        cur += cl->do_blk(cur));
 776 }
 777 
 778 // Apply the given closure to each oop in the space.
 779 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 780   assert_lock_strong(freelistLock());
 781   HeapWord *cur, *limit;
 782   size_t curSize;
 783   for (cur = bottom(), limit = end(); cur < limit;
 784        cur += curSize) {
 785     curSize = block_size(cur);
 786     if (block_is_obj(cur)) {
 787       oop(cur)->oop_iterate(cl);
 788     }
 789   }
 790 }
 791 
 792 // NOTE: In the following methods, in order to safely be able to
 793 // apply the closure to an object, we need to be sure that the
 794 // object has been initialized. We are guaranteed that an object
 795 // is initialized if we are holding the Heap_lock with the
 796 // world stopped.
 797 void CompactibleFreeListSpace::verify_objects_initialized() const {
 798   if (is_init_completed()) {
 799     assert_locked_or_safepoint(Heap_lock);
 800     if (Universe::is_fully_initialized()) {
 801       guarantee(SafepointSynchronize::is_at_safepoint(),
 802                 "Required for objects to be initialized");
 803     }
 804   } // else make a concession at vm start-up
 805 }
 806 
 807 // Apply the given closure to each object in the space
 808 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 809   assert_lock_strong(freelistLock());
 810   NOT_PRODUCT(verify_objects_initialized());
 811   HeapWord *cur, *limit;
 812   size_t curSize;
 813   for (cur = bottom(), limit = end(); cur < limit;
 814        cur += curSize) {
 815     curSize = block_size(cur);
 816     if (block_is_obj(cur)) {
 817       blk->do_object(oop(cur));
 818     }
 819   }
 820 }
 821 
 822 // Apply the given closure to each live object in the space.
 823 //   The use of CompactibleFreeListSpace
 824 // by ConcurrentMarkSweepGeneration for concurrent GCs allows
 825 // objects in the space to hold references to objects that are no longer
 826 // valid.  For example, an object may reference another object
 827 // that has already been swept up (collected).  This method uses
 828 // obj_is_alive() to determine whether it is safe to apply the closure to
 829 // an object.  See obj_is_alive() for details on how the liveness of an
 830 // object is decided.
 831 
 832 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 833   assert_lock_strong(freelistLock());
 834   NOT_PRODUCT(verify_objects_initialized());
 835   HeapWord *cur, *limit;
 836   size_t curSize;
 837   for (cur = bottom(), limit = end(); cur < limit;
 838        cur += curSize) {
 839     curSize = block_size(cur);
 840     if (block_is_obj(cur) && obj_is_alive(cur)) {
 841       blk->do_object(oop(cur));
 842     }
 843   }
 844 }
 845 
 846 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 847                                                   UpwardsObjectClosure* cl) {
 848   assert_locked(freelistLock());
 849   NOT_PRODUCT(verify_objects_initialized());
 850   assert(!mr.is_empty(), "Should be non-empty");
 851   // We use MemRegion(bottom(), end()) rather than used_region() below
 852   // because the two are not necessarily equal for some kinds of
 853   // spaces, in particular, certain kinds of free list spaces.
 854   // We could use the more complicated but more precise:
 855   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 856   // but the slight imprecision seems acceptable in the assertion check.
 857   assert(MemRegion(bottom(), end()).contains(mr),
 858          "Should be within used space");
 859   HeapWord* prev = cl->previous();   // max address from last time
 860   if (prev >= mr.end()) { // nothing to do
 861     return;
 862   }
 863   // This assert will not work when we go from cms space to perm
 864   // space and use the same closure. Easy fix deferred for later. XXX YSR
 865   // assert(prev == NULL || contains(prev), "Should be within space");
 866 
 867   bool last_was_obj_array = false;
 868   HeapWord *blk_start_addr, *region_start_addr;
 869   if (prev > mr.start()) {
 870     region_start_addr = prev;
 871     blk_start_addr    = prev;
 872     // The previous invocation may have pushed "prev" beyond the
 873     // last allocated block, yet there may still be blocks
 874     // in this region due to a particular coalescing policy.
 875     // Relax the assertion so that the case where the unallocated
 876     // block is maintained and "prev" is beyond the unallocated
 877     // block does not cause the assertion to fire.
 878     assert((BlockOffsetArrayUseUnallocatedBlock &&
 879             (!is_in(prev))) ||
 880            (blk_start_addr == block_start(region_start_addr)), "invariant");
 881   } else {
 882     region_start_addr = mr.start();
 883     blk_start_addr    = block_start(region_start_addr);
 884   }
 885   HeapWord* region_end_addr = mr.end();
 886   MemRegion derived_mr(region_start_addr, region_end_addr);
 887   while (blk_start_addr < region_end_addr) {
 888     const size_t size = block_size(blk_start_addr);
 889     if (block_is_obj(blk_start_addr)) {
 890       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 891     } else {
 892       last_was_obj_array = false;
 893     }
 894     blk_start_addr += size;
 895   }
 896   if (!last_was_obj_array) {
 897     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 898            "Should be within (closed) used space");
 899     assert(blk_start_addr > prev, "Invariant");
 900     cl->set_previous(blk_start_addr); // min address for next time
 901   }
 902 }
 903 
 904 // Callers of this iterator beware: The closure application should
 905 // be robust in the face of uninitialized objects and should (always)
 906 // return a correct size so that the next addr + size below gives us a
 907 // valid block boundary. [See for instance,
 908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 909 // in ConcurrentMarkSweepGeneration.cpp.]
 910 HeapWord*
 911 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 912   ObjectClosureCareful* cl) {
 913   assert_lock_strong(freelistLock());
 914   // Can't use used_region() below because it may not necessarily
 915   // be the same as [bottom(),end()); although we could
 916   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 917   // that appears too cumbersome, so we just do the simpler check
 918   // in the assertion below.
 919   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 920          "mr should be non-empty and within used space");
 921   HeapWord *addr, *end;
 922   size_t size;
 923   for (addr = block_start_careful(mr.start()), end  = mr.end();
 924        addr < end; addr += size) {
 925     FreeChunk* fc = (FreeChunk*)addr;
 926     if (fc->is_free()) {
 927       // Since we hold the free list lock, which protects direct
 928       // allocation in this generation by mutators, a free object
 929       // will remain free throughout this iteration code.
 930       size = fc->size();
 931     } else {
 932       // Note that the object need not necessarily be initialized,
 933       // because (for instance) the free list lock does NOT protect
 934       // object initialization. The closure application below must
 935       // therefore be correct in the face of uninitialized objects.
 936       size = cl->do_object_careful_m(oop(addr), mr);
 937       if (size == 0) {
 938         // An unparsable object found. Signal early termination.
 939         return addr;
 940       }
 941     }
 942   }
 943   return NULL;
 944 }
 945 
 946 
 947 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 948   NOT_PRODUCT(verify_objects_initialized());
 949   return _bt.block_start(p);
 950 }
 951 
 952 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 953   return _bt.block_start_careful(p);
 954 }
 955 
 956 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 957   NOT_PRODUCT(verify_objects_initialized());
 958   // This must be volatile, or else there is a danger that the compiler
 959   // will compile the code below into a sometimes-infinite loop, by keeping
 960   // the value read the first time in a register.
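  // The retry protocol: if the block looks like a free chunk, read its size
  // and then, behind an acquire fence, re-check the free bit; if it looks
  // like an object, wait for a non-NULL klass word before trusting its size.
  // If neither read yields a consistent view, loop and try again.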
 961   while (true) {
 962     // We must do this until we get a consistent view of the object.
 963     if (FreeChunk::indicatesFreeChunk(p)) {
 964       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 965       size_t res = fc->size();
 966 
 967       // Bugfix for systems with weak memory model (PPC64/IA64). The
 968       // block's free bit was set and we have read the size of the
 969       // block. Acquire and check the free bit again. If the block is
 970       // still free, the read size is correct.
 971       OrderAccess::acquire();
 972 
 973       // If the object is still a free chunk, return the size, else it
 974       // has been allocated so try again.
 975       if (FreeChunk::indicatesFreeChunk(p)) {
 976         assert(res != 0, "Block size should not be 0");
 977         return res;
 978       }
 979     } else {
 980       // must read from what 'p' points to in each loop.
 981       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 982       if (k != NULL) {
 983         assert(k->is_klass(), "Should really be klass oop.");
 984         oop o = (oop)p;
 985         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 986 
 987         // Bugfix for systems with weak memory model (PPC64/IA64).
 988         // The object o may be an array. Acquire to make sure that the array
 989         // size (third word) is consistent.
 990         OrderAccess::acquire();
 991 
 992         size_t res = o->size_given_klass(k);
 993         res = adjustObjectSize(res);
 994         assert(res != 0, "Block size should not be 0");
 995         return res;
 996       }
 997     }
 998   }
 999 }
1000 
1001 // TODO: Now that is_parsable is gone, we should combine these two functions.
1002 // A variant of the above that uses the Printezis bits for
1003 // unparsable but allocated objects. This avoids any possible
1004 // stalls waiting for mutators to initialize objects, and is
1005 // thus potentially faster than the variant above. However,
1006 // this variant may return a zero size for a block that is
1007 // under mutation and for which a consistent size cannot be
1008 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1009 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1010                                                      const CMSCollector* c)
1011 const {
1012   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1013   // This must be volatile, or else there is a danger that the compiler
1014   // will compile the code below into a sometimes-infinite loop, by keeping
1015   // the value read the first time in a register.
1016   DEBUG_ONLY(uint loops = 0;)
1017   while (true) {
1018     // We must do this until we get a consistent view of the object.
1019     if (FreeChunk::indicatesFreeChunk(p)) {
1020       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1021       size_t res = fc->size();
1022 
1023       // Bugfix for systems with weak memory model (PPC64/IA64). The
1024       // free bit of the block was set and we have read the size of
1025       // the block. Acquire and check the free bit again. If the
1026       // block is still free, the read size is correct.
1027       OrderAccess::acquire();
1028 
1029       if (FreeChunk::indicatesFreeChunk(p)) {
1030         assert(res != 0, "Block size should not be 0");
1031         assert(loops == 0, "Should be 0");
1032         return res;
1033       }
1034     } else {
1035       // must read from what 'p' points to in each loop.
1036       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1037       // We trust the size of any object that has a non-NULL
1038       // klass and (for those in the perm gen) is parsable
1039       // -- irrespective of its conc_safe-ty.
1040       if (k != NULL) {
1041         assert(k->is_klass(), "Should really be klass oop.");
1042         oop o = (oop)p;
1043         assert(o->is_oop(), "Should be an oop");
1044 
1045         // Bugfix for systems with weak memory model (PPC64/IA64).
1046         // The object o may be an array. Acquire to make sure that the array
1047         // size (third word) is consistent.
1048         OrderAccess::acquire();
1049 
1050         size_t res = o->size_given_klass(k);
1051         res = adjustObjectSize(res);
1052         assert(res != 0, "Block size should not be 0");
1053         return res;
1054       } else {
1055         // May return 0 if P-bits not present.
1056         return c->block_size_if_printezis_bits(p);
1057       }
1058     }
1059     assert(loops == 0, "Can loop at most once");
1060     DEBUG_ONLY(loops++;)
1061   }
1062 }
1063 
1064 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1065   NOT_PRODUCT(verify_objects_initialized());
1066   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1067   FreeChunk* fc = (FreeChunk*)p;
1068   if (fc->is_free()) {
1069     return fc->size();
1070   } else {
1071     // Ignore mark word because this may be a recently promoted
1072     // object whose mark word is used to chain together grey
1073     // objects (the last one would have a null value).
1074     assert(oop(p)->is_oop(true), "Should be an oop");
1075     return adjustObjectSize(oop(p)->size());
1076   }
1077 }
1078 
1079 // This implementation assumes that the property of "being an object" is
1080 // stable.  But being a free chunk may not be (because of parallel
1081 // promotion).
1082 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1083   FreeChunk* fc = (FreeChunk*)p;
1084   assert(is_in_reserved(p), "Should be in space");
1085   // When doing a mark-sweep-compact of the CMS generation, this
1086   // assertion may fail because prepare_for_compaction() uses
1087   // space that is garbage to maintain information on ranges of
1088   // live objects so that these live ranges can be moved as a whole.
1089   // Comment out this assertion until that problem can be solved
1090   // (i.e., the block start calculation may look at objects
1091   // at addresses below "p" in finding the object that contains "p",
1092   // and those objects (if garbage) may have been modified to hold
1093   // live range information).
1094   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1095   //        "Should be a block boundary");
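  // A block whose first word still carries the FreeChunk "free" indication is
  // not an object; otherwise a non-NULL klass word tells us the block has been
  // (at least header-) initialized as an object.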
1096   if (FreeChunk::indicatesFreeChunk(p)) return false;
1097   Klass* k = oop(p)->klass_or_null();
1098   if (k != NULL) {
1099     // Ignore mark word because it may have been used to
1100     // chain together promoted objects (the last one
1101     // would have a null value).
1102     assert(oop(p)->is_oop(true), "Should be an oop");
1103     return true;
1104   } else {
1105     return false;  // Was not an object at the start of collection.
1106   }
1107 }
1108 
1109 // Check if the object is alive. This fact is checked either by consulting
1110 // the main marking bitmap in the sweeping phase or, if it's a permanent
1111 // generation and we're not in the sweeping phase, by checking the
1112 // perm_gen_verify_bit_map where we store the "deadness" information if
1113 // we did not sweep the perm gen in the most recent previous GC cycle.
1114 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1115   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1116          "Else races are possible");
1117   assert(block_is_obj(p), "The address should point to an object");
1118 
1119   // If we're sweeping, we use object liveness information from the main bit map
1120   // for both perm gen and old gen.
1121   // We don't need to lock the bitmap (live_map or dead_map below), because
1122   // EITHER we are in the middle of the sweeping phase, and the
1123   // main marking bit map (live_map below) is locked,
1124   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1125   // is stable, because it's mutated only in the sweeping phase.
1126   // NOTE: This method is also used by jmap where, if class unloading is
1127   // off, the results can return "false" for legitimate perm objects,
1128   // when we are not in the midst of a sweeping phase, which can result
1129   // in jmap not reporting certain perm gen objects. This will be moot
1130   // if/when the perm gen goes away in the future.
1131   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1132     CMSBitMap* live_map = _collector->markBitMap();
1133     return live_map->par_isMarked((HeapWord*) p);
1134   }
1135   return true;
1136 }
1137 
1138 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1139   FreeChunk* fc = (FreeChunk*)p;
1140   assert(is_in_reserved(p), "Should be in space");
1141   assert(_bt.block_start(p) == p, "Should be a block boundary");
1142   if (!fc->is_free()) {
1143     // Ignore mark word because it may have been used to
1144     // chain together promoted objects (the last one
1145     // would have a null value).
1146     assert(oop(p)->is_oop(true), "Should be an oop");
1147     return true;
1148   }
1149   return false;
1150 }
1151 
1152 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1153 // approximate answer if you don't hold the free list lock when you call this.
1154 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1155   size_t size = 0;
1156   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1157     debug_only(
1158       // We may be calling here without the lock in which case we
1159       // won't do this modest sanity check.
1160       if (freelistLock()->owned_by_self()) {
1161         size_t total_list_size = 0;
1162         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1163           fc = fc->next()) {
1164           total_list_size += i;
1165         }
1166         assert(total_list_size == i * _indexedFreeList[i].count(),
1167                "Count in list is incorrect");
1168       }
1169     )
1170     size += i * _indexedFreeList[i].count();
1171   }
1172   return size;
1173 }
1174 
1175 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1176   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1177   return allocate(size);
1178 }
1179 
1180 HeapWord*
1181 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1182   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1183 }
1184 
1185 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1186   assert_lock_strong(freelistLock());
1187   HeapWord* res = NULL;
1188   assert(size == adjustObjectSize(size),
1189          "use adjustObjectSize() before calling into allocate()");
1190 
1191   if (_adaptive_freelists) {
1192     res = allocate_adaptive_freelists(size);
1193   } else {  // non-adaptive free lists
1194     res = allocate_non_adaptive_freelists(size);
1195   }
1196 
1197   if (res != NULL) {
1198     // check that res does lie in this space!
1199     assert(is_in_reserved(res), "Not in this space!");
1200     assert(is_aligned((void*)res), "alignment check");
1201 
1202     FreeChunk* fc = (FreeChunk*)res;
1203     fc->markNotFree();
1204     assert(!fc->is_free(), "shouldn't be marked free");
1205     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1206     // Verify that the block offset table shows this to
1207     // be a single block, but not one which is unallocated.
1208     _bt.verify_single_block(res, size);
1209     _bt.verify_not_unallocated(res, size);
1210     // mangle a just allocated object with a distinct pattern.
1211     debug_only(fc->mangleAllocated(size));
1212   }
1213 
1214   return res;
1215 }
1216 
1217 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1218   HeapWord* res = NULL;
1219   // try and use linear allocation for smaller blocks
1220   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1221     // if successful, the following also adjusts block offset table
1222     res = getChunkFromSmallLinearAllocBlock(size);
1223   }
1224   // Else triage to indexed lists for smaller sizes
1225   if (res == NULL) {
1226     if (size < SmallForDictionary) {
1227       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1228     } else {
1229       // else get it from the big dictionary; if even this doesn't
1230       // work we are out of luck.
1231       res = (HeapWord*)getChunkFromDictionaryExact(size);
1232     }
1233   }
1234 
1235   return res;
1236 }
1237 
1238 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1239   assert_lock_strong(freelistLock());
1240   HeapWord* res = NULL;
1241   assert(size == adjustObjectSize(size),
1242          "use adjustObjectSize() before calling into allocate()");
1243 
1244   // Strategy
1245   //   if small
1246   //     exact size from small object indexed list
1247   //     small or large linear allocation block (linAB) as appropriate
1248   //     take from lists of greater sized chunks
1249   //   else
1250   //     dictionary
1251   //     small or large linear allocation block if it has the space
1252   // Try allocating exact size from indexTable first
1253   if (size < IndexSetSize) {
1254     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1255     if(res != NULL) {
1256       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1257         "Not removed from free list");
1258       // no block offset table adjustment is necessary on blocks in
1259       // the indexed lists.
1260 
1261     // Try allocating from the small LinAB
1262     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1263         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1264         // if successful, the above also adjusts block offset table
1265         // Note that this call will refill the LinAB to
1266         // satisfy the request.  This is different from
1267         // evm.
1268         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1269     } else {
1270       // Raid the exact free lists larger than size, even if they are not
1271       // overpopulated.
1272       res = (HeapWord*) getChunkFromGreater(size);
1273     }
1274   } else {
1275     // Big objects get allocated directly from the dictionary.
1276     res = (HeapWord*) getChunkFromDictionaryExact(size);
1277     if (res == NULL) {
1278       // Try hard not to fail since an allocation failure will likely
1279       // trigger a synchronous GC.  Try to get the space from the
1280       // allocation blocks.
1281       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1282     }
1283   }
1284 
1285   return res;
1286 }
1287 
1288 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1289 // when promoting obj.
1290 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1291   // Depending on the object size, expansion may require refilling either a
1292   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2*MinChunkSize
1293   // is added because the dictionary may over-allocate to avoid fragmentation.
1294   size_t space = obj_size;
1295   if (!_adaptive_freelists) {
1296     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1297   }
1298   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1299   return space;
1300 }
1301 
1302 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1303   FreeChunk* ret;
1304 
1305   assert(numWords >= MinChunkSize, "Size is less than minimum");
1306   assert(linearAllocationWouldFail() || bestFitFirst(),
1307     "Should not be here");
1308 
1309   size_t i;
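       // Search for a chunk at least MinChunkSize words larger than the request,
       // so that the remainder left over after splitting off numWords is itself
       // a valid free chunk (see the "Chunk is too small" assert below).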
1310   size_t currSize = numWords + MinChunkSize;
1311   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1312   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1313     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1314     if (fl->head()) {
1315       ret = getFromListGreater(fl, numWords);
1316       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1317       return ret;
1318     }
1319   }
1320 
1321   currSize = MAX2((size_t)SmallForDictionary,
1322                   (size_t)(numWords + MinChunkSize));
1323 
1324   /* Try to get a chunk that satisfies request, while avoiding
1325      fragmentation that can't be handled. */
1326   {
1327     ret =  dictionary()->get_chunk(currSize);
1328     if (ret != NULL) {
1329       assert(ret->size() - numWords >= MinChunkSize,
1330              "Chunk is too small");
1331       _bt.allocated((HeapWord*)ret, ret->size());
1332       /* Carve returned chunk. */
1333       (void) splitChunkAndReturnRemainder(ret, numWords);
1334       /* Label this as no longer a free chunk. */
1335       assert(ret->is_free(), "This chunk should be free");
1336       ret->link_prev(NULL);
1337     }
1338     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1339     return ret;
1340   }
1341   ShouldNotReachHere();
1342 }
1343 
1344 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1345   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1346   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1347 }
1348 
1349 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1350   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1351          (_smallLinearAllocBlock._word_size == fc->size()),
1352          "Linear allocation block shows incorrect size");
1353   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1354           (_smallLinearAllocBlock._word_size == fc->size()));
1355 }
1356 
1357 // Check if the purported free chunk is present either as a linear
1358 // allocation block, the size-indexed table of (smaller) free blocks,
1359 // or the larger free blocks kept in the binary tree dictionary.
1360 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1361   if (verify_chunk_is_linear_alloc_block(fc)) {
1362     return true;
1363   } else if (fc->size() < IndexSetSize) {
1364     return verifyChunkInIndexedFreeLists(fc);
1365   } else {
1366     return dictionary()->verify_chunk_in_free_list(fc);
1367   }
1368 }
1369 
1370 #ifndef PRODUCT
1371 void CompactibleFreeListSpace::assert_locked() const {
1372   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1373 }
1374 
1375 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1376   CMSLockVerifier::assert_locked(lock);
1377 }
1378 #endif
1379 
1380 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1381   // In the parallel case, the main thread holds the free list lock
1382   // on behalf of the parallel threads.
1383   FreeChunk* fc;
1384   {
1385     // If GC is parallel, this might be called by several threads.
1386     // This should be rare enough that the locking overhead won't affect
1387     // the sequential code.
1388     MutexLockerEx x(parDictionaryAllocLock(),
1389                     Mutex::_no_safepoint_check_flag);
1390     fc = getChunkFromDictionary(size);
1391   }
1392   if (fc != NULL) {
1393     fc->dontCoalesce();
1394     assert(fc->is_free(), "Should be free, but not coalescable");
1395     // Verify that the block offset table shows this to
1396     // be a single block, but not one which is unallocated.
1397     _bt.verify_single_block((HeapWord*)fc, fc->size());
1398     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1399   }
1400   return fc;
1401 }
1402 
1403 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1404   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1405   assert_locked();
1406 
1407   // If we are tracking promotions, first ensure space for the
1408   // promotion (including spooling space for saving the header if necessary),
1409   // then allocate and copy, and finally track the promoted info if needed.
1410   // When tracking (see PromotionInfo::track()), the mark word may
1411   // be displaced, in which case restoration of the mark word
1412   // occurs in the (oop_since_save_marks_)iterate phase.
1413   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1414     return NULL;
1415   }
1416   // Call the allocate(size_t, bool) form directly to avoid the
1417   // additional call through the allocate(size_t) form.  Having
1418   // the compiler inline the call is problematic because allocate(size_t)
1419   // is a virtual method.
1420   HeapWord* res = allocate(adjustObjectSize(obj_size));
1421   if (res != NULL) {
1422     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1423     // if we should be tracking promotions, do so.
1424     if (_promoInfo.tracking()) {
1425         _promoInfo.track((PromotedObject*)res);
1426     }
1427   }
1428   return oop(res);
1429 }
1430 
1431 HeapWord*
1432 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1433   assert_locked();
1434   assert(size >= MinChunkSize, "minimum chunk size");
1435   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1436     "maximum from smallLinearAllocBlock");
1437   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1438 }
1439 
1440 HeapWord*
1441 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1442                                                        size_t size) {
1443   assert_locked();
1444   assert(size >= MinChunkSize, "too small");
1445   HeapWord* res = NULL;
1446   // Try to do linear allocation from blk, making sure the block has not been exhausted.
1447   if (blk->_word_size == 0) {
1448     // We have probably been unable to refill this block, either in the
1449     // prologue or after it was exhausted by the last linear allocation.
1450     // Bail out until next time.
1451     assert(blk->_ptr == NULL, "consistency check");
1452     return NULL;
1453   }
1454   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1455   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1456   if (res != NULL) return res;
1457 
1458   // about to exhaust this linear allocation block
1459   if (blk->_word_size == size) { // exactly satisfied
1460     res = blk->_ptr;
1461     _bt.allocated(res, blk->_word_size);
1462   } else if (size + MinChunkSize <= blk->_refillSize) {
1463     size_t sz = blk->_word_size;
1464     // Update _unallocated_block if the size is such that the chunk would be
1465     // returned to the indexed free lists.  All other chunks in the indexed
1466     // free lists are allocated from the dictionary, so _unallocated_block
1467     // has already been adjusted for them.  Do it here so that the cost is
1468     // incurred uniformly for all chunks added back to the indexed free lists.
1469     if (sz < SmallForDictionary) {
1470       _bt.allocated(blk->_ptr, sz);
1471     }
1472     // Return the chunk that isn't big enough, and then refill below.
1473     addChunkToFreeLists(blk->_ptr, sz);
1474     split_birth(sz);
1475     // Don't keep statistics on adding back chunk from a LinAB.
1476   } else {
1477     // A refilled block would not satisfy the request.
1478     return NULL;
1479   }
1480 
1481   blk->_ptr = NULL; blk->_word_size = 0;
1482   refillLinearAllocBlock(blk);
1483   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1484          "block was replenished");
1485   if (res != NULL) {
1486     split_birth(size);
1487     repairLinearAllocBlock(blk);
1488   } else if (blk->_ptr != NULL) {
1489     res = blk->_ptr;
1490     size_t blk_size = blk->_word_size;
1491     blk->_word_size -= size;
1492     blk->_ptr  += size;
1493     split_birth(size);
1494     repairLinearAllocBlock(blk);
1495     // Update BOT last so that other (parallel) GC threads see a consistent
1496     // view of the BOT and free blocks.
1497     // Above must occur before BOT is updated below.
1498     OrderAccess::storestore();
1499     _bt.split_block(res, blk_size, size);  // adjust block offset table
1500   }
1501   return res;
1502 }
1503 
1504 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1505                                         LinearAllocBlock* blk,
1506                                         size_t size) {
1507   assert_locked();
1508   assert(size >= MinChunkSize, "too small");
1509 
1510   HeapWord* res = NULL;
1511   // This is the common case.  Keep it simple.
1512   if (blk->_word_size >= size + MinChunkSize) {
1513     assert(blk->_ptr != NULL, "consistency check");
1514     res = blk->_ptr;
1515     // Note that the BOT is up-to-date for the linAB before allocation.  It
1516     // indicates the start of the linAB.  The split_block() updates the
1517     // BOT for the linAB after the allocation (indicates the start of the
1518     // next chunk to be allocated).
1519     size_t blk_size = blk->_word_size;
1520     blk->_word_size -= size;
1521     blk->_ptr  += size;
1522     split_birth(size);
1523     repairLinearAllocBlock(blk);
1524     // Update BOT last so that other (parallel) GC threads see a consistent
1525     // view of the BOT and free blocks.
1526     // Above must occur before BOT is updated below.
1527     OrderAccess::storestore();
1528     _bt.split_block(res, blk_size, size);  // adjust block offset table
1529     _bt.allocated(res, size);
1530   }
1531   return res;
1532 }
1533 
1534 FreeChunk*
1535 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1536   assert_locked();
1537   assert(size < SmallForDictionary, "just checking");
1538   FreeChunk* res;
1539   res = _indexedFreeList[size].get_chunk_at_head();
1540   if (res == NULL) {
1541     res = getChunkFromIndexedFreeListHelper(size);
1542   }
1543   _bt.verify_not_unallocated((HeapWord*) res, size);
1544   assert(res == NULL || res->size() == size, "Incorrect block size");
1545   return res;
1546 }
1547 
1548 FreeChunk*
1549 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1550   bool replenish) {
1551   assert_locked();
1552   FreeChunk* fc = NULL;
1553   if (size < SmallForDictionary) {
1554     assert(_indexedFreeList[size].head() == NULL ||
1555       _indexedFreeList[size].surplus() <= 0,
1556       "List for this size should be empty or underpopulated");
1557     // Try best fit in exact lists before replenishing the list
1558     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1559       // Replenish list.
1560       //
1561       // Things tried that failed.
1562       //   Tried allocating out of the two LinAB's first before
1563       // replenishing lists.
1564       //   Tried small linAB of size 256 (size in indexed list)
1565       // and replenishing indexed lists from the small linAB.
1566       //
1567       FreeChunk* newFc = NULL;
1568       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
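           // replenish_size is CMSIndexedFreeListReplenish times the requested
           // size: the idea is to obtain one chunk of that size and, when
           // replenishing is enabled below, carve it into blocks of the
           // requested size, returning one to the caller and queuing the rest
           // on the indexed list.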
1569       if (replenish_size < SmallForDictionary) {
1570         // Do not replenish from an underpopulated size.
1571         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1572             _indexedFreeList[replenish_size].head() != NULL) {
1573           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1574         } else if (bestFitFirst()) {
1575           newFc = bestFitSmall(replenish_size);
1576         }
1577       }
1578       if (newFc == NULL && replenish_size > size) {
1579         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1580         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1581       }
1582       // Note: The split-death stats update for the block obtained above
1583       // is recorded below, precisely when we know we are actually going
1584       // to split it into more than one piece.
1585       if (newFc != NULL) {
1586         if  (replenish || CMSReplenishIntermediate) {
1587           // Replenish this list and return one block to caller.
1588           size_t i;
1589           FreeChunk *curFc, *nextFc;
1590           size_t num_blk = newFc->size() / size;
1591           assert(num_blk >= 1, "Smaller than requested?");
1592           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1593           if (num_blk > 1) {
1594             // we are sure we will be splitting the block just obtained
1595             // into multiple pieces; record the split-death of the original
1596             splitDeath(replenish_size);
1597           }
1598           // carve up and link blocks 0, ..., num_blk - 2
1599           // The last chunk is not added to the lists but is returned as the
1600           // free chunk.
1601           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1602                i = 0;
1603                i < (num_blk - 1);
1604                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1605                i++) {
1606             curFc->set_size(size);
1607             // Don't record this as a return in order to try and
1608             // determine the "returns" from a GC.
1609             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1610             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1611             _bt.mark_block((HeapWord*)curFc, size);
1612             split_birth(size);
1613             // Don't record the initial population of the indexed list
1614             // as a split birth.
1615           }
1616 
1617           // check that the arithmetic was OK above
1618           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1619             "inconsistency in carving newFc");
1620           curFc->set_size(size);
1621           _bt.mark_block((HeapWord*)curFc, size);
1622           split_birth(size);
1623           fc = curFc;
1624         } else {
1625           // Return entire block to caller
1626           fc = newFc;
1627         }
1628       }
1629     }
1630   } else {
1631     // Get a free chunk from the free chunk dictionary to be returned to
1632     // replenish the indexed free list.
1633     fc = getChunkFromDictionaryExact(size);
1634   }
1635   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1636   return fc;
1637 }
1638 
1639 FreeChunk*
1640 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1641   assert_locked();
1642   FreeChunk* fc = _dictionary->get_chunk(size,
1643                                          FreeBlockDictionary<FreeChunk>::atLeast);
1644   if (fc == NULL) {
1645     return NULL;
1646   }
1647   _bt.allocated((HeapWord*)fc, fc->size());
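       // Split off the requested size only if the remainder would itself be at
       // least MinChunkSize words, i.e. a valid free chunk; otherwise hand back
       // the slightly larger chunk as-is (see the size asserts below).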
1648   if (fc->size() >= size + MinChunkSize) {
1649     fc = splitChunkAndReturnRemainder(fc, size);
1650   }
1651   assert(fc->size() >= size, "chunk too small");
1652   assert(fc->size() < size + MinChunkSize, "chunk too big");
1653   _bt.verify_single_block((HeapWord*)fc, fc->size());
1654   return fc;
1655 }
1656 
1657 FreeChunk*
1658 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1659   assert_locked();
1660   FreeChunk* fc = _dictionary->get_chunk(size,
1661                                          FreeBlockDictionary<FreeChunk>::atLeast);
1662   if (fc == NULL) {
1663     return fc;
1664   }
1665   _bt.allocated((HeapWord*)fc, fc->size());
1666   if (fc->size() == size) {
1667     _bt.verify_single_block((HeapWord*)fc, size);
1668     return fc;
1669   }
1670   assert(fc->size() > size, "get_chunk() guarantee");
1671   if (fc->size() < size + MinChunkSize) {
1672     // Return the chunk to the dictionary and go get a bigger one.
1673     returnChunkToDictionary(fc);
1674     fc = _dictionary->get_chunk(size + MinChunkSize,
1675                                 FreeBlockDictionary<FreeChunk>::atLeast);
1676     if (fc == NULL) {
1677       return NULL;
1678     }
1679     _bt.allocated((HeapWord*)fc, fc->size());
1680   }
1681   assert(fc->size() >= size + MinChunkSize, "tautology");
1682   fc = splitChunkAndReturnRemainder(fc, size);
1683   assert(fc->size() == size, "chunk is wrong size");
1684   _bt.verify_single_block((HeapWord*)fc, size);
1685   return fc;
1686 }
1687 
1688 void
1689 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1690   assert_locked();
1691 
1692   size_t size = chunk->size();
1693   _bt.verify_single_block((HeapWord*)chunk, size);
1694   // adjust _unallocated_block downward, as necessary
1695   _bt.freed((HeapWord*)chunk, size);
1696   _dictionary->return_chunk(chunk);
1697 #ifndef PRODUCT
1698   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1699     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1700     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1701     tl->verify_stats();
1702   }
1703 #endif // PRODUCT
1704 }
1705 
1706 void
1707 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1708   assert_locked();
1709   size_t size = fc->size();
1710   _bt.verify_single_block((HeapWord*) fc, size);
1711   _bt.verify_not_unallocated((HeapWord*) fc, size);
1712   if (_adaptive_freelists) {
1713     _indexedFreeList[size].return_chunk_at_tail(fc);
1714   } else {
1715     _indexedFreeList[size].return_chunk_at_head(fc);
1716   }
1717 #ifndef PRODUCT
1718   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1719      _indexedFreeList[size].verify_stats();
1720   }
1721 #endif // PRODUCT
1722 }
1723 
1724 // Add the chunk to the free lists, coalescing it with the largest block in
1725 // the dictionary if that block ends exactly where the chunk begins, and
1726 // update the BOT and census data. We would of course have preferred to
1727 // coalesce with the last block, but it's currently less expensive to find
1728 // the largest block than it is to find the last.
1729 void
1730 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1731   HeapWord* chunk, size_t     size) {
1732   // check that the chunk does lie in this space!
1733   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1734   // One of the parallel gc task threads may be here
1735   // whilst others are allocating.
1736   Mutex* lock = &_parDictionaryAllocLock;
1737   FreeChunk* ec;
1738   {
1739     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1740     ec = dictionary()->find_largest_dict();  // get largest block
1741     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1742       // It's a coterminal block - we can coalesce.
1743       size_t old_size = ec->size();
1744       coalDeath(old_size);
1745       removeChunkFromDictionary(ec);
1746       size += old_size;
1747     } else {
1748       ec = (FreeChunk*)chunk;
1749     }
1750   }
1751   ec->set_size(size);
1752   debug_only(ec->mangleFreed(size));
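       // Choose the lock protecting the data structure the chunk will be added
       // to: the per-size indexed-list lock for small sizes, otherwise keep the
       // dictionary allocation lock chosen above.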
1753   if (size < SmallForDictionary) {
1754     lock = _indexedFreeListParLocks[size];
1755   }
1756   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1757   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1758   // Record the birth under the lock, since the recording involves
1759   // manipulation of the list on which the chunk lives; if the chunk
1760   // were allocated and happened to be the last one on the list,
1761   // the list could go away.
1762   coalBirth(size);
1763 }
1764 
1765 void
1766 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1767                                               size_t     size) {
1768   // check that the chunk does lie in this space!
1769   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1770   assert_locked();
1771   _bt.verify_single_block(chunk, size);
1772 
1773   FreeChunk* fc = (FreeChunk*) chunk;
1774   fc->set_size(size);
1775   debug_only(fc->mangleFreed(size));
1776   if (size < SmallForDictionary) {
1777     returnChunkToFreeList(fc);
1778   } else {
1779     returnChunkToDictionary(fc);
1780   }
1781 }
1782 
1783 void
1784 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1785   size_t size, bool coalesced) {
1786   assert_locked();
1787   assert(chunk != NULL, "null chunk");
1788   if (coalesced) {
1789     // repair BOT
1790     _bt.single_block(chunk, size);
1791   }
1792   addChunkToFreeLists(chunk, size);
1793 }
1794 
1795 // We _must_ find the purported chunk on our free lists;
1796 // we assert if we don't.
1797 void
1798 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1799   size_t size = fc->size();
1800   assert_locked();
1801   debug_only(verifyFreeLists());
1802   if (size < SmallForDictionary) {
1803     removeChunkFromIndexedFreeList(fc);
1804   } else {
1805     removeChunkFromDictionary(fc);
1806   }
1807   _bt.verify_single_block((HeapWord*)fc, size);
1808   debug_only(verifyFreeLists());
1809 }
1810 
1811 void
1812 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1813   size_t size = fc->size();
1814   assert_locked();
1815   assert(fc != NULL, "null chunk");
1816   _bt.verify_single_block((HeapWord*)fc, size);
1817   _dictionary->remove_chunk(fc);
1818   // adjust _unallocated_block upward, as necessary
1819   _bt.allocated((HeapWord*)fc, size);
1820 }
1821 
1822 void
1823 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1824   assert_locked();
1825   size_t size = fc->size();
1826   _bt.verify_single_block((HeapWord*)fc, size);
1827   NOT_PRODUCT(
1828     if (FLSVerifyIndexTable) {
1829       verifyIndexedFreeList(size);
1830     }
1831   )
1832   _indexedFreeList[size].remove_chunk(fc);
1833   NOT_PRODUCT(
1834     if (FLSVerifyIndexTable) {
1835       verifyIndexedFreeList(size);
1836     }
1837   )
1838 }
1839 
1840 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1841   /* A hint is the next larger size that has a surplus.
1842      Start search at a size large enough to guarantee that
1843      the excess is >= MinChunkSize. */
1844   size_t start = align_object_size(numWords + MinChunkSize);
1845   if (start < IndexSetSize) {
1846     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1847     size_t    hint = _indexedFreeList[start].hint();
1848     while (hint < IndexSetSize) {
1849       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1850       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1851       if (fl->surplus() > 0 && fl->head() != NULL) {
1852         // Found a list with surplus, reset original hint
1853         // and split out a free chunk which is returned.
1854         _indexedFreeList[start].set_hint(hint);
1855         FreeChunk* res = getFromListGreater(fl, numWords);
1856         assert(res == NULL || res->is_free(),
1857           "Should be returning a free chunk");
1858         return res;
1859       }
1860       hint = fl->hint(); /* keep looking */
1861     }
1862     /* None found. */
1863     it[start].set_hint(IndexSetSize);
1864   }
1865   return NULL;
1866 }
1867 
1868 /* Requires fl->size >= numWords + MinChunkSize */
1869 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1870   size_t numWords) {
1871   FreeChunk *curr = fl->head();
1872   assert(curr != NULL, "List is empty");
1873   assert(numWords >= MinChunkSize, "Word size is too small");
1874   size_t oldNumWords = curr->size();
1875   assert(oldNumWords >= numWords + MinChunkSize,
1876         "Size of chunks in the list is too small");
1877 
1878   fl->remove_chunk(curr);
1879   // recorded indirectly by splitChunkAndReturnRemainder -
1880   // smallSplit(oldNumWords, numWords);
1881   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1882   // Does anything have to be done for the remainder in terms of
1883   // fixing the card table?
1884   assert(new_chunk == NULL || new_chunk->is_free(),
1885     "Should be returning a free chunk");
1886   return new_chunk;
1887 }
1888 
1889 FreeChunk*
1890 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1891   size_t new_size) {
1892   assert_locked();
1893   size_t size = chunk->size();
1894   assert(size > new_size, "Split from a smaller block?");
1895   assert(is_aligned(chunk), "alignment problem");
1896   assert(size == adjustObjectSize(size), "alignment problem");
1897   size_t rem_sz = size - new_size;
1898   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1899   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1900   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1901   assert(is_aligned(ffc), "alignment problem");
1902   ffc->set_size(rem_sz);
1903   ffc->link_next(NULL);
1904   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1905   // Above must occur before BOT is updated below.
1906   // adjust block offset table
1907   OrderAccess::storestore();
1908   assert(chunk->is_free() && ffc->is_free(), "Error");
1909   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1910   if (rem_sz < SmallForDictionary) {
1911     bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
1912     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1913     assert(!is_par ||
1914            (GenCollectedHeap::heap()->n_par_threads() ==
1915             GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
1916     returnChunkToFreeList(ffc);
1917     split(size, rem_sz);
1918     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1919   } else {
1920     returnChunkToDictionary(ffc);
1921     split(size, rem_sz);
1922   }
1923   chunk->set_size(new_size);
1924   return chunk;
1925 }
1926 
1927 void
1928 CompactibleFreeListSpace::sweep_completed() {
1929   // Now that space is probably plentiful, refill linear
1930   // allocation blocks as needed.
1931   refillLinearAllocBlocksIfNeeded();
1932 }
1933 
1934 void
1935 CompactibleFreeListSpace::gc_prologue() {
1936   assert_locked();
1937   if (PrintFLSStatistics != 0) {
1938     gclog_or_tty->print("Before GC:\n");
1939     reportFreeListStatistics();
1940   }
1941   refillLinearAllocBlocksIfNeeded();
1942 }
1943 
1944 void
1945 CompactibleFreeListSpace::gc_epilogue() {
1946   assert_locked();
1947   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1948     if (_smallLinearAllocBlock._word_size == 0)
1949       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1950   }
1951   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1952   _promoInfo.stopTrackingPromotions();
1953   repairLinearAllocationBlocks();
1954   // Print Space's stats
1955   if (PrintFLSStatistics != 0) {
1956     gclog_or_tty->print("After GC:\n");
1957     reportFreeListStatistics();
1958   }
1959 }
1960 
1961 // Iteration support, mostly delegated from a CMS generation
1962 
1963 void CompactibleFreeListSpace::save_marks() {
1964   assert(Thread::current()->is_VM_thread(),
1965          "Global variable should only be set when single-threaded");
1966   // Mark the "end" of the used space at the time of this call;
1967   // note, however, that promoted objects from this point
1968   // on are tracked in the _promoInfo below.
1969   set_saved_mark_word(unallocated_block());
1970 #ifdef ASSERT
1971   // Check the sanity of save_marks() etc.
1972   MemRegion ur    = used_region();
1973   MemRegion urasm = used_region_at_save_marks();
1974   assert(ur.contains(urasm),
1975          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1976                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1977                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1978 #endif
1979   // inform allocator that promotions should be tracked.
1980   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1981   _promoInfo.startTrackingPromotions();
1982 }
1983 
1984 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1985   assert(_promoInfo.tracking(), "No preceding save_marks?");
1986   assert(GenCollectedHeap::heap()->n_par_threads() == 0,
1987          "Shouldn't be called if using parallel gc.");
1988   return _promoInfo.noPromotions();
1989 }
1990 
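     // The macro below expands, once per (closure type, nv_suffix) pair supplied
     // by ALL_SINCE_SAVE_MARKS_CLOSURES, into a definition of
     // oop_since_save_marks_iterate##nv_suffix that simply delegates to the
     // PromotionInfo's iteration over objects promoted since the last save_marks().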
1991 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1992                                                                             \
1993 void CompactibleFreeListSpace::                                             \
1994 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1995   assert(GenCollectedHeap::heap()->n_par_threads() == 0,                    \
1996          "Shouldn't be called (yet) during parallel part of gc.");          \
1997   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1998   /*                                                                        \
1999    * This also restores any displaced headers and removes the elements from \
2000    * the iteration set as they are processed, so that we have a clean slate \
2001    * at the end of the iteration. Note, thus, that if new objects are       \
2002    * promoted as a result of the iteration they are iterated over as well.  \
2003    */                                                                       \
2004   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2005 }
2006 
2007 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2008 
2009 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2010   return _smallLinearAllocBlock._word_size == 0;
2011 }
2012 
2013 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2014   // Fix up linear allocation blocks to look like free blocks
2015   repairLinearAllocBlock(&_smallLinearAllocBlock);
2016 }
2017 
2018 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2019   assert_locked();
2020   if (blk->_ptr != NULL) {
2021     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2022            "Minimum block size requirement");
2023     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2024     fc->set_size(blk->_word_size);
2025     fc->link_prev(NULL);   // mark as free
2026     fc->dontCoalesce();
2027     assert(fc->is_free(), "just marked it free");
2028     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2029   }
2030 }
2031 
2032 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2033   assert_locked();
2034   if (_smallLinearAllocBlock._ptr == NULL) {
2035     assert(_smallLinearAllocBlock._word_size == 0,
2036       "Size of linAB should be zero if the ptr is NULL");
2037     // Reset the linAB refill and allocation size limit.
2038     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2039   }
2040   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2041 }
2042 
2043 void
2044 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2045   assert_locked();
2046   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2047          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2048          "blk invariant");
2049   if (blk->_ptr == NULL) {
2050     refillLinearAllocBlock(blk);
2051   }
2052   if (PrintMiscellaneous && Verbose) {
2053     if (blk->_word_size == 0) {
2054       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2055     }
2056   }
2057 }
2058 
2059 void
2060 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2061   assert_locked();
2062   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2063          "linear allocation block should be empty");
2064   FreeChunk* fc;
2065   if (blk->_refillSize < SmallForDictionary &&
2066       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2067     // A linAB's strategy might be to use small sizes to reduce
2068     // fragmentation but still get the benefits of allocation from a
2069     // linAB.
2070   } else {
2071     fc = getChunkFromDictionary(blk->_refillSize);
2072   }
2073   if (fc != NULL) {
2074     blk->_ptr  = (HeapWord*)fc;
2075     blk->_word_size = fc->size();
2076     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2077   }
2078 }
2079 
2080 // Support for concurrent collection policy decisions.
2081 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2082   // In the future we might want to add in fragmentation stats --
2083   // including erosion of the "mountain" into this decision as well.
2084   return !adaptive_freelists() && linearAllocationWouldFail();
2085 }
2086 
2087 // Support for compaction
2088 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2089   scan_and_forward(this, cp);
2090   // prepare_for_compaction() uses the space between live objects
2091   // so that later phases can skip dead space quickly.  As a result,
2092   // verification of the free lists doesn't work after this point.
2093 }
2094 
2095 void CompactibleFreeListSpace::adjust_pointers() {
2096   // In other versions of adjust_pointers(), a bail out
2097   // based on the amount of live data in the generation
2098   // (i.e., if 0, bail out) may be used.
2099   // Cannot test used() == 0 here because the free lists have already
2100   // been mangled by the compaction.
2101 
2102   scan_and_adjust_pointers(this);
2103   // See note about verification in prepare_for_compaction().
2104 }
2105 
2106 void CompactibleFreeListSpace::compact() {
2107   scan_and_compact(this);
2108 }
2109 
2110 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2111 // where fbs are the free block sizes.
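     // For intuition: if all the free space is in a single block, the metric is
     // 1 - s^2/s^2 = 0 (no fragmentation); if it is split into n equal blocks of
     // size s/n, it is 1 - n*(s/n)^2/s^2 = 1 - 1/n, approaching 1 as the free
     // space becomes more finely fragmented.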
2112 double CompactibleFreeListSpace::flsFrag() const {
2113   size_t itabFree = totalSizeInIndexedFreeLists();
2114   double frag = 0.0;
2115   size_t i;
2116 
2117   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2118     double sz  = i;
2119     frag      += _indexedFreeList[i].count() * (sz * sz);
2120   }
2121 
2122   double totFree = itabFree +
2123                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2124   if (totFree > 0) {
2125     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2126             (totFree * totFree));
2127     frag = (double)1.0  - frag;
2128   } else {
2129     assert(frag == 0.0, "Follows from totFree == 0");
2130   }
2131   return frag;
2132 }
2133 
2134 void CompactibleFreeListSpace::beginSweepFLCensus(
2135   float inter_sweep_current,
2136   float inter_sweep_estimate,
2137   float intra_sweep_estimate) {
2138   assert_locked();
2139   size_t i;
2140   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2141     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2142     if (PrintFLSStatistics > 1) {
2143       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2144     }
2145     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2146     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2147     fl->set_before_sweep(fl->count());
2148     fl->set_bfr_surp(fl->surplus());
2149   }
2150   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2151                                     inter_sweep_current,
2152                                     inter_sweep_estimate,
2153                                     intra_sweep_estimate);
2154 }
2155 
2156 void CompactibleFreeListSpace::setFLSurplus() {
2157   assert_locked();
2158   size_t i;
2159   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2160     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2161     fl->set_surplus(fl->count() -
2162                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2163   }
2164 }
2165 
2166 void CompactibleFreeListSpace::setFLHints() {
2167   assert_locked();
2168   size_t i;
2169   size_t h = IndexSetSize;
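       // Walk from the largest size down, so that each list's hint ends up
       // pointing at the smallest larger size that currently has a surplus
       // (or IndexSetSize if no such size exists); bestFitSmall() follows
       // these hints when searching for a list to split from.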
2170   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2171     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2172     fl->set_hint(h);
2173     if (fl->surplus() > 0) {
2174       h = i;
2175     }
2176   }
2177 }
2178 
2179 void CompactibleFreeListSpace::clearFLCensus() {
2180   assert_locked();
2181   size_t i;
2182   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2183     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2184     fl->set_prev_sweep(fl->count());
2185     fl->set_coal_births(0);
2186     fl->set_coal_deaths(0);
2187     fl->set_split_births(0);
2188     fl->set_split_deaths(0);
2189   }
2190 }
2191 
2192 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2193   if (PrintFLSStatistics > 0) {
2194     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2195     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2196                            p2i(largestAddr));
2197   }
2198   setFLSurplus();
2199   setFLHints();
2200   if (PrintGC && PrintFLSCensus > 0) {
2201     printFLCensus(sweep_count);
2202   }
2203   clearFLCensus();
2204   assert_locked();
2205   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2206 }
2207 
2208 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2209   if (size < SmallForDictionary) {
2210     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2211     return (fl->coal_desired() < 0) ||
2212            ((int)fl->count() > fl->coal_desired());
2213   } else {
2214     return dictionary()->coal_dict_over_populated(size);
2215   }
2216 }
2217 
2218 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2219   assert(size < SmallForDictionary, "Size too large for indexed list");
2220   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2221   fl->increment_coal_births();
2222   fl->increment_surplus();
2223 }
2224 
2225 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2226   assert(size < SmallForDictionary, "Size too large for indexed list");
2227   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2228   fl->increment_coal_deaths();
2229   fl->decrement_surplus();
2230 }
2231 
2232 void CompactibleFreeListSpace::coalBirth(size_t size) {
2233   if (size  < SmallForDictionary) {
2234     smallCoalBirth(size);
2235   } else {
2236     dictionary()->dict_census_update(size,
2237                                    false /* split */,
2238                                    true /* birth */);
2239   }
2240 }
2241 
2242 void CompactibleFreeListSpace::coalDeath(size_t size) {
2243   if(size  < SmallForDictionary) {
2244     smallCoalDeath(size);
2245   } else {
2246     dictionary()->dict_census_update(size,
2247                                    false /* split */,
2248                                    false /* birth */);
2249   }
2250 }
2251 
2252 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2253   assert(size < SmallForDictionary, "Size too large for indexed list");
2254   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2255   fl->increment_split_births();
2256   fl->increment_surplus();
2257 }
2258 
2259 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2260   assert(size < SmallForDictionary, "Size too large for indexed list");
2261   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2262   fl->increment_split_deaths();
2263   fl->decrement_surplus();
2264 }
2265 
2266 void CompactibleFreeListSpace::split_birth(size_t size) {
2267   if (size  < SmallForDictionary) {
2268     smallSplitBirth(size);
2269   } else {
2270     dictionary()->dict_census_update(size,
2271                                    true /* split */,
2272                                    true /* birth */);
2273   }
2274 }
2275 
2276 void CompactibleFreeListSpace::splitDeath(size_t size) {
2277   if (size  < SmallForDictionary) {
2278     smallSplitDeath(size);
2279   } else {
2280     dictionary()->dict_census_update(size,
2281                                    true /* split */,
2282                                    false /* birth */);
2283   }
2284 }
2285 
2286 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
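       // Splitting a block of size 'from' into pieces of sizes 'to1' and
       // 'to2' (= from - to1) is recorded in the census as one split death
       // of the original size and a split birth of each fragment size.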
2287   size_t to2 = from - to1;
2288   splitDeath(from);
2289   split_birth(to1);
2290   split_birth(to2);
2291 }
2292 
2293 void CompactibleFreeListSpace::print() const {
2294   print_on(tty);
2295 }
2296 
2297 void CompactibleFreeListSpace::prepare_for_verify() {
2298   assert_locked();
2299   repairLinearAllocationBlocks();
2300   // Verify that the SpoolBlocks look like free blocks of
2301   // appropriate sizes... To be done ...
2302 }
2303 
2304 class VerifyAllBlksClosure: public BlkClosure {
2305  private:
2306   const CompactibleFreeListSpace* _sp;
2307   const MemRegion                 _span;
2308   HeapWord*                       _last_addr;
2309   size_t                          _last_size;
2310   bool                            _last_was_obj;
2311   bool                            _last_was_live;
2312 
2313  public:
2314   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2315     MemRegion span) :  _sp(sp), _span(span),
2316                        _last_addr(NULL), _last_size(0),
2317                        _last_was_obj(false), _last_was_live(false) { }
2318 
2319   virtual size_t do_blk(HeapWord* addr) {
2320     size_t res;
2321     bool   was_obj  = false;
2322     bool   was_live = false;
2323     if (_sp->block_is_obj(addr)) {
2324       was_obj = true;
2325       oop p = oop(addr);
2326       guarantee(p->is_oop(), "Should be an oop");
2327       res = _sp->adjustObjectSize(p->size());
2328       if (_sp->obj_is_alive(addr)) {
2329         was_live = true;
2330         p->verify();
2331       }
2332     } else {
2333       FreeChunk* fc = (FreeChunk*)addr;
2334       res = fc->size();
2335       if (FLSVerifyLists && !fc->cantCoalesce()) {
2336         guarantee(_sp->verify_chunk_in_free_list(fc),
2337                   "Chunk should be on a free list");
2338       }
2339     }
2340     if (res == 0) {
2341       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2342       gclog_or_tty->print_cr(
2343         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2344         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2345         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2346         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2347       _sp->print_on(gclog_or_tty);
2348       guarantee(false, "Seppuku!");
2349     }
2350     _last_addr = addr;
2351     _last_size = res;
2352     _last_was_obj  = was_obj;
2353     _last_was_live = was_live;
2354     return res;
2355   }
2356 };
2357 
2358 class VerifyAllOopsClosure: public OopClosure {
2359  private:
2360   const CMSCollector*             _collector;
2361   const CompactibleFreeListSpace* _sp;
2362   const MemRegion                 _span;
2363   const bool                      _past_remark;
2364   const CMSBitMap*                _bit_map;
2365 
2366  protected:
2367   void do_oop(void* p, oop obj) {
2368     if (_span.contains(obj)) { // the interior oop points into CMS heap
2369       if (!_span.contains(p)) { // reference from outside CMS heap
2370         // Should be a valid object; the first disjunct below allows
2371         // us to sidestep an assertion in block_is_obj() that insists
2372         // that p be in _sp. Note that several generations (and spaces)
2373         // are spanned by _span (CMS heap) above.
2374         guarantee(!_sp->is_in_reserved(obj) ||
2375                   _sp->block_is_obj((HeapWord*)obj),
2376                   "Should be an object");
2377         guarantee(obj->is_oop(), "Should be an oop");
2378         obj->verify();
2379         if (_past_remark) {
2380           // Remark has been completed, so the object should be marked.
2381           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2382         }
2383       } else { // reference within CMS heap
2384         if (_past_remark) {
2385           // Remark has been completed -- so the referent should have
2386           // been marked, if the referring object is.
2387           if (_bit_map->isMarked(_collector->block_start(p))) {
2388             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2389           }
2390         }
2391       }
2392     } else if (_sp->is_in_reserved(p)) {
2393       // the reference is from FLS, and points out of FLS
2394       guarantee(obj->is_oop(), "Should be an oop");
2395       obj->verify();
2396     }
2397   }
2398 
2399   template <class T> void do_oop_work(T* p) {
2400     T heap_oop = oopDesc::load_heap_oop(p);
2401     if (!oopDesc::is_null(heap_oop)) {
2402       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2403       do_oop(p, obj);
2404     }
2405   }
2406 
2407  public:
2408   VerifyAllOopsClosure(const CMSCollector* collector,
2409     const CompactibleFreeListSpace* sp, MemRegion span,
2410     bool past_remark, CMSBitMap* bit_map) :
2411     _collector(collector), _sp(sp), _span(span),
2412     _past_remark(past_remark), _bit_map(bit_map) { }
2413 
2414   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2415   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2416 };
2417 
2418 void CompactibleFreeListSpace::verify() const {
2419   assert_lock_strong(&_freelistLock);
2420   verify_objects_initialized();
2421   MemRegion span = _collector->_span;
2422   bool past_remark = (_collector->abstract_state() ==
2423                       CMSCollector::Sweeping);
2424 
2425   ResourceMark rm;
2426   HandleMark  hm;
2427 
2428   // Check integrity of CFL data structures
2429   _promoInfo.verify();
2430   _dictionary->verify();
2431   if (FLSVerifyIndexTable) {
2432     verifyIndexedFreeLists();
2433   }
2434   // Check integrity of all objects and free blocks in space
2435   {
2436     VerifyAllBlksClosure cl(this, span);
2437     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2438   }
2439   // Check that all references in the heap to the FLS
2440   // are to valid objects in the FLS, and that references in
2441   // the FLS are to valid objects elsewhere in the heap.
2442   if (FLSVerifyAllHeapReferences)
2443   {
2444     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2445       _collector->markBitMap());
2446 
2447     // Iterate over all oops in the heap. Uses the _no_header version
2448     // since we are not interested in following the klass pointers.
2449     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2450   }
2451 
2452   if (VerifyObjectStartArray) {
2453     // Verify the block offset table
2454     _bt.verify();
2455   }
2456 }
2457 
2458 #ifndef PRODUCT
2459 void CompactibleFreeListSpace::verifyFreeLists() const {
2460   if (FLSVerifyLists) {
2461     _dictionary->verify();
2462     verifyIndexedFreeLists();
2463   } else {
2464     if (FLSVerifyDictionary) {
2465       _dictionary->verify();
2466     }
2467     if (FLSVerifyIndexTable) {
2468       verifyIndexedFreeLists();
2469     }
2470   }
2471 }
2472 #endif
2473 
2474 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2475   size_t i = 0;
2476   for (; i < IndexSetStart; i++) {
2477     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2478   }
2479   for (; i < IndexSetSize; i++) {
2480     verifyIndexedFreeList(i);
2481   }
2482 }
2483 
2484 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2485   FreeChunk* fc   =  _indexedFreeList[size].head();
2486   FreeChunk* tail =  _indexedFreeList[size].tail();
2487   size_t    num = _indexedFreeList[size].count();
2488   size_t      n = 0;
2489   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2490             "Slot should have been empty");
2491   for (; fc != NULL; fc = fc->next(), n++) {
2492     guarantee(fc->size() == size, "Size inconsistency");
2493     guarantee(fc->is_free(), "!free?");
2494     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2495     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2496   }
2497   guarantee(n == num, "Incorrect count");
2498 }
2499 
2500 #ifndef PRODUCT
2501 void CompactibleFreeListSpace::check_free_list_consistency() const {
2502   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2503     "Some sizes can't be allocated without recourse to"
2504     " linear allocation buffers");
2505   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2506     "else MIN_TREE_CHUNK_SIZE is wrong");
2507   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2508   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2509 }
2510 #endif
2511 
2512 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2513   assert_lock_strong(&_freelistLock);
2514   AdaptiveFreeList<FreeChunk> total;
2515   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2516   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2517   size_t total_free = 0;
2518   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2519     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2520     total_free += fl->count() * fl->size();
2521     if (i % (40*IndexSetStride) == 0) {
2522       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2523     }
2524     fl->print_on(gclog_or_tty);
2525     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2526     total.set_surplus(    total.surplus()     + fl->surplus()    );
2527     total.set_desired(    total.desired()     + fl->desired()    );
2528     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2529     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2530     total.set_count(      total.count()       + fl->count()      );
2531     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2532     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2533     total.set_split_births(total.split_births() + fl->split_births());
2534     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2535   }
2536   total.print_on(gclog_or_tty, "TOTAL");
2537   gclog_or_tty->print_cr("Total free in indexed lists "
2538                          SIZE_FORMAT " words", total_free);
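       // "growth" below is the net number of births (split + coalesce, minus
       // deaths) since the previous sweep, relative to the population recorded
       // at that sweep; "deficit" is the shortfall of the current population
       // relative to the desired population, as a fraction of desired.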
2539   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2540     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2541             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2542     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2543   _dictionary->print_dict_census();
2544 }
2545 
2546 ///////////////////////////////////////////////////////////////////////////
2547 // CFLS_LAB
2548 ///////////////////////////////////////////////////////////////////////////
2549 
2550 #define VECTOR_257(x)                                                                                  \
2551   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2552   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2553      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2554      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2555      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2556      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2557      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2558      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2559      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2560      x }
2561 
2562 // Initialize with the default setting for CMS, _not_ the generic
2563 // OldPLABSize, whose static default is different; if OldPLABSize is
2564 // overridden on the command line, this gets reinitialized via a call to
2565 // modify_initialization() below.
2566 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2567   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
2568 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2569 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
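
     // For illustration only: each slot i of _blocks_to_claim tracks an
     // exponentially weighted average of how many blocks of size i (in words)
     // a worker should claim per refill.  Assuming the usual exponential
     // average with a percentage weight w (here OldPLABWeight), a new sample s
     // updates the running average a roughly as
     //   a' = ((100 - w) * a + w * s) / 100.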
2570 
2571 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2572   _cfls(cfls)
2573 {
2574   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2575   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2576        i < CompactibleFreeListSpace::IndexSetSize;
2577        i += CompactibleFreeListSpace::IndexSetStride) {
2578     _indexedFreeList[i].set_size(i);
2579     _num_blocks[i] = 0;
2580   }
2581 }
2582 
2583 static bool _CFLS_LAB_modified = false;
2584 
2585 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2586   assert(!_CFLS_LAB_modified, "Call only once");
2587   _CFLS_LAB_modified = true;
2588   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2589        i < CompactibleFreeListSpace::IndexSetSize;
2590        i += CompactibleFreeListSpace::IndexSetStride) {
2591     _blocks_to_claim[i].modify(n, wt, true /* force */);
2592   }
2593 }
2594 
2595 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2596   FreeChunk* res;
2597   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2598   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2599     // This lock synchronizes with other large-object allocations.
2600     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2601                     Mutex::_no_safepoint_check_flag);
2602     res = _cfls->getChunkFromDictionaryExact(word_sz);
2603     if (res == NULL) return NULL;
2604   } else {
2605     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2606     if (fl->count() == 0) {
2607       // Attempt to refill this local free list.
2608       get_from_global_pool(word_sz, fl);
2609       // If it didn't work, give up.
2610       if (fl->count() == 0) return NULL;
2611     }
2612     res = fl->get_chunk_at_head();
2613     assert(res != NULL, "Why was count non-zero?");
2614   }
2615   res->markNotFree();
2616   assert(!res->is_free(), "shouldn't be marked free");
2617   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2618   // Mangle a just-allocated object with a distinct pattern.
2619   debug_only(res->mangleAllocated(word_sz));
2620   return (HeapWord*)res;
2621 }
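
     // Illustrative use (hypothetical caller, not part of this file): a
     // parallel scavenge worker promoting an object of adjusted size sz would
     // do roughly
     //   HeapWord* obj = lab->alloc(sz);
     //   if (obj == NULL) { /* fall back to the caller's slow path */ }
     // Requests of IndexSetSize words or more bypass the per-worker lists and
     // go straight to the dictionary under parDictionaryAllocLock().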
2622 
2623 // Get a chunk of blocks of the right size and update related
2624 // book-keeping stats
2625 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2626   // Get the #blocks we want to claim
2627   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2628   assert(n_blks > 0, "Error");
2629   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2630   // In some cases, when the application has a phase change,
2631   // there may be a sudden and sharp shift in the object survival
2632   // profile, and updating the counts at the end of a scavenge
2633   // may not be quick enough, giving rise to large scavenge pauses
2634   // during these phase changes. It is beneficial to detect such
2635   // changes on-the-fly during a scavenge and avoid such a phase-change
2636   // pothole. The following code is a heuristic attempt to do that.
2637   // It is protected by a product flag until we have gained
2638   // enough experience with this heuristic and fine-tuned its behavior.
2639   // WARNING: This might increase fragmentation if we overreact to
2640   // small spikes, so some kind of historical smoothing based on
2641   // previous experience with the greater reactivity might be useful.
2642   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2643   // default.
2644   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2645     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2646     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2647     n_blks = MIN2(n_blks, CMSOldPLABMax);
2648   }
2649   assert(n_blks > 0, "Error");
2650   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2651   // Update stats table entry for this block size
2652   _num_blocks[word_sz] += fl->count();
2653 }
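
     // Worked example of the resizing heuristic above (all numbers
     // hypothetical): suppose the current average gives n_blks = 25 and this
     // worker has already obtained _num_blocks[word_sz] = 2000 blocks of this
     // size.  With CMSOldPLABToleranceFactor = 4 and CMSOldPLABNumRefills = 4
     // (example values only), multiple = 2000 / (4 * 4 * 25) = 5; with
     // CMSOldPLABReactivityFactor = 2 the refill grows to 25 + 2 * 5 * 25 = 275
     // blocks, subject to the CMSOldPLABMax clamp.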
2654 
2655 void CFLS_LAB::compute_desired_plab_size() {
2656   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2657        i < CompactibleFreeListSpace::IndexSetSize;
2658        i += CompactibleFreeListSpace::IndexSetStride) {
2659     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2660            "Counter inconsistency");
2661     if (_global_num_workers[i] > 0) {
2662       // Need to smooth wrt historical average
2663       if (ResizeOldPLAB) {
2664         _blocks_to_claim[i].sample(
2665           MAX2(CMSOldPLABMin,
2666           MIN2(CMSOldPLABMax,
2667                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2668       }
2669       // Reset counters for next round
2670       _global_num_workers[i] = 0;
2671       _global_num_blocks[i] = 0;
2672       if (PrintOldPLAB) {
2673         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2674                                i, (size_t)_blocks_to_claim[i].average());
2675       }
2676     }
2677   }
2678 }
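
     // Worked example of the sampling above (hypothetical numbers): if, for
     // size class i, the last scavenge used _global_num_blocks[i] = 4800
     // blocks across _global_num_workers[i] = 4 workers, and
     // CMSOldPLABNumRefills = 4 (example value), the new sample is
     // 4800 / (4 * 4) = 300 blocks per refill, clamped to
     // [CMSOldPLABMin, CMSOldPLABMax] before being fed into the weighted
     // average.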
2679 
2680 // If this is changed in the future to allow parallel
2681 // access, one would need to take the FL locks and,
2682 // depending on how it is used, stagger access from
2683 // parallel threads to reduce contention.
2684 void CFLS_LAB::retire(int tid) {
2685   // We run this single threaded with the world stopped;
2686   // so no need for locks and such.
2687   NOT_PRODUCT(Thread* t = Thread::current();)
2688   assert(Thread::current()->is_VM_thread(), "Error");
2689   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2690        i < CompactibleFreeListSpace::IndexSetSize;
2691        i += CompactibleFreeListSpace::IndexSetStride) {
2692     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2693            "Can't retire more than what we obtained");
2694     if (_num_blocks[i] > 0) {
2695       size_t num_retire =  _indexedFreeList[i].count();
2696       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2697       {
2698         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2699         //                Mutex::_no_safepoint_check_flag);
2700 
2701         // Update globals stats for num_blocks used
2702         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2703         _global_num_workers[i]++;
2704         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2705         if (num_retire > 0) {
2706           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2707           // Reset this list.
2708           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2709           _indexedFreeList[i].set_size(i);
2710         }
2711       }
2712       if (PrintOldPLAB) {
2713         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2714                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2715       }
2716       // Reset stats for next round
2717       _num_blocks[i]         = 0;
2718     }
2719   }
2720 }
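
     // Accounting sketch for retire() (hypothetical numbers): if a worker
     // obtained _num_blocks[i] = 300 blocks of size i during the scavenge and
     // num_retire = 40 of them are still on its local list, then
     // 300 - 40 = 260 is added to _global_num_blocks[i], the 40 unused blocks
     // are prepended back onto the shared _indexedFreeList[i], and the local
     // list and _num_blocks[i] are reset for the next round.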
2721 
2722 // Used by par_get_chunk_of_blocks() for the chunks from the
2723 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2724 // of "word_sz" and, if found, splits it into "word_sz"-sized chunks
2725 // and adds them to the free list "fl".  "n" is the maximum number
2726 // of chunks to be added to "fl".
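     // For example (sizes purely illustrative): with word_sz == 8 and
     // CMSSplitIndexedFreeListBlocks enabled, the loop below first tries the
     // 8-word list (k == 1); if that is empty it tries the 16-word list
     // (k == 2), splitting each 16-word chunk taken into two 8-word chunks,
     // and so on while k * word_sz stays below IndexSetSize.  To supply n
     // chunks from the k-fold list it takes roughly MAX2(n / k, 1) chunks.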
2727 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2728 
2729   // We'll try all multiples of word_sz in the indexed set, starting with
2730   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2731   // then try getting a big chunk and splitting it.
2732   {
2733     bool found;
2734     int  k;
2735     size_t cur_sz;
2736     for (k = 1, cur_sz = k * word_sz, found = false;
2737          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2738          (CMSSplitIndexedFreeListBlocks || k <= 1);
2739          k++, cur_sz = k * word_sz) {
2740       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2741       fl_for_cur_sz.set_size(cur_sz);
2742       {
2743         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2744                         Mutex::_no_safepoint_check_flag);
2745         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2746         if (gfl->count() != 0) {
2747           // nn is the number of chunks of size cur_sz that
2748           // we'd need to split k-ways each, in order to create
2749           // "n" chunks of size word_sz each.
2750           const size_t nn = MAX2(n/k, (size_t)1);
2751           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2752           found = true;
2753           if (k > 1) {
2754             // Update split death stats for the cur_sz-size blocks list:
2755             // we increment the split death count by the number of blocks
2756             // we just took from the cur_sz-size blocks list and which
2757             // we will be splitting below.
2758             ssize_t deaths = gfl->split_deaths() +
2759                              fl_for_cur_sz.count();
2760             gfl->set_split_deaths(deaths);
2761           }
2762         }
2763       }
2764       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2765       if (found) {
2766         if (k == 1) {
2767           fl->prepend(&fl_for_cur_sz);
2768         } else {
2769           // Divide each block on fl_for_cur_sz up k ways.
2770           FreeChunk* fc;
2771           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2772             // Must do this in reverse order, so that anybody attempting to
2773             // access the main chunk sees it as a single free block until we
2774             // change it.
2775             size_t fc_size = fc->size();
2776             assert(fc->is_free(), "Error");
2777             for (int i = k-1; i >= 0; i--) {
2778               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2779               assert((i != 0) ||
2780                         ((fc == ffc) && ffc->is_free() &&
2781                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2782                         "Counting error");
2783               ffc->set_size(word_sz);
2784               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2785               ffc->link_next(NULL);
2786               // Above must occur before BOT is updated below.
2787               OrderAccess::storestore();
2788               // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2789               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2790               fc_size -= word_sz;
2791               assert(fc_size == i*word_sz, "Error");
2792               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2793               _bt.verify_single_block((HeapWord*)fc, fc_size);
2794               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2795               // Push this on "fl".
2796               fl->return_chunk_at_head(ffc);
2797             }
2798             // TRAP
2799             assert(fl->tail()->next() == NULL, "List invariant.");
2800           }
2801         }
2802         // Update birth stats for this block size.
2803         size_t num = fl->count();
2804         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2805                         Mutex::_no_safepoint_check_flag);
2806         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2807         _indexedFreeList[word_sz].set_split_births(births);
2808         return true;
2809       }
2810     }
2811     return found;
2812   }
2813 }
2814 
2815 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2816 
2817   FreeChunk* fc = NULL;
2818   FreeChunk* rem_fc = NULL;
2819   size_t rem;
2820   {
2821     MutexLockerEx x(parDictionaryAllocLock(),
2822                     Mutex::_no_safepoint_check_flag);
2823     while (n > 0) {
2824       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2825                                   FreeBlockDictionary<FreeChunk>::atLeast);
2826       if (fc != NULL) {
2827         break;
2828       } else {
2829         n--;
2830       }
2831     }
2832     if (fc == NULL) return NULL;
2833     // Otherwise, split up that block.
2834     assert((ssize_t)n >= 1, "Control point invariant");
2835     assert(fc->is_free(), "Error: should be a free block");
2836     _bt.verify_single_block((HeapWord*)fc, fc->size());
2837     const size_t nn = fc->size() / word_sz;
2838     n = MIN2(nn, n);
2839     assert((ssize_t)n >= 1, "Control point invariant");
2840     rem = fc->size() - n * word_sz;
2841     // If there is a remainder, and it's too small, allocate one fewer.
2842     if (rem > 0 && rem < MinChunkSize) {
2843       n--; rem += word_sz;
2844     }
2845     // Note that at this point we may have n == 0.
2846     assert((ssize_t)n >= 0, "Control point invariant");
2847 
2848     // If n is 0, the chunk fc that was found is not large
2849     // enough to leave a viable remainder.  We are unable to
2850     // allocate even one block.  Return fc to the
2851     // dictionary and return, leaving "fl" empty.
2852     if (n == 0) {
2853       returnChunkToDictionary(fc);
2854       return NULL;
2855     }
2856 
2857     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2858     dictionary()->dict_census_update(fc->size(),
2859                                      true /*split*/,
2860                                      false /*birth*/);
2861 
2862     // First return the remainder, if any.
2863     // Note that we hold the lock until we decide if we're going to give
2864     // back the remainder to the dictionary, since a concurrent allocation
2865     // may otherwise see the heap as empty.  (We're willing to take that
2866     // hit if the block is a small block.)
2867     if (rem > 0) {
2868       size_t prefix_size = n * word_sz;
2869       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2870       rem_fc->set_size(rem);
2871       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2872       rem_fc->link_next(NULL);
2873       // Above must occur before BOT is updated below.
2874       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2875       OrderAccess::storestore();
2876       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2877       assert(fc->is_free(), "Error");
2878       fc->set_size(prefix_size);
2879       if (rem >= IndexSetSize) {
2880         returnChunkToDictionary(rem_fc);
2881         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2882         rem_fc = NULL;
2883       }
2884       // Otherwise, return it to the small list below.
2885     }
2886   }
2887   if (rem_fc != NULL) {
2888     MutexLockerEx x(_indexedFreeListParLocks[rem],
2889                     Mutex::_no_safepoint_check_flag);
2890     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2891     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2892     smallSplitBirth(rem);
2893   }
2894   assert(n * word_sz == fc->size(),
2895     err_msg("Chunk size " SIZE_FORMAT " cannot be split exactly into "
2896     SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2897     fc->size(), n, word_sz));
2898   return fc;
2899 }
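
     // Worked example for the carving above (hypothetical sizes): asked for
     // n = 10 chunks of word_sz = 8, the dictionary might return a 100-word
     // block; then nn = 100 / 8 = 12, n stays 10, and rem = 100 - 80 = 20.
     // Assuming 20 >= MinChunkSize, the 80-word prefix is kept for splitting;
     // since 20 < IndexSetSize the 20-word remainder is pushed onto
     // _indexedFreeList[20] and counted as a small split birth.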
2900 
2901 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2902 
2903   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2904 
2905   if (fc == NULL) {
2906     return;
2907   }
2908 
2909   size_t n = fc->size() / word_sz;
2910 
2911   assert((ssize_t)n > 0, "Consistency");
2912   // Now do the splitting up.
2913   // Must do this in reverse order, so that anybody attempting to
2914   // access the main chunk sees it as a single free block until we
2915   // change it.
2916   size_t fc_size = n * word_sz;
2917   // All but first chunk in this loop
2918   for (ssize_t i = n-1; i > 0; i--) {
2919     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2920     ffc->set_size(word_sz);
2921     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2922     ffc->link_next(NULL);
2923     // Above must occur before BOT is updated below.
2924     OrderAccess::storestore();
2925     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2926     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2927     fc_size -= word_sz;
2928     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2929     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2930     _bt.verify_single_block((HeapWord*)fc, fc_size);
2931     // Push this on "fl".
2932     fl->return_chunk_at_head(ffc);
2933   }
2934   // First chunk
2935   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2936   // The blocks above should show their new sizes before the first block below
2937   fc->set_size(word_sz);
2938   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2939   fc->link_next(NULL);
2940   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2941   _bt.verify_single_block((HeapWord*)fc, fc->size());
2942   fl->return_chunk_at_head(fc);
2943 
2944   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2945   {
2946     // Update the stats for this block size.
2947     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2948                     Mutex::_no_safepoint_check_flag);
2949     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2950     _indexedFreeList[word_sz].set_split_births(births);
2951     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2952     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2953   }
2954 
2955   // TRAP
2956   assert(fl->tail()->next() == NULL, "List invariant.");
2957 }
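
     // Splitting-order sketch (illustrative): for a 4 * word_sz chunk starting
     // at address p, the loop above first carves the block at p + 3 * word_sz,
     // then p + 2 * word_sz, then p + word_sz, updating the BOT for each
     // before finally shrinking the original header at p to word_sz, so a
     // concurrent reader sees either the original large free block or fully
     // formed sub-blocks, never a half-initialized one.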
2958 
2959 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2960   assert(fl->count() == 0, "Precondition.");
2961   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2962          "Precondition");
2963 
2964   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2965     // Got it
2966     return;
2967   }
2968 
2969   // Otherwise, we'll split a block from the dictionary.
2970   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2971 }
2972 
2973 // Set up the space's par_seq_tasks structure for work claiming
2974 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2975 // XXX Need to suitably abstract and generalize this and the next
2976 // method into one.
2977 void
2978 CompactibleFreeListSpace::
2979 initialize_sequential_subtasks_for_rescan(int n_threads) {
2980   // The "size" of each task is fixed according to rescan_task_size.
2981   assert(n_threads > 0, "Unexpected n_threads argument");
2982   const size_t task_size = rescan_task_size();
2983   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2984   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2985   assert(n_tasks == 0 ||
2986          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2987           (used_region().start() + n_tasks*task_size >= used_region().end())),
2988          "n_tasks calculation incorrect");
2989   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2990   assert(!pst->valid(), "Clobbering existing data?");
2991   // Sets the condition for completion of the subtask (how many threads
2992   // need to finish in order to be done).
2993   pst->set_n_threads(n_threads);
2994   pst->set_n_tasks((int)n_tasks);
2995 }
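
     // Task-count arithmetic (hypothetical numbers): with a used region of
     // 1,000,000 words and rescan_task_size() = 4096 words,
     // n_tasks = (1000000 + 4095) / 4096 = 245; the assertions above check
     // that used_region().end() falls strictly beyond the first
     // (n_tasks - 1) * task_size words and within n_tasks * task_size words
     // of the start, so the last task covers the partial tail.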
2996 
2997 // Set up the space's par_seq_tasks structure for work claiming
2998 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2999 void
3000 CompactibleFreeListSpace::
3001 initialize_sequential_subtasks_for_marking(int n_threads,
3002                                            HeapWord* low) {
3003   // The "size" of each task is fixed according to rescan_task_size.
3004   assert(n_threads > 0, "Unexpected n_threads argument");
3005   const size_t task_size = marking_task_size();
3006   assert(task_size > CardTableModRefBS::card_size_in_words &&
3007          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3008          "Otherwise arithmetic below would be incorrect");
3009   MemRegion span = _gen->reserved();
3010   if (low != NULL) {
3011     if (span.contains(low)) {
3012       // Align low down to a card boundary so that
3013       // we can use block_offset_careful() on span boundaries.
3014       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3015                                  CardTableModRefBS::card_size);
3016       // Clip span prefix at aligned_low
3017       span = span.intersection(MemRegion(aligned_low, span.end()));
3018     } else if (low > span.end()) {
3019       span = MemRegion(low, low);  // Null region
3020     } // else use entire span
3021   }
3022   assert(span.is_empty() ||
3023          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3024         "span should start at a card boundary");
3025   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3026   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3027   assert(n_tasks == 0 ||
3028          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3029           (span.start() + n_tasks*task_size >= span.end())),
3030          "n_tasks calculation incorrect");
3031   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3032   assert(!pst->valid(), "Clobbering existing data?");
3033   // Sets the condition for completion of the subtask (how many threads
3034   // need to finish in order to be done).
3035   pst->set_n_threads(n_threads);
3036   pst->set_n_tasks((int)n_tasks);
3037 }
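
     // Span-clipping sketch (addresses purely illustrative): if the
     // generation's reserved region is [0x10000, 0x90000) and low == 0x40010,
     // low is first aligned down to a card boundary (0x40000 here) and the
     // span becomes [0x40000, 0x90000); marking tasks of marking_task_size()
     // words are then laid out over the clipped span exactly as in the rescan
     // case above.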