1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/liveRange.hpp"
  34 #include "gc/shared/space.inline.hpp"
  35 #include "gc/shared/spaceDecorator.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
  52 // Highest rank used by the free list locks.
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
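     // Minimum size, in HeapWords, of any chunk this space will manage;
     // established once at startup by set_cms_values() below.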
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  77   MemRegion mr, bool use_adaptive_freelists,
  78   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  79   _dictionaryChoice(dictionaryChoice),
  80   _adaptive_freelists(use_adaptive_freelists),
  81   _bt(bs, mr),
  82   // free list locks are in the range of values taken by _lockRank
  83   // This range currently is [_leaf+2, _leaf+3]
  84   // Note: this requires that CFLspace c'tors
  85   // are called serially in the order in which the locks are
  86   // acquired in the program text. This is true today.
  87   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  88                 Monitor::_safepoint_check_sometimes),
  89   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  90                           "CompactibleFreeListSpace._dict_par_lock", true,
  91                           Monitor::_safepoint_check_never),
  92   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  93                     CMSRescanMultiple),
  94   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  95                     CMSConcMarkMultiple),
  96   _collector(NULL),
  97   _preconsumptionDirtyCardClosure(NULL)
  98 {
  99   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
 100          "FreeChunk is larger than expected");
 101   _bt.set_space(this);
 102   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 103   // We have all of "mr", all of which we place in the dictionary
 104   // as one big chunk. We'll need to decide here which of several
 105   // possible alternative dictionary implementations to use. For
 106   // now the choice is easy, since we have only one working
 107   // implementation, namely, the simple binary tree (splaying
 108   // temporarily disabled).
 109   switch (dictionaryChoice) {
 110     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 111       _dictionary = new AFLBinaryTreeDictionary(mr);
 112       break;
 113     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 114     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 115     default:
 116       warning("dictionaryChoice: selected option not understood; using"
 117               " default BinaryTreeDictionary implementation instead.");
 118   }
 119   assert(_dictionary != NULL, "CMS dictionary initialization");
 120   // The indexed free lists are initially all empty and are lazily
 121   // filled in on demand. Initialize the array elements to NULL.
 122   initializeIndexedFreeListArray();
 123 
 124   // Not using adaptive free lists assumes that allocation is first
 125   // from the linAB's.  Also a cms perm gen which can be compacted
 126   // has to have the klass's klassKlass allocated at a lower
 127   // address in the heap than the klass so that the klassKlass is
 128   // moved to its new location before the klass is moved.
 129   // Set the _refillSize for the linear allocation blocks
 130   if (!use_adaptive_freelists) {
 131     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 132                                            FreeBlockDictionary<FreeChunk>::atLeast);
 133     // The small linAB initially has all the space and will allocate
 134     // a chunk of any size.
 135     HeapWord* addr = (HeapWord*) fc;
 136     _smallLinearAllocBlock.set(addr, fc->size(),
 137       1024*SmallForLinearAlloc, fc->size());
 138     // Note that _unallocated_block is not updated here.
 139     // Allocations from the linear allocation block should
 140     // update it.
 141   } else {
 142     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 143                                SmallForLinearAlloc);
 144   }
 145   // CMSIndexedFreeListReplenish should be at least 1
 146   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 147   _promoInfo.setSpace(this);
 148   if (UseCMSBestFit) {
 149     _fitStrategy = FreeBlockBestFitFirst;
 150   } else {
 151     _fitStrategy = FreeBlockStrategyNone;
 152   }
 153   check_free_list_consistency();
 154 
 155   // Initialize locks for parallel case.
 156   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 157     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 158                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 159     DEBUG_ONLY(
 160       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 161     )
 162   }
 163   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 164 }
 165 
 166 // Like CompactibleSpace forward() but always calls cross_threshold() to
 167 // update the block offset table.  Removed initialize_threshold call because
 168 // CFLS does not use a block offset array for contiguous spaces.
 169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 170                                     CompactPoint* cp, HeapWord* compact_top) {
 171   // q is alive
 172   // First check if we should switch compaction space
 173   assert(this == cp->space, "'this' should be current compaction space.");
 174   size_t compaction_max_size = pointer_delta(end(), compact_top);
 175   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 176     "virtual adjustObjectSize_v() method is not correct");
 177   size_t adjusted_size = adjustObjectSize(size);
 178   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 179          "no small fragments allowed");
 180   assert(minimum_free_block_size() == MinChunkSize,
 181          "for de-virtualized reference below");
 182   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
 183   if (adjusted_size + MinChunkSize > compaction_max_size &&
 184       adjusted_size != compaction_max_size) {
 185     do {
 186       // switch to next compaction space
 187       cp->space->set_compaction_top(compact_top);
 188       cp->space = cp->space->next_compaction_space();
 189       if (cp->space == NULL) {
 190         cp->gen = GenCollectedHeap::heap()->young_gen();
 191         assert(cp->gen != NULL, "compaction must succeed");
 192         cp->space = cp->gen->first_compaction_space();
 193         assert(cp->space != NULL, "generation must have a first compaction space");
 194       }
 195       compact_top = cp->space->bottom();
 196       cp->space->set_compaction_top(compact_top);
 197       // The correct adjusted_size may not be the same as that for this method
 198       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 199       // Use the virtual method, which was not used above in order to save
 200       // the virtual dispatch.
 201       adjusted_size = cp->space->adjust_object_size_v(size);
 202       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 203       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 204     } while (adjusted_size > compaction_max_size);
 205   }
 206 
 207   // store the forwarding pointer into the mark word
 208   if ((HeapWord*)q != compact_top) {
 209     q->forward_to(oop(compact_top));
 210     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 211   } else {
 212     // if the object isn't moving we can just set the mark to the default
 213     // mark and handle it specially later on.
 214     q->init_mark();
 215     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 216   }
 217 
 218   compact_top += adjusted_size;
 219 
 220   // we need to update the offset table so that the beginnings of objects can be
 221   // found during scavenge.  Note that we are updating the offset table based on
 222   // where the object will be once the compaction phase finishes.
 223 
 224   // Always call cross_threshold().  A contiguous space would call it only
 225   // when the compaction_top exceeds the current threshold, but a
 226   // non-contiguous space must call it unconditionally.
 227   cp->threshold =
 228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 229   return compact_top;
 230 }
 231 
 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 233 // and use of single_block instead of alloc_block.  The name here is not really
 234 // appropriate - maybe a more general name could be invented for both the
 235 // contiguous and noncontiguous spaces.
 236 
 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 238   _bt.single_block(start, the_end);
 239   return end();
 240 }
 241 
 242 // Initialize the indexed free list array; each list starts out empty.
 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 244   for (size_t i = 0; i < IndexSetSize; i++) {
 245     // Note that on platforms where objects are double word aligned,
 246     // the odd array elements are not used.  It is convenient, however,
 247     // to map directly from the object size to the array element.
 248     _indexedFreeList[i].reset(IndexSetSize);
 249     _indexedFreeList[i].set_size(i);
 250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 254   }
 255 }
 256 
 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 258   for (size_t i = 1; i < IndexSetSize; i++) {
 259     assert(_indexedFreeList[i].size() == (size_t) i,
 260       "Indexed free list sizes are incorrect");
 261     _indexedFreeList[i].reset(IndexSetSize);
 262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 266   }
 267 }
 268 
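     // Reset the space so that mr describes its single free chunk: the indexed
     // free lists and the dictionary are emptied and, if mr is non-empty, one
     // chunk covering all of mr is handed to the dictionary or an indexed list.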
 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
 270   resetIndexedFreeListArray();
 271   dictionary()->reset();
 272   if (BlockOffsetArrayUseUnallocatedBlock) {
 273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 274     // Everything's allocated until proven otherwise.
 275     _bt.set_unallocated_block(end());
 276   }
 277   if (!mr.is_empty()) {
 278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 279     _bt.single_block(mr.start(), mr.word_size());
 280     FreeChunk* fc = (FreeChunk*) mr.start();
 281     fc->set_size(mr.word_size());
 282     if (mr.word_size() >= IndexSetSize ) {
 283       returnChunkToDictionary(fc);
 284     } else {
 285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 287     }
 288     coalBirth(mr.word_size());
 289   }
 290   _promoInfo.reset();
 291   _smallLinearAllocBlock._ptr = NULL;
 292   _smallLinearAllocBlock._word_size = 0;
 293 }
 294 
 295 void CompactibleFreeListSpace::reset_after_compaction() {
 296   // Reset the space to the new reality - one free chunk.
 297   MemRegion mr(compaction_top(), end());
 298   reset(mr);
 299   // Now refill the linear allocation block(s) if possible.
 300   if (_adaptive_freelists) {
 301     refillLinearAllocBlocksIfNeeded();
 302   } else {
 303     // Place as much of mr in the linAB as we can get,
 304     // provided it was big enough to go into the dictionary.
 305     FreeChunk* fc = dictionary()->find_largest_dict();
 306     if (fc != NULL) {
 307       assert(fc->size() == mr.word_size(),
 308              "Why was the chunk broken up?");
 309       removeChunkFromDictionary(fc);
 310       HeapWord* addr = (HeapWord*) fc;
 311       _smallLinearAllocBlock.set(addr, fc->size(),
 312         1024*SmallForLinearAlloc, fc->size());
 313       // Note that _unallocated_block is not updated here.
 314     }
 315   }
 316 }
 317 
 318 // Walks the entire dictionary, returning a coterminal
 319 // chunk, if it exists. Use with caution since it involves
 320 // a potentially complete walk of a potentially large tree.
 321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 322 
 323   assert_lock_strong(&_freelistLock);
 324 
 325   return dictionary()->find_chunk_ends_at(end());
 326 }
 327 
 328 
 329 #ifndef PRODUCT
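     // Non-product helpers for the returned_bytes statistic kept by each of the
     // indexed free lists: the first resets the counters, the second sums them.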
 330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 331   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 332     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 333   }
 334 }
 335 
 336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 337   size_t sum = 0;
 338   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 339     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 340   }
 341   return sum;
 342 }
 343 
 344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 345   size_t count = 0;
 346   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 347     debug_only(
 348       ssize_t total_list_count = 0;
 349       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 350          fc = fc->next()) {
 351         total_list_count++;
 352       }
 353       assert(total_list_count ==  _indexedFreeList[i].count(),
 354         "Count in list is incorrect");
 355     )
 356     count += _indexedFreeList[i].count();
 357   }
 358   return count;
 359 }
 360 
 361 size_t CompactibleFreeListSpace::totalCount() {
 362   size_t num = totalCountInIndexedFreeLists();
 363   num +=  dictionary()->total_count();
 364   if (_smallLinearAllocBlock._word_size != 0) {
 365     num++;
 366   }
 367   return num;
 368 }
 369 #endif
 370 
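     // Answers whether the block starting at p is currently a free chunk.
     // The caller is expected to pass the start of a block.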
 371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 372   FreeChunk* fc = (FreeChunk*) p;
 373   return fc->is_free();
 374 }
 375 
 376 size_t CompactibleFreeListSpace::used() const {
 377   return capacity() - free();
 378 }
 379 
 380 size_t CompactibleFreeListSpace::free() const {
 381   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 382   // if you do this while the structures are in flux you
 383   // may get an approximate answer only; for instance
 384   // because there is concurrent allocation either
 385   // directly by mutators or for promotion during a GC.
 386   // It's "MT-safe", however, in the sense that you are guaranteed
 387   // not to crash and burn, for instance, because of walking
 388   // pointers that could disappear as you were walking them.
 389   // The approximation is because the various components
 390   // that are read below are not read atomically (and,
 391   // further, the computation of totalSizeInIndexedFreeLists()
 392   // is itself a non-atomic computation). The normal use of
 393   // this is during a resize operation at the end of GC
 394   // and at that time you are guaranteed to get the
 395   // correct actual value. However, for instance, this is
 396   // also read completely asynchronously by the "perf-sampler"
 397   // that supports jvmstat, and you are apt to see the values
 398   // flicker in such cases.
 399   assert(_dictionary != NULL, "No _dictionary?");
 400   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 401           totalSizeInIndexedFreeLists() +
 402           _smallLinearAllocBlock._word_size) * HeapWordSize;
 403 }
 404 
 405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 406   assert(_dictionary != NULL, "No _dictionary?");
 407   assert_locked();
 408   size_t res = _dictionary->max_chunk_size();
 409   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 410                        (size_t) SmallForLinearAlloc - 1));
 411   // XXX the following could potentially be pretty slow;
 412   // should one, pessimistically, just return the res calculated
 413   // above for the rare cases when it is less than IndexSetSize?
 414   // My reasoning was that
 415   // those cases will be so rare that the extra time spent doesn't
 416   // really matter....
 417   // Note: do not change the loop test i >= res + IndexSetStride
 418   // to i > res below, because i is unsigned and res may be zero.
 419   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 420        i -= IndexSetStride) {
 421     if (_indexedFreeList[i].head() != NULL) {
 422       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 423       return i;
 424     }
 425   }
 426   return res;
 427 }
 428 
 429 void LinearAllocBlock::print_on(outputStream* st) const {
 430   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 431             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 432             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 433 }
 434 
 435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 436   st->print_cr("COMPACTIBLE FREELIST SPACE");
 437   st->print_cr(" Space:");
 438   Space::print_on(st);
 439 
 440   st->print_cr("promoInfo:");
 441   _promoInfo.print_on(st);
 442 
 443   st->print_cr("_smallLinearAllocBlock");
 444   _smallLinearAllocBlock.print_on(st);
 445 
 446   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 447 
 448   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 449                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 450 }
 451 
 452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 453 const {
 454   reportIndexedFreeListStatistics();
 455   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 456   gclog_or_tty->print_cr("---------------------------");
 457   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 458   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 459     _indexedFreeList[i].print_on(gclog_or_tty);
 460     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 461          fc = fc->next()) {
 462       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 463                           p2i(fc), p2i((HeapWord*)fc + i),
 464                           fc->cantCoalesce() ? "\t CC" : "");
 465     }
 466   }
 467 }
 468 
 469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 470 const {
 471   _promoInfo.print_on(st);
 472 }
 473 
 474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 475 const {
 476   _dictionary->report_statistics();
 477   st->print_cr("Layout of Freelists in Tree");
 478   st->print_cr("---------------------------");
 479   _dictionary->print_free_lists(st);
 480 }
 481 
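     // A blk closure that prints every block in the space: objects are reported
     // as live or dead (dead only once the remark phase has completed, using the
     // live bit map), and free chunks are reported with their sizes.  Used by
     // dump_at_safepoint_with_locks() below.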
 482 class BlkPrintingClosure: public BlkClosure {
 483   const CMSCollector*             _collector;
 484   const CompactibleFreeListSpace* _sp;
 485   const CMSBitMap*                _live_bit_map;
 486   const bool                      _post_remark;
 487   outputStream*                   _st;
 488 public:
 489   BlkPrintingClosure(const CMSCollector* collector,
 490                      const CompactibleFreeListSpace* sp,
 491                      const CMSBitMap* live_bit_map,
 492                      outputStream* st):
 493     _collector(collector),
 494     _sp(sp),
 495     _live_bit_map(live_bit_map),
 496     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 497     _st(st) { }
 498   size_t do_blk(HeapWord* addr);
 499 };
 500 
 501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 502   size_t sz = _sp->block_size_no_stall(addr, _collector);
 503   assert(sz != 0, "Should always be able to compute a size");
 504   if (_sp->block_is_obj(addr)) {
 505     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 506     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 507       p2i(addr),
 508       dead ? "dead" : "live",
 509       sz,
 510       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 511     if (CMSPrintObjectsInDump && !dead) {
 512       oop(addr)->print_on(_st);
 513       _st->print_cr("--------------------------------------");
 514     }
 515   } else { // free block
 516     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 517       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 518     if (CMSPrintChunksInDump) {
 519       ((FreeChunk*)addr)->print_on(_st);
 520       _st->print_cr("--------------------------------------");
 521     }
 522   }
 523   return sz;
 524 }
 525 
 526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 527   outputStream* st) {
 528   st->print_cr("\n=========================");
 529   st->print_cr("Block layout in CMS Heap:");
 530   st->print_cr("=========================");
 531   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 532   blk_iterate(&bpcl);
 533 
 534   st->print_cr("\n=======================================");
 535   st->print_cr("Order & Layout of Promotion Info Blocks");
 536   st->print_cr("=======================================");
 537   print_promo_info_blocks(st);
 538 
 539   st->print_cr("\n===========================");
 540   st->print_cr("Order of Indexed Free Lists");
 541   st->print_cr("===========================");
 542   print_indexed_free_lists(st);
 543 
 544   st->print_cr("\n=================================");
 545   st->print_cr("Order of Free Lists in Dictionary");
 546   st->print_cr("=================================");
 547   print_dictionary_free_lists(st);
 548 }
 549 
 550 
 551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 552   assert_lock_strong(&_freelistLock);
 553   assert(PrintFLSStatistics != 0, "Reporting error");
 554   _dictionary->report_statistics();
 555   if (PrintFLSStatistics > 1) {
 556     reportIndexedFreeListStatistics();
 557     size_t total_size = totalSizeInIndexedFreeLists() +
 558                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 559     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 560   }
 561 }
 562 
 563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 564   assert_lock_strong(&_freelistLock);
 565   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 566                       "--------------------------------\n");
 567   size_t total_size = totalSizeInIndexedFreeLists();
 568   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 569   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 570   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 571   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 572   if (free_blocks != 0) {
 573     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 574   }
 575 }
 576 
 577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 578   size_t res = 0;
 579   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 580     debug_only(
 581       ssize_t recount = 0;
 582       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 583          fc = fc->next()) {
 584         recount += 1;
 585       }
 586       assert(recount == _indexedFreeList[i].count(),
 587         "Incorrect count in list");
 588     )
 589     res += _indexedFreeList[i].count();
 590   }
 591   return res;
 592 }
 593 
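     // Scan the indexed free lists from the largest size downwards and return
     // the size, in words, of the largest non-empty list; 0 if all are empty.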
 594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 595   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 596     if (_indexedFreeList[i].head() != NULL) {
 597       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 598       return (size_t)i;
 599     }
 600   }
 601   return 0;
 602 }
 603 
 604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 605   HeapWord* prevEnd = end();
 606   assert(prevEnd != value, "unnecessary set_end call");
 607   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 608         "New end is below unallocated block");
 609   _end = value;
 610   if (prevEnd != NULL) {
 611     // Resize the underlying block offset table.
 612     _bt.resize(pointer_delta(value, bottom()));
 613     if (value <= prevEnd) {
 614       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 615              "New end is below unallocated block");
 616     } else {
 617       // Now, take this new chunk and add it to the free blocks.
 618       // Note that the BOT has not yet been updated for this block.
 619       size_t newFcSize = pointer_delta(value, prevEnd);
 620       // XXX This is REALLY UGLY and should be fixed up. XXX
 621       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 622         // Mark the boundary of the new block in BOT
 623         _bt.mark_block(prevEnd, value);
 624         // put it all in the linAB
 625         MutexLockerEx x(parDictionaryAllocLock(),
 626                         Mutex::_no_safepoint_check_flag);
 627         _smallLinearAllocBlock._ptr = prevEnd;
 628         _smallLinearAllocBlock._word_size = newFcSize;
 629         repairLinearAllocBlock(&_smallLinearAllocBlock);
 630         // Births of chunks put into a LinAB are not recorded.  Births
 631         // of chunks as they are allocated out of a LinAB are.
 632       } else {
 633         // Add the block to the free lists, if possible coalescing it
 634         // with the last free block, and update the BOT and census data.
 635         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 636       }
 637     }
 638   }
 639 }
 640 
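     // A Filtering_DCTOC specialized for CompactibleFreeListSpace: when walking
     // the blocks underlying a range of dirty cards it dispatches to either a
     // parallel or a serial variant, depending on the _parallel flag.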
 641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 642   CompactibleFreeListSpace* _cfls;
 643   CMSCollector* _collector;
 644   bool _parallel;
 645 protected:
 646   // Override.
 647 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 648   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 649                                        HeapWord* bottom, HeapWord* top, \
 650                                        ClosureType* cl);                \
 651       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 652                                        HeapWord* bottom, HeapWord* top, \
 653                                        ClosureType* cl);                \
 654     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 655                                        HeapWord* bottom, HeapWord* top, \
 656                                        ClosureType* cl)
 657   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 658   walk_mem_region_with_cl_DECL(FilteringClosure);
 659 
 660 public:
 661   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 662                       CMSCollector* collector,
 663                       ExtendedOopClosure* cl,
 664                       CardTableModRefBS::PrecisionStyle precision,
 665                       HeapWord* boundary,
 666                       bool parallel) :
 667     Filtering_DCTOC(sp, cl, precision, boundary),
 668     _cfls(sp), _collector(collector), _parallel(parallel) {}
 669 };
 670 
 671 // We de-virtualize the block-related calls below, since we know that our
 672 // space is a CompactibleFreeListSpace.
 673 
 674 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 675 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 676                                                  HeapWord* bottom,              \
 677                                                  HeapWord* top,                 \
 678                                                  ClosureType* cl) {             \
 679    if (_parallel) {                                                             \
 680      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 681    } else {                                                                     \
 682      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 683    }                                                                            \
 684 }                                                                               \
 685 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 686                                                       HeapWord* bottom,         \
 687                                                       HeapWord* top,            \
 688                                                       ClosureType* cl) {        \
 689   /* Skip parts that are before "mr", in case "block_start" sent us             \
 690      back too far. */                                                           \
 691   HeapWord* mr_start = mr.start();                                              \
 692   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 693   HeapWord* next = bottom + bot_size;                                           \
 694   while (next < mr_start) {                                                     \
 695     bottom = next;                                                              \
 696     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 697     next = bottom + bot_size;                                                   \
 698   }                                                                             \
 699                                                                                 \
 700   while (bottom < top) {                                                        \
 701     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 702         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 703                     oop(bottom)) &&                                             \
 704         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 705       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 706       bottom += _cfls->adjustObjectSize(word_sz);                               \
 707     } else {                                                                    \
 708       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 709     }                                                                           \
 710   }                                                                             \
 711 }                                                                               \
 712 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 713                                                         HeapWord* bottom,       \
 714                                                         HeapWord* top,          \
 715                                                         ClosureType* cl) {      \
 716   /* Skip parts that are before "mr", in case "block_start" sent us             \
 717      back too far. */                                                           \
 718   HeapWord* mr_start = mr.start();                                              \
 719   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 720   HeapWord* next = bottom + bot_size;                                           \
 721   while (next < mr_start) {                                                     \
 722     bottom = next;                                                              \
 723     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 724     next = bottom + bot_size;                                                   \
 725   }                                                                             \
 726                                                                                 \
 727   while (bottom < top) {                                                        \
 728     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 729         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 730                     oop(bottom)) &&                                             \
 731         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 732       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 733       bottom += _cfls->adjustObjectSize(word_sz);                               \
 734     } else {                                                                    \
 735       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 736     }                                                                           \
 737   }                                                                             \
 738 }
 739 
 740 // (There are only two of these, rather than N, because the split is due
 741 // only to the introduction of the FilteringClosure, a local part of the
 742 // impl of this abstraction.)
 743 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 745 
 746 DirtyCardToOopClosure*
 747 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 748                                       CardTableModRefBS::PrecisionStyle precision,
 749                                       HeapWord* boundary,
 750                                       bool parallel) {
 751   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
 752 }
 753 
 754 
 755 // Note on locking for the space iteration functions:
 756 // since the collector's iteration activities are concurrent with
 757 // allocation activities by mutators, absent a suitable mutual exclusion
 758 // mechanism the iterators may go awry. For instance a block being iterated
 759 // may suddenly be allocated or divided up and part of it allocated and
 760 // so on.
 761 
 762 // Apply the given closure to each block in the space.
 763 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 764   assert_lock_strong(freelistLock());
 765   HeapWord *cur, *limit;
 766   for (cur = bottom(), limit = end(); cur < limit;
 767        cur += cl->do_blk_careful(cur));
 768 }
 769 
 770 // Apply the given closure to each block in the space.
 771 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 772   assert_lock_strong(freelistLock());
 773   HeapWord *cur, *limit;
 774   for (cur = bottom(), limit = end(); cur < limit;
 775        cur += cl->do_blk(cur));
 776 }
 777 
 778 // Apply the given closure to each oop in the space.
 779 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 780   assert_lock_strong(freelistLock());
 781   HeapWord *cur, *limit;
 782   size_t curSize;
 783   for (cur = bottom(), limit = end(); cur < limit;
 784        cur += curSize) {
 785     curSize = block_size(cur);
 786     if (block_is_obj(cur)) {
 787       oop(cur)->oop_iterate(cl);
 788     }
 789   }
 790 }
 791 
 792 // NOTE: In the following methods, in order to safely be able to
 793 // apply the closure to an object, we need to be sure that the
 794 // object has been initialized. We are guaranteed that an object
 795 // is initialized if we are holding the Heap_lock with the
 796 // world stopped.
 797 void CompactibleFreeListSpace::verify_objects_initialized() const {
 798   if (is_init_completed()) {
 799     assert_locked_or_safepoint(Heap_lock);
 800     if (Universe::is_fully_initialized()) {
 801       guarantee(SafepointSynchronize::is_at_safepoint(),
 802                 "Required for objects to be initialized");
 803     }
 804   } // else make a concession at vm start-up
 805 }
 806 
 807 // Apply the given closure to each object in the space
 808 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 809   assert_lock_strong(freelistLock());
 810   NOT_PRODUCT(verify_objects_initialized());
 811   HeapWord *cur, *limit;
 812   size_t curSize;
 813   for (cur = bottom(), limit = end(); cur < limit;
 814        cur += curSize) {
 815     curSize = block_size(cur);
 816     if (block_is_obj(cur)) {
 817       blk->do_object(oop(cur));
 818     }
 819   }
 820 }
 821 
 822 // Apply the given closure to each live object in the space
 823 //   The usage of CompactibleFreeListSpace
 824 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
 825 // objects in the space to hold references to objects that are no longer
 826 // valid.  For example, an object may reference another object
 827 // that has already been swept up (collected).  This method uses
 828 // obj_is_alive() to determine whether it is safe to apply the closure to
 829 // an object.  See obj_is_alive() for details on how liveness of an
 830 // object is decided.
 831 
 832 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 833   assert_lock_strong(freelistLock());
 834   NOT_PRODUCT(verify_objects_initialized());
 835   HeapWord *cur, *limit;
 836   size_t curSize;
 837   for (cur = bottom(), limit = end(); cur < limit;
 838        cur += curSize) {
 839     curSize = block_size(cur);
 840     if (block_is_obj(cur) && obj_is_alive(cur)) {
 841       blk->do_object(oop(cur));
 842     }
 843   }
 844 }
 845 
 846 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 847                                                   UpwardsObjectClosure* cl) {
 848   assert_locked(freelistLock());
 849   NOT_PRODUCT(verify_objects_initialized());
 850   assert(!mr.is_empty(), "Should be non-empty");
 851   // We use MemRegion(bottom(), end()) rather than used_region() below
 852   // because the two are not necessarily equal for some kinds of
 853   // spaces, in particular, certain kinds of free list spaces.
 854   // We could use the more complicated but more precise:
 855   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 856   // but the slight imprecision seems acceptable in the assertion check.
 857   assert(MemRegion(bottom(), end()).contains(mr),
 858          "Should be within used space");
 859   HeapWord* prev = cl->previous();   // max address from last time
 860   if (prev >= mr.end()) { // nothing to do
 861     return;
 862   }
 863   // This assert will not work when we go from cms space to perm
 864   // space, and use the same closure. Easy fix deferred for later. XXX YSR
 865   // assert(prev == NULL || contains(prev), "Should be within space");
 866 
 867   bool last_was_obj_array = false;
 868   HeapWord *blk_start_addr, *region_start_addr;
 869   if (prev > mr.start()) {
 870     region_start_addr = prev;
 871     blk_start_addr    = prev;
 872     // The previous invocation may have pushed "prev" beyond the
 873     // last allocated block, yet there may still be blocks
 874     // in this region due to a particular coalescing policy.
 875     // Relax the assertion so that the case where the unallocated
 876     // block is maintained and "prev" is beyond the unallocated
 877     // block does not cause the assertion to fire.
 878     assert((BlockOffsetArrayUseUnallocatedBlock &&
 879             (!is_in(prev))) ||
 880            (blk_start_addr == block_start(region_start_addr)), "invariant");
 881   } else {
 882     region_start_addr = mr.start();
 883     blk_start_addr    = block_start(region_start_addr);
 884   }
 885   HeapWord* region_end_addr = mr.end();
 886   MemRegion derived_mr(region_start_addr, region_end_addr);
 887   while (blk_start_addr < region_end_addr) {
 888     const size_t size = block_size(blk_start_addr);
 889     if (block_is_obj(blk_start_addr)) {
 890       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 891     } else {
 892       last_was_obj_array = false;
 893     }
 894     blk_start_addr += size;
 895   }
 896   if (!last_was_obj_array) {
 897     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 898            "Should be within (closed) used space");
 899     assert(blk_start_addr > prev, "Invariant");
 900     cl->set_previous(blk_start_addr); // min address for next time
 901   }
 902 }
 903 
 904 // Callers of this iterator beware: The closure application should
 905 // be robust in the face of uninitialized objects and should (always)
 906 // return a correct size so that the next addr + size below gives us a
 907 // valid block boundary. [See for instance,
 908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 909 // in ConcurrentMarkSweepGeneration.cpp.]
 910 HeapWord*
 911 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 912   ObjectClosureCareful* cl) {
 913   assert_lock_strong(freelistLock());
 914   // Can't use used_region() below because it may not necessarily
 915   // be the same as [bottom(),end()); although we could
 916   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 917   // that appears too cumbersome, so we just do the simpler check
 918   // in the assertion below.
 919   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 920          "mr should be non-empty and within used space");
 921   HeapWord *addr, *end;
 922   size_t size;
 923   for (addr = block_start_careful(mr.start()), end  = mr.end();
 924        addr < end; addr += size) {
 925     FreeChunk* fc = (FreeChunk*)addr;
 926     if (fc->is_free()) {
 927       // Since we hold the free list lock, which protects direct
 928       // allocation in this generation by mutators, a free object
 929       // will remain free throughout this iteration code.
 930       size = fc->size();
 931     } else {
 932       // Note that the object need not necessarily be initialized,
 933       // because (for instance) the free list lock does NOT protect
 934       // object initialization. The closure application below must
 935       // therefore be correct in the face of uninitialized objects.
 936       size = cl->do_object_careful_m(oop(addr), mr);
 937       if (size == 0) {
 938         // An unparsable object found. Signal early termination.
 939         return addr;
 940       }
 941     }
 942   }
 943   return NULL;
 944 }
 945 
 946 
 947 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 948   NOT_PRODUCT(verify_objects_initialized());
 949   return _bt.block_start(p);
 950 }
 951 
 952 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 953   return _bt.block_start_careful(p);
 954 }
 955 
 956 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 957   NOT_PRODUCT(verify_objects_initialized());
 958   // This must be volatile, or else there is a danger that the compiler
 959   // will compile the code below into a sometimes-infinite loop, by keeping
 960   // the value read the first time in a register.
 961   while (true) {
 962     // We must do this until we get a consistent view of the object.
 963     if (FreeChunk::indicatesFreeChunk(p)) {
 964       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 965       size_t res = fc->size();
 966 
 967       // Bugfix for systems with weak memory model (PPC64/IA64). The
 968       // block's free bit was set and we have read the size of the
 969       // block. Acquire and check the free bit again. If the block is
 970       // still free, the read size is correct.
 971       OrderAccess::acquire();
 972 
 973       // If the object is still a free chunk, return the size, else it
 974       // has been allocated so try again.
 975       if (FreeChunk::indicatesFreeChunk(p)) {
 976         assert(res != 0, "Block size should not be 0");
 977         return res;
 978       }
 979     } else {
 980       // must read from what 'p' points to in each loop.
 981       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 982       if (k != NULL) {
 983         assert(k->is_klass(), "Should really be klass oop.");
 984         oop o = (oop)p;
 985         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 986 
 987         // Bugfix for systems with weak memory model (PPC64/IA64).
 988         // The object o may be an array. Acquire to make sure that the array
 989         // size (third word) is consistent.
 990         OrderAccess::acquire();
 991 
 992         size_t res = o->size_given_klass(k);
 993         res = adjustObjectSize(res);
 994         assert(res != 0, "Block size should not be 0");
 995         return res;
 996       }
 997     }
 998   }
 999 }
1000 
1001 // TODO: Now that is_parsable is gone, we should combine these two functions.
1002 // A variant of the above that uses the Printezis bits for
1003 // unparsable but allocated objects. This avoids any possible
1004 // stalls waiting for mutators to initialize objects, and is
1005 // thus potentially faster than the variant above. However,
1006 // this variant may return a zero size for a block that is
1007 // under mutation and for which a consistent size cannot be
1008 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1009 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1010                                                      const CMSCollector* c)
1011 const {
1012   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1013   // This must be volatile, or else there is a danger that the compiler
1014   // will compile the code below into a sometimes-infinite loop, by keeping
1015   // the value read the first time in a register.
1016   DEBUG_ONLY(uint loops = 0;)
1017   while (true) {
1018     // We must do this until we get a consistent view of the object.
1019     if (FreeChunk::indicatesFreeChunk(p)) {
1020       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1021       size_t res = fc->size();
1022 
1023       // Bugfix for systems with weak memory model (PPC64/IA64). The
1024       // free bit of the block was set and we have read the size of
1025       // the block. Acquire and check the free bit again. If the
1026       // block is still free, the read size is correct.
1027       OrderAccess::acquire();
1028 
1029       if (FreeChunk::indicatesFreeChunk(p)) {
1030         assert(res != 0, "Block size should not be 0");
1031         assert(loops == 0, "Should be 0");
1032         return res;
1033       }
1034     } else {
1035       // must read from what 'p' points to in each loop.
1036       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1037       // We trust the size of any object that has a non-NULL
1038       // klass and (for those in the perm gen) is parsable
1039       // -- irrespective of its conc_safe-ty.
1040       if (k != NULL) {
1041         assert(k->is_klass(), "Should really be klass oop.");
1042         oop o = (oop)p;
1043         assert(o->is_oop(), "Should be an oop");
1044 
1045         // Bugfix for systems with weak memory model (PPC64/IA64).
1046         // The object o may be an array. Acquire to make sure that the array
1047         // size (third word) is consistent.
1048         OrderAccess::acquire();
1049 
1050         size_t res = o->size_given_klass(k);
1051         res = adjustObjectSize(res);
1052         assert(res != 0, "Block size should not be 0");
1053         return res;
1054       } else {
1055         // May return 0 if P-bits not present.
1056         return c->block_size_if_printezis_bits(p);
1057       }
1058     }
1059     assert(loops == 0, "Can loop at most once");
1060     DEBUG_ONLY(loops++;)
1061   }
1062 }
1063 
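     // Variant of block_size() that may be used only when the block cannot be
     // changed concurrently (e.g. coalesced or split), so the free bit and size
     // can be read directly without the retry protocol used above.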
1064 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1065   NOT_PRODUCT(verify_objects_initialized());
1066   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1067   FreeChunk* fc = (FreeChunk*)p;
1068   if (fc->is_free()) {
1069     return fc->size();
1070   } else {
1071     // Ignore mark word because this may be a recently promoted
1072     // object whose mark word is used to chain together grey
1073     // objects (the last one would have a null value).
1074     assert(oop(p)->is_oop(true), "Should be an oop");
1075     return adjustObjectSize(oop(p)->size());
1076   }
1077 }
1078 
1079 // This implementation assumes that the property of "being an object" is
1080 // stable.  But being a free chunk may not be (because of parallel
1081 // promotion).
1082 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1083   FreeChunk* fc = (FreeChunk*)p;
1084   assert(is_in_reserved(p), "Should be in space");
1085   if (FreeChunk::indicatesFreeChunk(p)) return false;
1086   Klass* k = oop(p)->klass_or_null();
1087   if (k != NULL) {
1088     // Ignore mark word because it may have been used to
1089     // chain together promoted objects (the last one
1090     // would have a null value).
1091     assert(oop(p)->is_oop(true), "Should be an oop");
1092     return true;
1093   } else {
1094     return false;  // Was not an object at the start of collection.
1095   }
1096 }
1097 
1098 // Check if the object is alive. This fact is checked either by consulting
1099 // the main marking bitmap in the sweeping phase or, if it's a permanent
1100 // generation and we're not in the sweeping phase, by checking the
1101 // perm_gen_verify_bit_map where we store the "deadness" information if
1102 // we did not sweep the perm gen in the most recent previous GC cycle.
1103 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1104   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1105          "Else races are possible");
1106   assert(block_is_obj(p), "The address should point to an object");
1107 
1108   // If we're sweeping, we use object liveness information from the main bit map
1109   // for both perm gen and old gen.
1110   // We don't need to lock the bitmap (live_map or dead_map below), because
1111   // EITHER we are in the middle of the sweeping phase, and the
1112   // main marking bit map (live_map below) is locked,
1113   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1114   // is stable, because it's mutated only in the sweeping phase.
1115   // NOTE: This method is also used by jmap where, if class unloading is
1116   // off, the results can return "false" for legitimate perm objects,
1117   // when we are not in the midst of a sweeping phase, which can result
1118   // in jmap not reporting certain perm gen objects. This will be moot
1119   // if/when the perm gen goes away in the future.
1120   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1121     CMSBitMap* live_map = _collector->markBitMap();
1122     return live_map->par_isMarked((HeapWord*) p);
1123   }
1124   return true;
1125 }
1126 
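     // Variant of block_is_obj() for use when the block's free/allocated state
     // cannot change concurrently; here the chunk's free bit can be trusted
     // directly.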
1127 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1128   FreeChunk* fc = (FreeChunk*)p;
1129   assert(is_in_reserved(p), "Should be in space");
1130   assert(_bt.block_start(p) == p, "Should be a block boundary");
1131   if (!fc->is_free()) {
1132     // Ignore mark word because it may have been used to
1133     // chain together promoted objects (the last one
1134     // would have a null value).
1135     assert(oop(p)->is_oop(true), "Should be an oop");
1136     return true;
1137   }
1138   return false;
1139 }
1140 
1141 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1142 // approximate answer if you don't hold the freelistLock when you call this.
1143 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1144   size_t size = 0;
1145   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1146     debug_only(
1147       // We may be calling here without the lock in which case we
1148       // won't do this modest sanity check.
1149       if (freelistLock()->owned_by_self()) {
1150         size_t total_list_size = 0;
1151         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1152           fc = fc->next()) {
1153           total_list_size += i;
1154         }
1155         assert(total_list_size == i * _indexedFreeList[i].count(),
1156                "Count in list is incorrect");
1157       }
1158     )
1159     size += i * _indexedFreeList[i].count();
1160   }
1161   return size;
1162 }
1163 
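     // MT-safe allocation entry point: takes the free list lock (without a
     // safepoint check) and delegates to allocate(), which expects the lock to
     // be held.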
1164 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1165   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1166   return allocate(size);
1167 }
1168 
1169 HeapWord*
1170 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1171   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1172 }
1173 
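     // Allocate a block of the given, already adjusted, size under the free list
     // lock, using either the adaptive or the non-adaptive scheme, and mark the
     // result so that it no longer looks like a free chunk.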
1174 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1175   assert_lock_strong(freelistLock());
1176   HeapWord* res = NULL;
1177   assert(size == adjustObjectSize(size),
1178          "use adjustObjectSize() before calling into allocate()");
1179 
1180   if (_adaptive_freelists) {
1181     res = allocate_adaptive_freelists(size);
1182   } else {  // non-adaptive free lists
1183     res = allocate_non_adaptive_freelists(size);
1184   }
1185 
1186   if (res != NULL) {
1187     // check that res does lie in this space!
1188     assert(is_in_reserved(res), "Not in this space!");
1189     assert(is_aligned((void*)res), "alignment check");
1190 
1191     FreeChunk* fc = (FreeChunk*)res;
1192     fc->markNotFree();
1193     assert(!fc->is_free(), "shouldn't be marked free");
1194     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1195     // Verify that the block offset table shows this to
1196     // be a single block, but not one which is unallocated.
1197     _bt.verify_single_block(res, size);
1198     _bt.verify_not_unallocated(res, size);
1199     // mangle a just allocated object with a distinct pattern.
1200     debug_only(fc->mangleAllocated(size));
1201   }
1202 
1203   return res;
1204 }
1205 
1206 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1207   HeapWord* res = NULL;
1208   // Try to use linear allocation for smaller blocks.
1209   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1210     // if successful, the following also adjusts block offset table
1211     res = getChunkFromSmallLinearAllocBlock(size);
1212   }
1213   // Else triage to indexed lists for smaller sizes
1214   if (res == NULL) {
1215     if (size < SmallForDictionary) {
1216       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1217     } else {
1218       // else get it from the big dictionary; if even this doesn't
1219       // work we are out of luck.
1220       res = (HeapWord*)getChunkFromDictionaryExact(size);
1221     }
1222   }
1223 
1224   return res;
1225 }
1226 
1227 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1228   assert_lock_strong(freelistLock());
1229   HeapWord* res = NULL;
1230   assert(size == adjustObjectSize(size),
1231          "use adjustObjectSize() before calling into allocate()");
1232 
1233   // Strategy
1234   //   if small
1235   //     exact size from small object indexed list
1236   //     small or large linear allocation block (linAB) as appropriate
1237   //     take from lists of greater-sized chunks
1238   //   else
1239   //     dictionary
1240   //     small or large linear allocation block if it has the space
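       // Illustrative walk of the strategy above (sizes are arbitrary):
       //   - an 8-word request (< IndexSetSize) first tries _indexedFreeList[8],
       //     then the small linAB, then larger indexed lists via getChunkFromGreater();
       //   - a 300-word request goes straight to the dictionary, falling back to
       //     the small linAB remainder only if the dictionary allocation fails.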
1241   // Try allocating exact size from indexTable first
1242   if (size < IndexSetSize) {
1243     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1244     if(res != NULL) {
1245       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1246         "Not removed from free list");
1247       // no block offset table adjustment is necessary on blocks in
1248       // the indexed lists.
1249 
1250     // Try allocating from the small LinAB
1251     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1252         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1253         // If successful, the above also adjusts the block offset table.
1254         // Note that this call will refill the LinAB to
1255         // satisfy the request.  This is different from
1256         // evm.
1257         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1258     } else {
1259       // Raid the exact free lists larger than size, even if they are not
1260       // overpopulated.
1261       res = (HeapWord*) getChunkFromGreater(size);
1262     }
1263   } else {
1264     // Big objects get allocated directly from the dictionary.
1265     res = (HeapWord*) getChunkFromDictionaryExact(size);
1266     if (res == NULL) {
1267       // Try hard not to fail since an allocation failure will likely
1268       // trigger a synchronous GC.  Try to get the space from the
1269       // allocation blocks.
1270       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1271     }
1272   }
1273 
1274   return res;
1275 }
1276 
1277 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1278 // when promoting obj.
1279 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1280   // Depending on the object size, expansion may require refilling either a
1281   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1282   // is added because the dictionary may over-allocate to avoid fragmentation.
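       // Worked example of the estimate computed below (values illustrative only):
       // with obj_size == 100 words, a PromotionInfo refill of R words, and a
       // non-adaptive small linAB refill of L words, the estimate is
       //   MAX2(100, L) + R + 2 * MinChunkSize
       // (just 100 + R + 2 * MinChunkSize when adaptive free lists are in use).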
1283   size_t space = obj_size;
1284   if (!_adaptive_freelists) {
1285     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1286   }
1287   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1288   return space;
1289 }
1290 
1291 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1292   FreeChunk* ret;
1293 
1294   assert(numWords >= MinChunkSize, "Size is less than minimum");
1295   assert(linearAllocationWouldFail() || bestFitFirst(),
1296     "Should not be here");
1297 
1298   size_t i;
1299   size_t currSize = numWords + MinChunkSize;
1300   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1301   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1302     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1303     if (fl->head()) {
1304       ret = getFromListGreater(fl, numWords);
1305       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1306       return ret;
1307     }
1308   }
1309 
1310   currSize = MAX2((size_t)SmallForDictionary,
1311                   (size_t)(numWords + MinChunkSize));
1312 
1313   /* Try to get a chunk that satisfies request, while avoiding
1314      fragmentation that can't be handled. */
1315   {
1316     ret =  dictionary()->get_chunk(currSize);
1317     if (ret != NULL) {
1318       assert(ret->size() - numWords >= MinChunkSize,
1319              "Chunk is too small");
1320       _bt.allocated((HeapWord*)ret, ret->size());
1321       /* Carve returned chunk. */
1322       (void) splitChunkAndReturnRemainder(ret, numWords);
1323       /* Label this as no longer a free chunk. */
1324       assert(ret->is_free(), "This chunk should be free");
1325       ret->link_prev(NULL);
1326     }
1327     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1328     return ret;
1329   }
1330   ShouldNotReachHere();
1331 }
1332 
1333 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1334   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1335   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1336 }
1337 
1338 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1339   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1340          (_smallLinearAllocBlock._word_size == fc->size()),
1341          "Linear allocation block shows incorrect size");
1342   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1343           (_smallLinearAllocBlock._word_size == fc->size()));
1344 }
1345 
1346 // Check if the purported free chunk is present either as a linear
1347 // allocation block, the size-indexed table of (smaller) free blocks,
1348 // or the larger free blocks kept in the binary tree dictionary.
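     // (The order of the checks below mirrors where a chunk of a given size
     // would normally live: the linAB first, then the indexed lists for sizes
     // below IndexSetSize, and the dictionary for everything larger.)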
1349 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1350   if (verify_chunk_is_linear_alloc_block(fc)) {
1351     return true;
1352   } else if (fc->size() < IndexSetSize) {
1353     return verifyChunkInIndexedFreeLists(fc);
1354   } else {
1355     return dictionary()->verify_chunk_in_free_list(fc);
1356   }
1357 }
1358 
1359 #ifndef PRODUCT
1360 void CompactibleFreeListSpace::assert_locked() const {
1361   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1362 }
1363 
1364 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1365   CMSLockVerifier::assert_locked(lock);
1366 }
1367 #endif
1368 
1369 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1370   // In the parallel case, the main thread holds the free list lock
1371   // on behalf of the parallel threads.
1372   FreeChunk* fc;
1373   {
1374     // If GC is parallel, this might be called by several threads.
1375     // This should be rare enough that the locking overhead won't affect
1376     // the sequential code.
1377     MutexLockerEx x(parDictionaryAllocLock(),
1378                     Mutex::_no_safepoint_check_flag);
1379     fc = getChunkFromDictionary(size);
1380   }
1381   if (fc != NULL) {
1382     fc->dontCoalesce();
1383     assert(fc->is_free(), "Should be free, but not coalescable");
1384     // Verify that the block offset table shows this to
1385     // be a single block, but not one which is unallocated.
1386     _bt.verify_single_block((HeapWord*)fc, fc->size());
1387     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1388   }
1389   return fc;
1390 }
1391 
1392 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1393   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1394   assert_locked();
1395 
1396   // if we are tracking promotions, then first ensure space for
1397   // promotion (including spooling space for saving header if necessary).
1398   // then allocate and copy, then track promoted info if needed.
1399   // When tracking (see PromotionInfo::track()), the mark word may
1400   // be displaced and in this case restoration of the mark word
1401   // occurs in the (oop_since_save_marks_)iterate phase.
1402   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1403     return NULL;
1404   }
1405   // Call the allocate(size_t, bool) form directly to avoid the
1406   // additional call through the allocate(size_t) form.  Having
1407   // the compiler inline the call is problematic because allocate(size_t)
1408   // is a virtual method.
1409   HeapWord* res = allocate(adjustObjectSize(obj_size));
1410   if (res != NULL) {
1411     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1412     // if we should be tracking promotions, do so.
1413     if (_promoInfo.tracking()) {
1414         _promoInfo.track((PromotedObject*)res);
1415     }
1416   }
1417   return oop(res);
1418 }
1419 
1420 HeapWord*
1421 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1422   assert_locked();
1423   assert(size >= MinChunkSize, "minimum chunk size");
1424   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1425     "maximum from smallLinearAllocBlock");
1426   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1427 }
1428 
1429 HeapWord*
1430 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1431                                                        size_t size) {
1432   assert_locked();
1433   assert(size >= MinChunkSize, "too small");
1434   HeapWord* res = NULL;
1435   // Try to do linear allocation from blk, making sure first that it has not been exhausted.
1436   if (blk->_word_size == 0) {
1437     // We have probably been unable to fill this either in the prologue or
1438     // when it was exhausted at the last linear allocation. Bail out until
1439     // next time.
1440     assert(blk->_ptr == NULL, "consistency check");
1441     return NULL;
1442   }
1443   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1444   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1445   if (res != NULL) return res;
1446 
1447   // about to exhaust this linear allocation block
1448   if (blk->_word_size == size) { // exactly satisfied
1449     res = blk->_ptr;
1450     _bt.allocated(res, blk->_word_size);
1451   } else if (size + MinChunkSize <= blk->_refillSize) {
1452     size_t sz = blk->_word_size;
1453     // Update _unallocated_block if the size is such that the chunk would be
1454     // returned to the indexed free list.  All other chunks in the indexed
1455     // free lists are allocated from the dictionary so that _unallocated_block
1456     // has already been adjusted for them.  Do it here so that the cost is
1457     // incurred uniformly for all chunks added back to the indexed free lists.
1458     if (sz < SmallForDictionary) {
1459       _bt.allocated(blk->_ptr, sz);
1460     }
1461     // Return the chunk that isn't big enough, and then refill below.
1462     addChunkToFreeLists(blk->_ptr, sz);
1463     split_birth(sz);
1464     // Don't keep statistics on adding back chunk from a LinAB.
1465   } else {
1466     // A refilled block would not satisfy the request.
1467     return NULL;
1468   }
1469 
1470   blk->_ptr = NULL; blk->_word_size = 0;
1471   refillLinearAllocBlock(blk);
1472   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1473          "block was replenished");
1474   if (res != NULL) {
1475     split_birth(size);
1476     repairLinearAllocBlock(blk);
1477   } else if (blk->_ptr != NULL) {
1478     res = blk->_ptr;
1479     size_t blk_size = blk->_word_size;
1480     blk->_word_size -= size;
1481     blk->_ptr  += size;
1482     split_birth(size);
1483     repairLinearAllocBlock(blk);
1484     // Update BOT last so that other (parallel) GC threads see a consistent
1485     // view of the BOT and free blocks.
1486     // Above must occur before BOT is updated below.
1487     OrderAccess::storestore();
1488     _bt.split_block(res, blk_size, size);  // adjust block offset table
1489   }
1490   return res;
1491 }
1492 
1493 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1494                                         LinearAllocBlock* blk,
1495                                         size_t size) {
1496   assert_locked();
1497   assert(size >= MinChunkSize, "too small");
1498 
1499   HeapWord* res = NULL;
1500   // This is the common case.  Keep it simple.
1501   if (blk->_word_size >= size + MinChunkSize) {
1502     assert(blk->_ptr != NULL, "consistency check");
1503     res = blk->_ptr;
1504     // Note that the BOT is up-to-date for the linAB before allocation.  It
1505     // indicates the start of the linAB.  The split_block() updates the
1506     // BOT for the linAB after the allocation (indicates the start of the
1507     // next chunk to be allocated).
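         // Illustrative example (sizes arbitrary): carving a 16-word chunk out
         // of a 64-word linAB returns the old _ptr, leaves a 48-word remainder
         // starting 16 words further on, and split_block() below records that
         // new remainder start in the BOT.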
1508     size_t blk_size = blk->_word_size;
1509     blk->_word_size -= size;
1510     blk->_ptr  += size;
1511     split_birth(size);
1512     repairLinearAllocBlock(blk);
1513     // Update BOT last so that other (parallel) GC threads see a consistent
1514     // view of the BOT and free blocks.
1515     // Above must occur before BOT is updated below.
1516     OrderAccess::storestore();
1517     _bt.split_block(res, blk_size, size);  // adjust block offset table
1518     _bt.allocated(res, size);
1519   }
1520   return res;
1521 }
1522 
1523 FreeChunk*
1524 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1525   assert_locked();
1526   assert(size < SmallForDictionary, "just checking");
1527   FreeChunk* res;
1528   res = _indexedFreeList[size].get_chunk_at_head();
1529   if (res == NULL) {
1530     res = getChunkFromIndexedFreeListHelper(size);
1531   }
1532   _bt.verify_not_unallocated((HeapWord*) res, size);
1533   assert(res == NULL || res->size() == size, "Incorrect block size");
1534   return res;
1535 }
1536 
1537 FreeChunk*
1538 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1539   bool replenish) {
1540   assert_locked();
1541   FreeChunk* fc = NULL;
1542   if (size < SmallForDictionary) {
1543     assert(_indexedFreeList[size].head() == NULL ||
1544       _indexedFreeList[size].surplus() <= 0,
1545       "List for this size should be empty or under populated");
1546     // Try best fit in exact lists before replenishing the list
1547     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1548       // Replenish list.
1549       //
1550       // Things tried that failed.
1551       //   Tried allocating out of the two LinAB's first before
1552       // replenishing lists.
1553       //   Tried small linAB of size 256 (size in indexed list)
1554       // and replenishing indexed lists from the small linAB.
1555       //
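           // Illustrative example (factor and size are arbitrary): with
           // CMSIndexedFreeListReplenish == 4 and size == 8, we look for a
           // 32-word chunk below; if one is found it is carved into four
           // 8-word blocks -- three go onto _indexedFreeList[8] and the
           // fourth is returned to the caller.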
1556       FreeChunk* newFc = NULL;
1557       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1558       if (replenish_size < SmallForDictionary) {
1559         // Do not replenish from an underpopulated size.
1560         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1561             _indexedFreeList[replenish_size].head() != NULL) {
1562           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1563         } else if (bestFitFirst()) {
1564           newFc = bestFitSmall(replenish_size);
1565         }
1566       }
1567       if (newFc == NULL && replenish_size > size) {
1568         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1569         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1570       }
1571       // Note: The stats update re the split-death of the block obtained above
1572       // will be recorded below precisely when we know we are actually
1573       // going to split it into more than one piece.
1574       if (newFc != NULL) {
1575         if  (replenish || CMSReplenishIntermediate) {
1576           // Replenish this list and return one block to caller.
1577           size_t i;
1578           FreeChunk *curFc, *nextFc;
1579           size_t num_blk = newFc->size() / size;
1580           assert(num_blk >= 1, "Smaller than requested?");
1581           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1582           if (num_blk > 1) {
1583             // we are sure we will be splitting the block just obtained
1584             // into multiple pieces; record the split-death of the original
1585             splitDeath(replenish_size);
1586           }
1587           // carve up and link blocks 0, ..., num_blk - 2
1588           // The last chunk is not added to the lists but is returned as the
1589           // free chunk.
1590           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1591                i = 0;
1592                i < (num_blk - 1);
1593                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1594                i++) {
1595             curFc->set_size(size);
1596             // Don't record this as a return in order to try to
1597             // determine the "returns" from a GC.
1598             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1599             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1600             _bt.mark_block((HeapWord*)curFc, size);
1601             split_birth(size);
1602             // Don't record the initial population of the indexed list
1603             // as a split birth.
1604           }
1605 
1606           // check that the arithmetic was OK above
1607           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1608             "inconsistency in carving newFc");
1609           curFc->set_size(size);
1610           _bt.mark_block((HeapWord*)curFc, size);
1611           split_birth(size);
1612           fc = curFc;
1613         } else {
1614           // Return entire block to caller
1615           fc = newFc;
1616         }
1617       }
1618     }
1619   } else {
1620     // Get a free chunk from the free chunk dictionary to be returned to
1621     // replenish the indexed free list.
1622     fc = getChunkFromDictionaryExact(size);
1623   }
1624   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1625   return fc;
1626 }
1627 
1628 FreeChunk*
1629 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1630   assert_locked();
1631   FreeChunk* fc = _dictionary->get_chunk(size,
1632                                          FreeBlockDictionary<FreeChunk>::atLeast);
1633   if (fc == NULL) {
1634     return NULL;
1635   }
1636   _bt.allocated((HeapWord*)fc, fc->size());
1637   if (fc->size() >= size + MinChunkSize) {
1638     fc = splitChunkAndReturnRemainder(fc, size);
1639   }
1640   assert(fc->size() >= size, "chunk too small");
1641   assert(fc->size() < size + MinChunkSize, "chunk too big");
1642   _bt.verify_single_block((HeapWord*)fc, fc->size());
1643   return fc;
1644 }
1645 
1646 FreeChunk*
1647 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1648   assert_locked();
1649   FreeChunk* fc = _dictionary->get_chunk(size,
1650                                          FreeBlockDictionary<FreeChunk>::atLeast);
1651   if (fc == NULL) {
1652     return fc;
1653   }
1654   _bt.allocated((HeapWord*)fc, fc->size());
1655   if (fc->size() == size) {
1656     _bt.verify_single_block((HeapWord*)fc, size);
1657     return fc;
1658   }
1659   assert(fc->size() > size, "get_chunk() guarantee");
1660   if (fc->size() < size + MinChunkSize) {
1661     // Return the chunk to the dictionary and go get a bigger one.
1662     returnChunkToDictionary(fc);
1663     fc = _dictionary->get_chunk(size + MinChunkSize,
1664                                 FreeBlockDictionary<FreeChunk>::atLeast);
1665     if (fc == NULL) {
1666       return NULL;
1667     }
1668     _bt.allocated((HeapWord*)fc, fc->size());
1669   }
1670   assert(fc->size() >= size + MinChunkSize, "tautology");
1671   fc = splitChunkAndReturnRemainder(fc, size);
1672   assert(fc->size() == size, "chunk is wrong size");
1673   _bt.verify_single_block((HeapWord*)fc, size);
1674   return fc;
1675 }
1676 
1677 void
1678 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1679   assert_locked();
1680 
1681   size_t size = chunk->size();
1682   _bt.verify_single_block((HeapWord*)chunk, size);
1683   // adjust _unallocated_block downward, as necessary
1684   _bt.freed((HeapWord*)chunk, size);
1685   _dictionary->return_chunk(chunk);
1686 #ifndef PRODUCT
1687   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1688     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1689     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1690     tl->verify_stats();
1691   }
1692 #endif // PRODUCT
1693 }
1694 
1695 void
1696 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1697   assert_locked();
1698   size_t size = fc->size();
1699   _bt.verify_single_block((HeapWord*) fc, size);
1700   _bt.verify_not_unallocated((HeapWord*) fc, size);
1701   if (_adaptive_freelists) {
1702     _indexedFreeList[size].return_chunk_at_tail(fc);
1703   } else {
1704     _indexedFreeList[size].return_chunk_at_head(fc);
1705   }
1706 #ifndef PRODUCT
1707   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1708      _indexedFreeList[size].verify_stats();
1709   }
1710 #endif // PRODUCT
1711 }
1712 
1713 // Add chunk to end of last block -- if it's the largest
1714 // block -- and update BOT and census data. We would
1715 // of course have preferred to coalesce it with the
1716 // last block, but it's currently less expensive to find the
1717 // largest block than it is to find the last.
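     // In other words: if the largest dictionary block happens to end exactly
     // where the incoming chunk begins (a coterminal block), the two are merged
     // and re-added as one larger free chunk; otherwise the chunk is added on
     // its own.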
1718 void
1719 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1720   HeapWord* chunk, size_t     size) {
1721   // check that the chunk does lie in this space!
1722   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1723   // One of the parallel gc task threads may be here
1724   // whilst others are allocating.
1725   Mutex* lock = &_parDictionaryAllocLock;
1726   FreeChunk* ec;
1727   {
1728     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1729     ec = dictionary()->find_largest_dict();  // get largest block
1730     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1731       // It's a coterminal block - we can coalesce.
1732       size_t old_size = ec->size();
1733       coalDeath(old_size);
1734       removeChunkFromDictionary(ec);
1735       size += old_size;
1736     } else {
1737       ec = (FreeChunk*)chunk;
1738     }
1739   }
1740   ec->set_size(size);
1741   debug_only(ec->mangleFreed(size));
1742   if (size < SmallForDictionary) {
1743     lock = _indexedFreeListParLocks[size];
1744   }
1745   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1746   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1747   // Record the birth under the lock, since the recording involves
1748   // manipulation of the list on which the chunk lives and,
1749   // if the chunk is allocated and is the last one on the list,
1750   // the list can go away.
1751   coalBirth(size);
1752 }
1753 
1754 void
1755 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1756                                               size_t     size) {
1757   // check that the chunk does lie in this space!
1758   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1759   assert_locked();
1760   _bt.verify_single_block(chunk, size);
1761 
1762   FreeChunk* fc = (FreeChunk*) chunk;
1763   fc->set_size(size);
1764   debug_only(fc->mangleFreed(size));
1765   if (size < SmallForDictionary) {
1766     returnChunkToFreeList(fc);
1767   } else {
1768     returnChunkToDictionary(fc);
1769   }
1770 }
1771 
1772 void
1773 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1774   size_t size, bool coalesced) {
1775   assert_locked();
1776   assert(chunk != NULL, "null chunk");
1777   if (coalesced) {
1778     // repair BOT
1779     _bt.single_block(chunk, size);
1780   }
1781   addChunkToFreeLists(chunk, size);
1782 }
1783 
1784 // We _must_ find the purported chunk on our free lists;
1785 // we assert if we don't.
1786 void
1787 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1788   size_t size = fc->size();
1789   assert_locked();
1790   debug_only(verifyFreeLists());
1791   if (size < SmallForDictionary) {
1792     removeChunkFromIndexedFreeList(fc);
1793   } else {
1794     removeChunkFromDictionary(fc);
1795   }
1796   _bt.verify_single_block((HeapWord*)fc, size);
1797   debug_only(verifyFreeLists());
1798 }
1799 
1800 void
1801 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1802   size_t size = fc->size();
1803   assert_locked();
1804   assert(fc != NULL, "null chunk");
1805   _bt.verify_single_block((HeapWord*)fc, size);
1806   _dictionary->remove_chunk(fc);
1807   // adjust _unallocated_block upward, as necessary
1808   _bt.allocated((HeapWord*)fc, size);
1809 }
1810 
1811 void
1812 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1813   assert_locked();
1814   size_t size = fc->size();
1815   _bt.verify_single_block((HeapWord*)fc, size);
1816   NOT_PRODUCT(
1817     if (FLSVerifyIndexTable) {
1818       verifyIndexedFreeList(size);
1819     }
1820   )
1821   _indexedFreeList[size].remove_chunk(fc);
1822   NOT_PRODUCT(
1823     if (FLSVerifyIndexTable) {
1824       verifyIndexedFreeList(size);
1825     }
1826   )
1827 }
1828 
1829 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1830   /* A hint is the next larger size that has a surplus.
1831      Start search at a size large enough to guarantee that
1832      the excess is >= MinChunkSize. */
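       // Illustrative walk of the hint chain (sizes arbitrary): for a 10-word
       // request, start at align_object_size(10 + MinChunkSize) and follow
       // hint() links until a list with both a surplus and a non-empty head is
       // found; that list's chunk is split via getFromListGreater() and the
       // original start slot's hint is reset to point at it.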
1833   size_t start = align_object_size(numWords + MinChunkSize);
1834   if (start < IndexSetSize) {
1835     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1836     size_t    hint = _indexedFreeList[start].hint();
1837     while (hint < IndexSetSize) {
1838       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1839       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1840       if (fl->surplus() > 0 && fl->head() != NULL) {
1841         // Found a list with surplus, reset original hint
1842         // and split out a free chunk which is returned.
1843         _indexedFreeList[start].set_hint(hint);
1844         FreeChunk* res = getFromListGreater(fl, numWords);
1845         assert(res == NULL || res->is_free(),
1846           "Should be returning a free chunk");
1847         return res;
1848       }
1849       hint = fl->hint(); /* keep looking */
1850     }
1851     /* None found. */
1852     it[start].set_hint(IndexSetSize);
1853   }
1854   return NULL;
1855 }
1856 
1857 /* Requires fl->size >= numWords + MinChunkSize */
1858 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1859   size_t numWords) {
1860   FreeChunk *curr = fl->head();
1861   assert(curr != NULL, "List is empty");
1862   assert(numWords >= MinChunkSize, "Word size is too small");
1863   size_t oldNumWords = curr->size();
1864   assert(oldNumWords >= numWords + MinChunkSize,
1865         "Size of chunks in the list is too small");
1866 
1867   fl->remove_chunk(curr);
1868   // recorded indirectly by splitChunkAndReturnRemainder -
1869   // smallSplit(oldNumWords, numWords);
1870   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1871   // Does anything have to be done for the remainder in terms of
1872   // fixing the card table?
1873   assert(new_chunk == NULL || new_chunk->is_free(),
1874     "Should be returning a free chunk");
1875   return new_chunk;
1876 }
1877 
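     // Split 'chunk' so that the first 'new_size' words satisfy the request and
     // the remainder (starting at chunk + new_size) is put back on the indexed
     // lists or the dictionary, as appropriate.  Illustrative example (sizes
     // arbitrary): splitting a 40-word chunk for a 16-word request leaves a
     // 24-word remainder, which is linked as free before the BOT is updated so
     // that parallel GC threads always see a consistent view.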
1878 FreeChunk*
1879 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1880   size_t new_size) {
1881   assert_locked();
1882   size_t size = chunk->size();
1883   assert(size > new_size, "Split from a smaller block?");
1884   assert(is_aligned(chunk), "alignment problem");
1885   assert(size == adjustObjectSize(size), "alignment problem");
1886   size_t rem_sz = size - new_size;
1887   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1888   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1889   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1890   assert(is_aligned(ffc), "alignment problem");
1891   ffc->set_size(rem_sz);
1892   ffc->link_next(NULL);
1893   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1894   // Above must occur before BOT is updated below.
1895   // adjust block offset table
1896   OrderAccess::storestore();
1897   assert(chunk->is_free() && ffc->is_free(), "Error");
1898   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1899   if (rem_sz < SmallForDictionary) {
1900     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1901     bool is_par = Thread::current()->is_GC_task_thread();
1902     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1903     returnChunkToFreeList(ffc);
1904     split(size, rem_sz);
1905     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1906   } else {
1907     returnChunkToDictionary(ffc);
1908     split(size, rem_sz);
1909   }
1910   chunk->set_size(new_size);
1911   return chunk;
1912 }
1913 
1914 void
1915 CompactibleFreeListSpace::sweep_completed() {
1916   // Now that space is probably plentiful, refill linear
1917   // allocation blocks as needed.
1918   refillLinearAllocBlocksIfNeeded();
1919 }
1920 
1921 void
1922 CompactibleFreeListSpace::gc_prologue() {
1923   assert_locked();
1924   if (PrintFLSStatistics != 0) {
1925     gclog_or_tty->print("Before GC:\n");
1926     reportFreeListStatistics();
1927   }
1928   refillLinearAllocBlocksIfNeeded();
1929 }
1930 
1931 void
1932 CompactibleFreeListSpace::gc_epilogue() {
1933   assert_locked();
1934   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1935     if (_smallLinearAllocBlock._word_size == 0)
1936       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1937   }
1938   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1939   _promoInfo.stopTrackingPromotions();
1940   repairLinearAllocationBlocks();
1941   // Print Space's stats
1942   if (PrintFLSStatistics != 0) {
1943     gclog_or_tty->print("After GC:\n");
1944     reportFreeListStatistics();
1945   }
1946 }
1947 
1948 // Iteration support, mostly delegated from a CMS generation
1949 
1950 void CompactibleFreeListSpace::save_marks() {
1951   assert(Thread::current()->is_VM_thread(),
1952          "Global variable should only be set when single-threaded");
1953   // Mark the "end" of the used space at the time of this call;
1954   // note, however, that promoted objects from this point
1955   // on are tracked in the _promoInfo below.
1956   set_saved_mark_word(unallocated_block());
1957 #ifdef ASSERT
1958   // Check the sanity of save_marks() etc.
1959   MemRegion ur    = used_region();
1960   MemRegion urasm = used_region_at_save_marks();
1961   assert(ur.contains(urasm),
1962          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1963          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1964          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1965 #endif
1966   // inform allocator that promotions should be tracked.
1967   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1968   _promoInfo.startTrackingPromotions();
1969 }
1970 
1971 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1972   assert(_promoInfo.tracking(), "No preceding save_marks?");
1973   return _promoInfo.noPromotions();
1974 }
1975 
1976 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1977                                                                             \
1978 void CompactibleFreeListSpace::                                             \
1979 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1980   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1981   /*                                                                        \
1982    * This also restores any displaced headers and removes the elements from \
1983    * the iteration set as they are processed, so that we have a clean slate \
1984    * at the end of the iteration. Note, thus, that if new objects are       \
1985    * promoted as a result of the iteration they are iterated over as well.  \
1986    */                                                                       \
1987   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1988 }
1989 
1990 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1991 
1992 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1993   return _smallLinearAllocBlock._word_size == 0;
1994 }
1995 
1996 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1997   // Fix up linear allocation blocks to look like free blocks
1998   repairLinearAllocBlock(&_smallLinearAllocBlock);
1999 }
2000 
2001 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2002   assert_locked();
2003   if (blk->_ptr != NULL) {
2004     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2005            "Minimum block size requirement");
2006     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2007     fc->set_size(blk->_word_size);
2008     fc->link_prev(NULL);   // mark as free
2009     fc->dontCoalesce();
2010     assert(fc->is_free(), "just marked it free");
2011     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2012   }
2013 }
2014 
2015 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2016   assert_locked();
2017   if (_smallLinearAllocBlock._ptr == NULL) {
2018     assert(_smallLinearAllocBlock._word_size == 0,
2019       "Size of linAB should be zero if the ptr is NULL");
2020     // Reset the linAB refill and allocation size limit.
2021     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2022   }
2023   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2024 }
2025 
2026 void
2027 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2028   assert_locked();
2029   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2030          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2031          "blk invariant");
2032   if (blk->_ptr == NULL) {
2033     refillLinearAllocBlock(blk);
2034   }
2035   if (PrintMiscellaneous && Verbose) {
2036     if (blk->_word_size == 0) {
2037       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2038     }
2039   }
2040 }
2041 
2042 void
2043 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2044   assert_locked();
2045   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2046          "linear allocation block should be empty");
2047   FreeChunk* fc;
2048   if (blk->_refillSize < SmallForDictionary &&
2049       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2050     // A linAB's strategy might be to use small sizes to reduce
2051     // fragmentation but still get the benefits of allocation from a
2052     // linAB.
2053   } else {
2054     fc = getChunkFromDictionary(blk->_refillSize);
2055   }
2056   if (fc != NULL) {
2057     blk->_ptr  = (HeapWord*)fc;
2058     blk->_word_size = fc->size();
2059     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2060   }
2061 }
2062 
2063 // Support for concurrent collection policy decisions.
2064 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2065   // In the future we might want to add in fragmentation stats --
2066   // including erosion of the "mountain" into this decision as well.
2067   return !adaptive_freelists() && linearAllocationWouldFail();
2068 }
2069 
2070 // Support for compaction
2071 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2072   scan_and_forward(this, cp);
2073   // Prepare_for_compaction() uses the space between live objects
2074   // so that the later phase can skip dead space quickly.  So verification
2075   // of the free lists doesn't work after this point.
2076 }
2077 
2078 void CompactibleFreeListSpace::adjust_pointers() {
2079   // In other versions of adjust_pointers(), a bail out
2080   // based on the amount of live data in the generation
2081   // (i.e., if 0, bail out) may be used.
2082   // Cannot test used() == 0 here because the free lists have already
2083   // been mangled by the compaction.
2084 
2085   scan_and_adjust_pointers(this);
2086   // See note about verification in prepare_for_compaction().
2087 }
2088 
2089 void CompactibleFreeListSpace::compact() {
2090   scan_and_compact(this);
2091 }
2092 
2093 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2094 // where fbs is free block sizes
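     // Worked examples of the metric (values illustrative): a single free block
     // gives 1 - s*s/(s*s) = 0 (no fragmentation), while n equal-sized free
     // blocks give 1 - 1/n, approaching 1 as the free space splinters into
     // many pieces.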
2095 double CompactibleFreeListSpace::flsFrag() const {
2096   size_t itabFree = totalSizeInIndexedFreeLists();
2097   double frag = 0.0;
2098   size_t i;
2099 
2100   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2101     double sz  = i;
2102     frag      += _indexedFreeList[i].count() * (sz * sz);
2103   }
2104 
2105   double totFree = itabFree +
2106                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2107   if (totFree > 0) {
2108     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2109             (totFree * totFree));
2110     frag = (double)1.0  - frag;
2111   } else {
2112     assert(frag == 0.0, "Follows from totFree == 0");
2113   }
2114   return frag;
2115 }
2116 
2117 void CompactibleFreeListSpace::beginSweepFLCensus(
2118   float inter_sweep_current,
2119   float inter_sweep_estimate,
2120   float intra_sweep_estimate) {
2121   assert_locked();
2122   size_t i;
2123   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2124     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2125     if (PrintFLSStatistics > 1) {
2126       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2127     }
2128     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2129     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2130     fl->set_before_sweep(fl->count());
2131     fl->set_bfr_surp(fl->surplus());
2132   }
2133   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2134                                     inter_sweep_current,
2135                                     inter_sweep_estimate,
2136                                     intra_sweep_estimate);
2137 }
2138 
2139 void CompactibleFreeListSpace::setFLSurplus() {
2140   assert_locked();
2141   size_t i;
2142   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2143     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2144     fl->set_surplus(fl->count() -
2145                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2146   }
2147 }
2148 
2149 void CompactibleFreeListSpace::setFLHints() {
2150   assert_locked();
2151   size_t i;
2152   size_t h = IndexSetSize;
2153   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2154     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2155     fl->set_hint(h);
2156     if (fl->surplus() > 0) {
2157       h = i;
2158     }
2159   }
2160 }
2161 
2162 void CompactibleFreeListSpace::clearFLCensus() {
2163   assert_locked();
2164   size_t i;
2165   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2166     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2167     fl->set_prev_sweep(fl->count());
2168     fl->set_coal_births(0);
2169     fl->set_coal_deaths(0);
2170     fl->set_split_births(0);
2171     fl->set_split_deaths(0);
2172   }
2173 }
2174 
2175 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2176   if (PrintFLSStatistics > 0) {
2177     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2178     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2179                            p2i(largestAddr));
2180   }
2181   setFLSurplus();
2182   setFLHints();
2183   if (PrintGC && PrintFLSCensus > 0) {
2184     printFLCensus(sweep_count);
2185   }
2186   clearFLCensus();
2187   assert_locked();
2188   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2189 }
2190 
2191 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2192   if (size < SmallForDictionary) {
2193     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2194     return (fl->coal_desired() < 0) ||
2195            ((int)fl->count() > fl->coal_desired());
2196   } else {
2197     return dictionary()->coal_dict_over_populated(size);
2198   }
2199 }
2200 
2201 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2202   assert(size < SmallForDictionary, "Size too large for indexed list");
2203   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2204   fl->increment_coal_births();
2205   fl->increment_surplus();
2206 }
2207 
2208 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2209   assert(size < SmallForDictionary, "Size too large for indexed list");
2210   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2211   fl->increment_coal_deaths();
2212   fl->decrement_surplus();
2213 }
2214 
2215 void CompactibleFreeListSpace::coalBirth(size_t size) {
2216   if (size  < SmallForDictionary) {
2217     smallCoalBirth(size);
2218   } else {
2219     dictionary()->dict_census_update(size,
2220                                    false /* split */,
2221                                    true /* birth */);
2222   }
2223 }
2224 
2225 void CompactibleFreeListSpace::coalDeath(size_t size) {
2226   if(size  < SmallForDictionary) {
2227     smallCoalDeath(size);
2228   } else {
2229     dictionary()->dict_census_update(size,
2230                                    false /* split */,
2231                                    false /* birth */);
2232   }
2233 }
2234 
2235 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2236   assert(size < SmallForDictionary, "Size too large for indexed list");
2237   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2238   fl->increment_split_births();
2239   fl->increment_surplus();
2240 }
2241 
2242 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2243   assert(size < SmallForDictionary, "Size too large for indexed list");
2244   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2245   fl->increment_split_deaths();
2246   fl->decrement_surplus();
2247 }
2248 
2249 void CompactibleFreeListSpace::split_birth(size_t size) {
2250   if (size  < SmallForDictionary) {
2251     smallSplitBirth(size);
2252   } else {
2253     dictionary()->dict_census_update(size,
2254                                    true /* split */,
2255                                    true /* birth */);
2256   }
2257 }
2258 
2259 void CompactibleFreeListSpace::splitDeath(size_t size) {
2260   if (size  < SmallForDictionary) {
2261     smallSplitDeath(size);
2262   } else {
2263     dictionary()->dict_census_update(size,
2264                                    true /* split */,
2265                                    false /* birth */);
2266   }
2267 }
2268 
2269 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2270   size_t to2 = from - to1;
2271   splitDeath(from);
2272   split_birth(to1);
2273   split_birth(to2);
2274 }
2275 
2276 void CompactibleFreeListSpace::print() const {
2277   print_on(tty);
2278 }
2279 
2280 void CompactibleFreeListSpace::prepare_for_verify() {
2281   assert_locked();
2282   repairLinearAllocationBlocks();
2283   // Verify that the SpoolBlocks look like free blocks of
2284   // appropriate sizes... To be done ...
2285 }
2286 
2287 class VerifyAllBlksClosure: public BlkClosure {
2288  private:
2289   const CompactibleFreeListSpace* _sp;
2290   const MemRegion                 _span;
2291   HeapWord*                       _last_addr;
2292   size_t                          _last_size;
2293   bool                            _last_was_obj;
2294   bool                            _last_was_live;
2295 
2296  public:
2297   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2298     MemRegion span) :  _sp(sp), _span(span),
2299                        _last_addr(NULL), _last_size(0),
2300                        _last_was_obj(false), _last_was_live(false) { }
2301 
2302   virtual size_t do_blk(HeapWord* addr) {
2303     size_t res;
2304     bool   was_obj  = false;
2305     bool   was_live = false;
2306     if (_sp->block_is_obj(addr)) {
2307       was_obj = true;
2308       oop p = oop(addr);
2309       guarantee(p->is_oop(), "Should be an oop");
2310       res = _sp->adjustObjectSize(p->size());
2311       if (_sp->obj_is_alive(addr)) {
2312         was_live = true;
2313         p->verify();
2314       }
2315     } else {
2316       FreeChunk* fc = (FreeChunk*)addr;
2317       res = fc->size();
2318       if (FLSVerifyLists && !fc->cantCoalesce()) {
2319         guarantee(_sp->verify_chunk_in_free_list(fc),
2320                   "Chunk should be on a free list");
2321       }
2322     }
2323     if (res == 0) {
2324       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2325       gclog_or_tty->print_cr(
2326         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2327         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2328         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2329         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2330       _sp->print_on(gclog_or_tty);
2331       guarantee(false, "Seppuku!");
2332     }
2333     _last_addr = addr;
2334     _last_size = res;
2335     _last_was_obj  = was_obj;
2336     _last_was_live = was_live;
2337     return res;
2338   }
2339 };
2340 
2341 class VerifyAllOopsClosure: public OopClosure {
2342  private:
2343   const CMSCollector*             _collector;
2344   const CompactibleFreeListSpace* _sp;
2345   const MemRegion                 _span;
2346   const bool                      _past_remark;
2347   const CMSBitMap*                _bit_map;
2348 
2349  protected:
2350   void do_oop(void* p, oop obj) {
2351     if (_span.contains(obj)) { // the interior oop points into CMS heap
2352       if (!_span.contains(p)) { // reference from outside CMS heap
2353         // Should be a valid object; the first disjunct below allows
2354         // us to sidestep an assertion in block_is_obj() that insists
2355         // that p be in _sp. Note that several generations (and spaces)
2356         // are spanned by _span (CMS heap) above.
2357         guarantee(!_sp->is_in_reserved(obj) ||
2358                   _sp->block_is_obj((HeapWord*)obj),
2359                   "Should be an object");
2360         guarantee(obj->is_oop(), "Should be an oop");
2361         obj->verify();
2362         if (_past_remark) {
2363           // Remark has been completed, the object should be marked
2364           _bit_map->isMarked((HeapWord*)obj);
2365         }
2366       } else { // reference within CMS heap
2367         if (_past_remark) {
2368           // Remark has been completed -- so the referent should have
2369           // been marked, if referring object is.
2370           if (_bit_map->isMarked(_collector->block_start(p))) {
2371             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2372           }
2373         }
2374       }
2375     } else if (_sp->is_in_reserved(p)) {
2376       // the reference is from FLS, and points out of FLS
2377       guarantee(obj->is_oop(), "Should be an oop");
2378       obj->verify();
2379     }
2380   }
2381 
2382   template <class T> void do_oop_work(T* p) {
2383     T heap_oop = oopDesc::load_heap_oop(p);
2384     if (!oopDesc::is_null(heap_oop)) {
2385       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2386       do_oop(p, obj);
2387     }
2388   }
2389 
2390  public:
2391   VerifyAllOopsClosure(const CMSCollector* collector,
2392     const CompactibleFreeListSpace* sp, MemRegion span,
2393     bool past_remark, CMSBitMap* bit_map) :
2394     _collector(collector), _sp(sp), _span(span),
2395     _past_remark(past_remark), _bit_map(bit_map) { }
2396 
2397   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2398   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2399 };
2400 
2401 void CompactibleFreeListSpace::verify() const {
2402   assert_lock_strong(&_freelistLock);
2403   verify_objects_initialized();
2404   MemRegion span = _collector->_span;
2405   bool past_remark = (_collector->abstract_state() ==
2406                       CMSCollector::Sweeping);
2407 
2408   ResourceMark rm;
2409   HandleMark  hm;
2410 
2411   // Check integrity of CFL data structures
2412   _promoInfo.verify();
2413   _dictionary->verify();
2414   if (FLSVerifyIndexTable) {
2415     verifyIndexedFreeLists();
2416   }
2417   // Check integrity of all objects and free blocks in space
2418   {
2419     VerifyAllBlksClosure cl(this, span);
2420     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2421   }
2422   // Check that all references in the heap to FLS
2423   // are to valid objects in FLS or that references in
2424   // FLS are to valid objects elsewhere in the heap
2425   if (FLSVerifyAllHeapReferences)
2426   {
2427     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2428       _collector->markBitMap());
2429 
2430     // Iterate over all oops in the heap. Uses the _no_header version
2431     // since we are not interested in following the klass pointers.
2432     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2433   }
2434 
2435   if (VerifyObjectStartArray) {
2436     // Verify the block offset table
2437     _bt.verify();
2438   }
2439 }
2440 
2441 #ifndef PRODUCT
2442 void CompactibleFreeListSpace::verifyFreeLists() const {
2443   if (FLSVerifyLists) {
2444     _dictionary->verify();
2445     verifyIndexedFreeLists();
2446   } else {
2447     if (FLSVerifyDictionary) {
2448       _dictionary->verify();
2449     }
2450     if (FLSVerifyIndexTable) {
2451       verifyIndexedFreeLists();
2452     }
2453   }
2454 }
2455 #endif
2456 
2457 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2458   size_t i = 0;
2459   for (; i < IndexSetStart; i++) {
2460     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2461   }
2462   for (; i < IndexSetSize; i++) {
2463     verifyIndexedFreeList(i);
2464   }
2465 }
2466 
2467 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2468   FreeChunk* fc   =  _indexedFreeList[size].head();
2469   FreeChunk* tail =  _indexedFreeList[size].tail();
2470   size_t    num = _indexedFreeList[size].count();
2471   size_t      n = 0;
2472   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2473             "Slot should have been empty");
2474   for (; fc != NULL; fc = fc->next(), n++) {
2475     guarantee(fc->size() == size, "Size inconsistency");
2476     guarantee(fc->is_free(), "!free?");
2477     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2478     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2479   }
2480   guarantee(n == num, "Incorrect count");
2481 }
2482 
2483 #ifndef PRODUCT
2484 void CompactibleFreeListSpace::check_free_list_consistency() const {
2485   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2486     "Some sizes can't be allocated without recourse to"
2487     " linear allocation buffers");
2488   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2489     "else MIN_TREE_CHUNK_SIZE is wrong");
2490   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2491   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2492 }
2493 #endif
2494 
2495 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2496   assert_lock_strong(&_freelistLock);
2497   AdaptiveFreeList<FreeChunk> total;
2498   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2499   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2500   size_t total_free = 0;
2501   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2502     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2503     total_free += fl->count() * fl->size();
2504     if (i % (40*IndexSetStride) == 0) {
2505       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2506     }
2507     fl->print_on(gclog_or_tty);
2508     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2509     total.set_surplus(    total.surplus()     + fl->surplus()    );
2510     total.set_desired(    total.desired()     + fl->desired()    );
2511     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2512     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2513     total.set_count(      total.count()       + fl->count()      );
2514     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2515     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2516     total.set_split_births(total.split_births() + fl->split_births());
2517     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2518   }
2519   total.print_on(gclog_or_tty, "TOTAL");
2520   gclog_or_tty->print_cr("Total free in indexed lists "
2521                          SIZE_FORMAT " words", total_free);
2522   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2523     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2524             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2525     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2526   _dictionary->print_dict_census();
2527 }
2528 
2529 ///////////////////////////////////////////////////////////////////////////
2530 // CFLS_LAB
2531 ///////////////////////////////////////////////////////////////////////////
2532 
2533 #define VECTOR_257(x)                                                                                  \
2534   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2535   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2536      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2537      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2538      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2539      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2540      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2541      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2542      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2543      x }
2544 
2545 // Initialize with default setting for CMS, _not_
2546 // generic OldPLABSize, whose static default is different; if overridden at the
2547 // command-line, this will get reinitialized via a call to
2548 // modify_initialization() below.
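     // (modify_initialization() below is the reinitialization hook referred to
     // here; it force-modifies every per-size AdaptiveWeightedAverage with the
     // value and weight supplied on the command line.)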
2549 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2550   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
2551 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2552 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2553 
2554 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2555   _cfls(cfls)
2556 {
2557   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2558   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2559        i < CompactibleFreeListSpace::IndexSetSize;
2560        i += CompactibleFreeListSpace::IndexSetStride) {
2561     _indexedFreeList[i].set_size(i);
2562     _num_blocks[i] = 0;
2563   }
2564 }
2565 
2566 static bool _CFLS_LAB_modified = false;
2567 
2568 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2569   assert(!_CFLS_LAB_modified, "Call only once");
2570   _CFLS_LAB_modified = true;
2571   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2572        i < CompactibleFreeListSpace::IndexSetSize;
2573        i += CompactibleFreeListSpace::IndexSetStride) {
2574     _blocks_to_claim[i].modify(n, wt, true /* force */);
2575   }
2576 }
2577 
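// Allocation strategy: sizes at or above IndexSetSize are satisfied from the
// shared dictionary under parDictionaryAllocLock(), while smaller sizes come
// from this worker's local indexed free lists, which are refilled in bulk
// from the global pool (see get_from_global_pool()) so that the common-case
// allocation needs no locking.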
2578 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2579   FreeChunk* res;
2580   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2581   if (word_sz >= CompactibleFreeListSpace::IndexSetSize) {
2582     // This locking manages sync with other large object allocations.
2583     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2584                     Mutex::_no_safepoint_check_flag);
2585     res = _cfls->getChunkFromDictionaryExact(word_sz);
2586     if (res == NULL) return NULL;
2587   } else {
2588     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2589     if (fl->count() == 0) {
2590       // Attempt to refill this local free list.
2591       get_from_global_pool(word_sz, fl);
2592       // If it didn't work, give up.
2593       if (fl->count() == 0) return NULL;
2594     }
2595     res = fl->get_chunk_at_head();
2596     assert(res != NULL, "Why was count non-zero?");
2597   }
2598   res->markNotFree();
2599   assert(!res->is_free(), "shouldn't be marked free");
2600   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2601   // Mangle a just-allocated object with a distinct pattern.
2602   debug_only(res->mangleAllocated(word_sz));
2603   return (HeapWord*)res;
2604 }
2605 
2606 // Get a chunk of blocks of the right size and update related
2607 // book-keeping stats
2608 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2609   // Get the #blocks we want to claim
2610   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2611   assert(n_blks > 0, "Error");
2612   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2613   // In some cases, when the application has a phase change,
2614   // there may be a sudden and sharp shift in the object survival
2615   // profile, and updating the counts at the end of a scavenge
2616   // may not be quick enough, giving rise to large scavenge pauses
2617   // during these phase changes. It is beneficial to detect such
2618   // changes on-the-fly during a scavenge and avoid such a phase-change
2619   // pothole. The following code is a heuristic attempt to do that.
2620   // It is protected by a product flag until we have gained
2621   // enough experience with this heuristic and fine-tuned its behavior.
2622   // WARNING: This might increase fragmentation if we overreact to
2623   // small spikes, so some kind of historical smoothing based on
2624   // previous experience with the greater reactivity might be useful.
2625   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2626   // default.
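  // Illustrative sketch of the boost below (flag values purely for the
  // example): with n_blks = 16, CMSOldPLABToleranceFactor = 4,
  // CMSOldPLABNumRefills = 4 and CMSOldPLABReactivityFactor = 2, a worker
  // that has already consumed _num_blocks[word_sz] = 600 blocks this scavenge
  // gets multiple = 600 / (4 * 4 * 16) = 2, so n_blks grows by
  // 2 * 2 * 16 = 64 to 80, subject to the CMSOldPLABMax cap.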
2627   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2628     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2629     n_blks += CMSOldPLABReactivityFactor*multiple*n_blks;
2630     n_blks = MIN2(n_blks, CMSOldPLABMax);
2631   }
2632   assert(n_blks > 0, "Error");
2633   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2634   // Update stats table entry for this block size
2635   _num_blocks[word_sz] += fl->count();
2636 }
2637 
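// The desired per-worker claim for each block size is the number of blocks
// consumed globally during the last scavenge, divided evenly among the
// workers and the CMSOldPLABNumRefills refills each worker performs, clamped
// to [CMSOldPLABMin, CMSOldPLABMax] and fed into the exponentially weighted
// average. For example (numbers illustrative only): 4000 blocks consumed by
// 8 workers doing 4 refills each samples 4000 / (8 * 4) = 125.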
2638 void CFLS_LAB::compute_desired_plab_size() {
2639   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2640        i < CompactibleFreeListSpace::IndexSetSize;
2641        i += CompactibleFreeListSpace::IndexSetStride) {
2642     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2643            "Counter inconsistency");
2644     if (_global_num_workers[i] > 0) {
2645       // Need to smooth wrt historical average
2646       if (ResizeOldPLAB) {
2647         _blocks_to_claim[i].sample(
2648           MAX2(CMSOldPLABMin,
2649           MIN2(CMSOldPLABMax,
2650                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2651       }
2652       // Reset counters for next round
2653       _global_num_workers[i] = 0;
2654       _global_num_blocks[i] = 0;
2655       if (PrintOldPLAB) {
2656         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2657                                i, (size_t)_blocks_to_claim[i].average());
2658       }
2659     }
2660   }
2661 }
2662 
2663 // If this is changed in the future to allow parallel
2664 // access, one would need to take the FL locks and,
2665 // depending on how it is used, stagger access from
2666 // parallel threads to reduce contention.
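// retire() folds each worker's leftover local free lists back into the shared
// indexed lists and records, per block size, how many blocks the worker
// actually consumed this scavenge; compute_desired_plab_size() later turns
// those totals into the next cycle's _blocks_to_claim averages.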
2667 void CFLS_LAB::retire(int tid) {
2668   // We run this single threaded with the world stopped;
2669   // so no need for locks and such.
2670   NOT_PRODUCT(Thread* t = Thread::current();)
2671   assert(Thread::current()->is_VM_thread(), "Error");
2672   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2673        i < CompactibleFreeListSpace::IndexSetSize;
2674        i += CompactibleFreeListSpace::IndexSetStride) {
2675     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2676            "Can't retire more than what we obtained");
2677     if (_num_blocks[i] > 0) {
2678       size_t num_retire = _indexedFreeList[i].count();
2679       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2680       {
2681         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2682         //                Mutex::_no_safepoint_check_flag);
2683 
2684         // Update globals stats for num_blocks used
2685         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2686         _global_num_workers[i]++;
2687         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2688         if (num_retire > 0) {
2689           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2690           // Reset this list.
2691           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2692           _indexedFreeList[i].set_size(i);
2693         }
2694       }
2695       if (PrintOldPLAB) {
2696         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2697                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2698       }
2699       // Reset stats for next round
2700       _num_blocks[i]         = 0;
2701     }
2702   }
2703 }
2704 
2705 // Used by par_get_chunk_of_blocks() for the chunks from the
2706 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2707 // of "word_sz" and, if found, splits it into chunks of size "word_sz",
2708 // adding them to the free list "fl".  "n" is the maximum number of
2709 // chunks to be added to "fl".
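// For example (sizes illustrative only): a request for n chunks of
// word_sz == 8 first tries the size-8 list; failing that, and with
// CMSSplitIndexedFreeListBlocks enabled, it tries the size-16 list (k == 2),
// splitting each 16-word chunk taken into two 8-word chunks, then the
// size-24 list (k == 3), and so on while k * word_sz stays below IndexSetSize.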
2710 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2711 
2712   // We'll try all multiples of word_sz in the indexed set, starting with
2713   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples;
2714   // getting a big chunk from the dictionary and splitting it is left to our caller.
2715   {
2716     bool found;
2717     int  k;
2718     size_t cur_sz;
2719     for (k = 1, cur_sz = k * word_sz, found = false;
2720          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2721          (CMSSplitIndexedFreeListBlocks || k <= 1);
2722          k++, cur_sz = k * word_sz) {
2723       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2724       fl_for_cur_sz.set_size(cur_sz);
2725       {
2726         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2727                         Mutex::_no_safepoint_check_flag);
2728         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2729         if (gfl->count() != 0) {
2730           // nn is the number of chunks of size cur_sz that
2731           // we'd need to split k-ways each, in order to create
2732           // "n" chunks of size word_sz each.
2733           const size_t nn = MAX2(n/k, (size_t)1);
2734           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2735           found = true;
2736           if (k > 1) {
2737             // Update split death stats for the cur_sz-size blocks list:
2738             // we increment the split death count by the number of blocks
2739             // we just took from the cur_sz-size blocks list and which
2740             // we will be splitting below.
2741             ssize_t deaths = gfl->split_deaths() +
2742                              fl_for_cur_sz.count();
2743             gfl->set_split_deaths(deaths);
2744           }
2745         }
2746       }
2747       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2748       if (found) {
2749         if (k == 1) {
2750           fl->prepend(&fl_for_cur_sz);
2751         } else {
2752           // Divide each block on fl_for_cur_sz up k ways.
2753           FreeChunk* fc;
2754           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2755             // Must do this in reverse order, so that anybody attempting to
2756             // access the main chunk sees it as a single free block until we
2757             // change it.
2758             size_t fc_size = fc->size();
2759             assert(fc->is_free(), "Error");
2760             for (int i = k-1; i >= 0; i--) {
2761               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2762               assert((i != 0) ||
2763                         ((fc == ffc) && ffc->is_free() &&
2764                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2765                         "Counting error");
2766               ffc->set_size(word_sz);
2767               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2768               ffc->link_next(NULL);
2769               // Above must occur before BOT is updated below.
2770               OrderAccess::storestore();
2771               // splitting from the right, fc_size == i * word_sz
2772               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2773               fc_size -= word_sz;
2774               assert(fc_size == i*word_sz, "Error");
2775               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2776               _bt.verify_single_block((HeapWord*)fc, fc_size);
2777               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2778               // Push this on "fl".
2779               fl->return_chunk_at_head(ffc);
2780             }
2781             // TRAP
2782             assert(fl->tail()->next() == NULL, "List invariant.");
2783           }
2784         }
2785         // Update birth stats for this block size.
2786         size_t num = fl->count();
2787         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2788                         Mutex::_no_safepoint_check_flag);
2789         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2790         _indexedFreeList[word_sz].set_split_births(births);
2791         return true;
2792       }
2793     }
2794     return found;
2795   }
2796 }
2797 
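// Finds a dictionary chunk large enough to be carved into (up to) n blocks of
// word_sz words, trims n to what the chunk can supply, and splits off any
// remainder first.  For example (sizes illustrative only): asked for n = 50
// blocks of word_sz = 10 and handed a 520-word chunk, it keeps a 500-word
// prefix for the caller to carve up and returns the 20-word remainder to the
// indexed free list of size 20 (or to the dictionary, had the remainder been
// >= IndexSetSize).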
2798 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2799 
2800   FreeChunk* fc = NULL;
2801   FreeChunk* rem_fc = NULL;
2802   size_t rem;
2803   {
2804     MutexLockerEx x(parDictionaryAllocLock(),
2805                     Mutex::_no_safepoint_check_flag);
2806     while (n > 0) {
2807       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2808                                   FreeBlockDictionary<FreeChunk>::atLeast);
2809       if (fc != NULL) {
2810         break;
2811       } else {
2812         n--;
2813       }
2814     }
2815     if (fc == NULL) return NULL;
2816     // Otherwise, split up that block.
2817     assert((ssize_t)n >= 1, "Control point invariant");
2818     assert(fc->is_free(), "Error: should be a free block");
2819     _bt.verify_single_block((HeapWord*)fc, fc->size());
2820     const size_t nn = fc->size() / word_sz;
2821     n = MIN2(nn, n);
2822     assert((ssize_t)n >= 1, "Control point invariant");
2823     rem = fc->size() - n * word_sz;
2824     // If there is a remainder, and it's too small, allocate one fewer.
2825     if (rem > 0 && rem < MinChunkSize) {
2826       n--; rem += word_sz;
2827     }
2828     // Note that at this point we may have n == 0.
2829     assert((ssize_t)n >= 0, "Control point invariant");
2830 
2831     // If n is 0, the chunk fc that was found is not large
2832     // enough to leave a viable remainder.  We are unable to
2833     // allocate even one block.  Return fc to the
2834     // dictionary and return, leaving "fl" empty.
2835     if (n == 0) {
2836       returnChunkToDictionary(fc);
2837       return NULL;
2838     }
2839 
2840     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2841     dictionary()->dict_census_update(fc->size(),
2842                                      true /*split*/,
2843                                      false /*birth*/);
2844 
2845     // First return the remainder, if any.
2846     // Note that we hold the lock until we decide if we're going to give
2847     // back the remainder to the dictionary, since a concurrent allocation
2848     // may otherwise see the heap as empty.  (We're willing to take that
2849     // hit if the block is a small block.)
2850     if (rem > 0) {
2851       size_t prefix_size = n * word_sz;
2852       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2853       rem_fc->set_size(rem);
2854       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2855       rem_fc->link_next(NULL);
2856       // Above must occur before BOT is updated below.
2857       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2858       OrderAccess::storestore();
2859       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2860       assert(fc->is_free(), "Error");
2861       fc->set_size(prefix_size);
2862       if (rem >= IndexSetSize) {
2863         returnChunkToDictionary(rem_fc);
2864         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2865         rem_fc = NULL;
2866       }
2867       // Otherwise, return it to the small list below.
2868     }
2869   }
2870   if (rem_fc != NULL) {
2871     MutexLockerEx x(_indexedFreeListParLocks[rem],
2872                     Mutex::_no_safepoint_check_flag);
2873     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2874     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2875     smallSplitBirth(rem);
2876   }
2877   assert(n * word_sz == fc->size(),
2878          "Chunk size " SIZE_FORMAT " is not exactly splittable by "
2879          SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
2880          fc->size(), n, word_sz);
2881   return fc;
2882 }
2883 
2884 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2885 
2886   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2887 
2888   if (fc == NULL) {
2889     return;
2890   }
2891 
2892   size_t n = fc->size() / word_sz;
2893 
2894   assert((ssize_t)n > 0, "Consistency");
2895   // Now do the splitting up.
2896   // Must do this in reverse order, so that anybody attempting to
2897   // access the main chunk sees it as a single free block until we
2898   // change it.
2899   size_t fc_size = n * word_sz;
2900   // All but first chunk in this loop
2901   for (ssize_t i = n-1; i > 0; i--) {
2902     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2903     ffc->set_size(word_sz);
2904     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2905     ffc->link_next(NULL);
2906     // Above must occur before BOT is updated below.
2907     OrderAccess::storestore();
2908     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2909     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2910     fc_size -= word_sz;
2911     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2912     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2913     _bt.verify_single_block((HeapWord*)fc, fc_size);
2914     // Push this on "fl".
2915     fl->return_chunk_at_head(ffc);
2916   }
2917   // First chunk
2918   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2919   // The blocks split off above must show their new sizes before the first block is shrunk below.
2920   fc->set_size(word_sz);
2921   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2922   fc->link_next(NULL);
2923   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2924   _bt.verify_single_block((HeapWord*)fc, fc->size());
2925   fl->return_chunk_at_head(fc);
2926 
2927   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2928   {
2929     // Update the stats for this block size.
2930     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2931                     Mutex::_no_safepoint_check_flag);
2932     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2933     _indexedFreeList[word_sz].set_split_births(births);
2934     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2935     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2936   }
2937 
2938   // TRAP
2939   assert(fl->tail()->next() == NULL, "List invariant.");
2940 }
2941 
2942 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2943   assert(fl->count() == 0, "Precondition.");
2944   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2945          "Precondition");
2946 
2947   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2948     // Got it
2949     return;
2950   }
2951 
2952   // Otherwise, we'll split a block from the dictionary.
2953   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2954 }
2955 
2956 // Set up the space's par_seq_tasks structure for work claiming
2957 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2958 // XXX Need to suitably abstract and generalize this and the next
2959 // method into one.
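// The number of tasks is the used region size in words divided by
// rescan_task_size(), rounded up; for example (numbers illustrative only), a
// 10000-word used region with a 4096-word task size yields
// (10000 + 4095) / 4096 = 3 tasks, the last of which covers the tail.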
2960 void
2961 CompactibleFreeListSpace::
2962 initialize_sequential_subtasks_for_rescan(int n_threads) {
2963   // The "size" of each task is fixed according to rescan_task_size.
2964   assert(n_threads > 0, "Unexpected n_threads argument");
2965   const size_t task_size = rescan_task_size();
2966   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2967   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2968   assert(n_tasks == 0 ||
2969          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2970           (used_region().start() + n_tasks*task_size >= used_region().end())),
2971          "n_tasks calculation incorrect");
2972   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2973   assert(!pst->valid(), "Clobbering existing data?");
2974   // Sets the condition for completion of the subtask (how many threads
2975   // need to finish in order to be done).
2976   pst->set_n_threads(n_threads);
2977   pst->set_n_tasks((int)n_tasks);
2978 }
2979 
2980 // Set up the space's par_seq_tasks structure for work claiming
2981 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
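// If "low" is supplied, the generation's reserved span is clipped before task
// sizing: a "low" inside the span trims the span's prefix at the card
// boundary at or below "low", a "low" past the end yields an empty span, and
// otherwise (including low == NULL) the whole span is used.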
2982 void
2983 CompactibleFreeListSpace::
2984 initialize_sequential_subtasks_for_marking(int n_threads,
2985                                            HeapWord* low) {
2986   // The "size" of each task is fixed according to rescan_task_size.
2987   assert(n_threads > 0, "Unexpected n_threads argument");
2988   const size_t task_size = marking_task_size();
2989   assert(task_size > CardTableModRefBS::card_size_in_words &&
2990          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2991          "Otherwise arithmetic below would be incorrect");
2992   MemRegion span = _old_gen->reserved();
2993   if (low != NULL) {
2994     if (span.contains(low)) {
2995       // Align low down to a card boundary so that
2996       // we can use block_offset_careful() on span boundaries.
2997       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2998                                  CardTableModRefBS::card_size);
2999       // Clip span prefix at aligned_low
3000       span = span.intersection(MemRegion(aligned_low, span.end()));
3001     } else if (low > span.end()) {
3002       span = MemRegion(low, low);  // Null region
3003     } // else use entire span
3004   }
3005   assert(span.is_empty() ||
3006          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3007         "span should start at a card boundary");
3008   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3009   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3010   assert(n_tasks == 0 ||
3011          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3012           (span.start() + n_tasks*task_size >= span.end())),
3013          "n_tasks calculation incorrect");
3014   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3015   assert(!pst->valid(), "Clobbering existing data?");
3016   // Sets the condition for completion of the subtask (how many threads
3017   // need to finish in order to be done).
3018   pst->set_n_threads(n_threads);
3019   pst->set_n_tasks((int)n_tasks);
3020 }