1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/genCollectedHeap.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/space.inline.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
  52 // highest ranked  free list lock rank
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  77   MemRegion mr, bool use_adaptive_freelists,
  78   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  79   _dictionaryChoice(dictionaryChoice),
  80   _adaptive_freelists(use_adaptive_freelists),
  81   _bt(bs, mr),
  82   // free list locks are in the range of values taken by _lockRank
  83   // This range currently is [_leaf+2, _leaf+3]
  84   // Note: this requires that CFLspace c'tors
  85   // are called serially in the order in which the locks are
  86   // are acquired in the program text. This is true today.
  87   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  88                 Monitor::_safepoint_check_sometimes),
  89   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  90                           "CompactibleFreeListSpace._dict_par_lock", true,
  91                           Monitor::_safepoint_check_never),
  92   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  93                     CMSRescanMultiple),
  94   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  95                     CMSConcMarkMultiple),
  96   _collector(NULL),
  97   _preconsumptionDirtyCardClosure(NULL)
  98 {
  99   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
 100          "FreeChunk is larger than expected");
 101   _bt.set_space(this);
 102   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 103   // We have all of "mr", all of which we place in the dictionary
 104   // as one big chunk. We'll need to decide here which of several
 105   // possible alternative dictionary implementations to use. For
 106   // now the choice is easy, since we have only one working
 107   // implementation, namely, the simple binary tree (splaying
 108   // temporarily disabled).
 109   switch (dictionaryChoice) {
 110     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 111       _dictionary = new AFLBinaryTreeDictionary(mr);
 112       break;
 113     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 114     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 115     default:
 116       warning("dictionaryChoice: selected option not understood; using"
 117               " default BinaryTreeDictionary implementation instead.");
 118   }
 119   assert(_dictionary != NULL, "CMS dictionary initialization");
 120   // The indexed free lists are initially all empty and are lazily
 121   // filled in on demand. Initialize the array elements to NULL.
 122   initializeIndexedFreeListArray();
 123 
 124   // Not using adaptive free lists assumes that allocation is first
 125   // from the linAB's.  Also a cms perm gen which can be compacted
 126   // has to have the klass's klassKlass allocated at a lower
 127   // address in the heap than the klass so that the klassKlass is
 128   // moved to its new location before the klass is moved.
 129   // Set the _refillSize for the linear allocation blocks
 130   if (!use_adaptive_freelists) {
 131     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 132                                            FreeBlockDictionary<FreeChunk>::atLeast);
 133     // The small linAB initially has all the space and will allocate
 134     // a chunk of any size.
 135     HeapWord* addr = (HeapWord*) fc;
 136     _smallLinearAllocBlock.set(addr, fc->size() ,
 137       1024*SmallForLinearAlloc, fc->size());
 138     // Note that _unallocated_block is not updated here.
 139     // Allocations from the linear allocation block should
 140     // update it.
 141   } else {
 142     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 143                                SmallForLinearAlloc);
 144   }
 145   // CMSIndexedFreeListReplenish should be at least 1
 146   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 147   _promoInfo.setSpace(this);
 148   if (UseCMSBestFit) {
 149     _fitStrategy = FreeBlockBestFitFirst;
 150   } else {
 151     _fitStrategy = FreeBlockStrategyNone;
 152   }
 153   check_free_list_consistency();
 154 
 155   // Initialize locks for parallel case.
 156   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 157     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 158                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 159     DEBUG_ONLY(
 160       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 161     )
 162   }
 163   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 164 }
 165 
 166 // Like CompactibleSpace forward() but always calls cross_threshold() to
 167 // update the block offset table.  Removed initialize_threshold call because
 168 // CFLS does not use a block offset array for contiguous spaces.
 169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 170                                     CompactPoint* cp, HeapWord* compact_top) {
 171   // q is alive
 172   // First check if we should switch compaction space
 173   assert(this == cp->space, "'this' should be current compaction space.");
 174   size_t compaction_max_size = pointer_delta(end(), compact_top);
 175   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 176     "virtual adjustObjectSize_v() method is not correct");
 177   size_t adjusted_size = adjustObjectSize(size);
 178   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 179          "no small fragments allowed");
 180   assert(minimum_free_block_size() == MinChunkSize,
 181          "for de-virtualized reference below");
 182   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
 183   if (adjusted_size + MinChunkSize > compaction_max_size &&
 184       adjusted_size != compaction_max_size) {
 185     do {
 186       // switch to next compaction space
 187       cp->space->set_compaction_top(compact_top);
 188       cp->space = cp->space->next_compaction_space();
 189       if (cp->space == NULL) {
 190         cp->gen = GenCollectedHeap::heap()->young_gen();
 191         assert(cp->gen != NULL, "compaction must succeed");
 192         cp->space = cp->gen->first_compaction_space();
 193         assert(cp->space != NULL, "generation must have a first compaction space");
 194       }
 195       compact_top = cp->space->bottom();
 196       cp->space->set_compaction_top(compact_top);
 197       // The correct adjusted_size may not be the same as that for this method
 198       // (i.e., cp->space may no longer be "this" so adjust the size again.
 199       // Use the virtual method which is not used above to save the virtual
 200       // dispatch.
 201       adjusted_size = cp->space->adjust_object_size_v(size);
 202       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 203       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 204     } while (adjusted_size > compaction_max_size);
 205   }
 206 
 207   // store the forwarding pointer into the mark word
 208   if ((HeapWord*)q != compact_top) {
 209     q->forward_to(oop(compact_top));
 210     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 211   } else {
 212     // if the object isn't moving we can just set the mark to the default
 213     // mark and handle it specially later on.
 214     q->init_mark();
 215     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 216   }
 217 
 218   compact_top += adjusted_size;
 219 
 220   // we need to update the offset table so that the beginnings of objects can be
 221   // found during scavenge.  Note that we are updating the offset table based on
 222   // where the object will be once the compaction phase finishes.
 223 
 224   // Always call cross_threshold().  A contiguous space can only call it when
 225   // the compaction_top exceeds the current threshold but not for an
 226   // non-contiguous space.
 227   cp->threshold =
 228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 229   return compact_top;
 230 }
 231 
 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 233 // and use of single_block instead of alloc_block.  The name here is not really
 234 // appropriate - maybe a more general name could be invented for both the
 235 // contiguous and noncontiguous spaces.
 236 
 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 238   _bt.single_block(start, the_end);
 239   return end();
 240 }
 241 
 242 // Initialize them to NULL.
 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 244   for (size_t i = 0; i < IndexSetSize; i++) {
 245     // Note that on platforms where objects are double word aligned,
 246     // the odd array elements are not used.  It is convenient, however,
 247     // to map directly from the object size to the array element.
 248     _indexedFreeList[i].reset(IndexSetSize);
 249     _indexedFreeList[i].set_size(i);
 250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 254   }
 255 }
 256 
 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 258   for (size_t i = 1; i < IndexSetSize; i++) {
 259     assert(_indexedFreeList[i].size() == (size_t) i,
 260       "Indexed free list sizes are incorrect");
 261     _indexedFreeList[i].reset(IndexSetSize);
 262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 266   }
 267 }
 268 
 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
 270   resetIndexedFreeListArray();
 271   dictionary()->reset();
 272   if (BlockOffsetArrayUseUnallocatedBlock) {
 273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 274     // Everything's allocated until proven otherwise.
 275     _bt.set_unallocated_block(end());
 276   }
 277   if (!mr.is_empty()) {
 278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 279     _bt.single_block(mr.start(), mr.word_size());
 280     FreeChunk* fc = (FreeChunk*) mr.start();
 281     fc->set_size(mr.word_size());
 282     if (mr.word_size() >= IndexSetSize ) {
 283       returnChunkToDictionary(fc);
 284     } else {
 285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 287     }
 288     coalBirth(mr.word_size());
 289   }
 290   _promoInfo.reset();
 291   _smallLinearAllocBlock._ptr = NULL;
 292   _smallLinearAllocBlock._word_size = 0;
 293 }
 294 
 295 void CompactibleFreeListSpace::reset_after_compaction() {
 296   // Reset the space to the new reality - one free chunk.
 297   MemRegion mr(compaction_top(), end());
 298   reset(mr);
 299   // Now refill the linear allocation block(s) if possible.
 300   if (_adaptive_freelists) {
 301     refillLinearAllocBlocksIfNeeded();
 302   } else {
 303     // Place as much of mr in the linAB as we can get,
 304     // provided it was big enough to go into the dictionary.
 305     FreeChunk* fc = dictionary()->find_largest_dict();
 306     if (fc != NULL) {
 307       assert(fc->size() == mr.word_size(),
 308              "Why was the chunk broken up?");
 309       removeChunkFromDictionary(fc);
 310       HeapWord* addr = (HeapWord*) fc;
 311       _smallLinearAllocBlock.set(addr, fc->size() ,
 312         1024*SmallForLinearAlloc, fc->size());
 313       // Note that _unallocated_block is not updated here.
 314     }
 315   }
 316 }
 317 
 318 // Walks the entire dictionary, returning a coterminal
 319 // chunk, if it exists. Use with caution since it involves
 320 // a potentially complete walk of a potentially large tree.
 321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 322 
 323   assert_lock_strong(&_freelistLock);
 324 
 325   return dictionary()->find_chunk_ends_at(end());
 326 }
 327 
 328 
 329 #ifndef PRODUCT
 330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 331   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 332     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 333   }
 334 }
 335 
 336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 337   size_t sum = 0;
 338   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 339     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 340   }
 341   return sum;
 342 }
 343 
 344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 345   size_t count = 0;
 346   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 347     debug_only(
 348       ssize_t total_list_count = 0;
 349       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 350          fc = fc->next()) {
 351         total_list_count++;
 352       }
 353       assert(total_list_count ==  _indexedFreeList[i].count(),
 354         "Count in list is incorrect");
 355     )
 356     count += _indexedFreeList[i].count();
 357   }
 358   return count;
 359 }
 360 
 361 size_t CompactibleFreeListSpace::totalCount() {
 362   size_t num = totalCountInIndexedFreeLists();
 363   num +=  dictionary()->total_count();
 364   if (_smallLinearAllocBlock._word_size != 0) {
 365     num++;
 366   }
 367   return num;
 368 }
 369 #endif
 370 
 371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 372   FreeChunk* fc = (FreeChunk*) p;
 373   return fc->is_free();
 374 }
 375 
 376 size_t CompactibleFreeListSpace::used() const {
 377   return capacity() - free();
 378 }
 379 
 380 size_t CompactibleFreeListSpace::free() const {
 381   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 382   // if you do this while the structures are in flux you
 383   // may get an approximate answer only; for instance
 384   // because there is concurrent allocation either
 385   // directly by mutators or for promotion during a GC.
 386   // It's "MT-safe", however, in the sense that you are guaranteed
 387   // not to crash and burn, for instance, because of walking
 388   // pointers that could disappear as you were walking them.
 389   // The approximation is because the various components
 390   // that are read below are not read atomically (and
 391   // further the computation of totalSizeInIndexedFreeLists()
 392   // is itself a non-atomic computation. The normal use of
 393   // this is during a resize operation at the end of GC
 394   // and at that time you are guaranteed to get the
 395   // correct actual value. However, for instance, this is
 396   // also read completely asynchronously by the "perf-sampler"
 397   // that supports jvmstat, and you are apt to see the values
 398   // flicker in such cases.
 399   assert(_dictionary != NULL, "No _dictionary?");
 400   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 401           totalSizeInIndexedFreeLists() +
 402           _smallLinearAllocBlock._word_size) * HeapWordSize;
 403 }
 404 
 405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 406   assert(_dictionary != NULL, "No _dictionary?");
 407   assert_locked();
 408   size_t res = _dictionary->max_chunk_size();
 409   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 410                        (size_t) SmallForLinearAlloc - 1));
 411   // XXX the following could potentially be pretty slow;
 412   // should one, pessimistically for the rare cases when res
 413   // calculated above is less than IndexSetSize,
 414   // just return res calculated above? My reasoning was that
 415   // those cases will be so rare that the extra time spent doesn't
 416   // really matter....
 417   // Note: do not change the loop test i >= res + IndexSetStride
 418   // to i > res below, because i is unsigned and res may be zero.
 419   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 420        i -= IndexSetStride) {
 421     if (_indexedFreeList[i].head() != NULL) {
 422       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 423       return i;
 424     }
 425   }
 426   return res;
 427 }
 428 
 429 void LinearAllocBlock::print_on(outputStream* st) const {
 430   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 431             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 432             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 433 }
 434 
 435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 436   st->print_cr("COMPACTIBLE FREELIST SPACE");
 437   st->print_cr(" Space:");
 438   Space::print_on(st);
 439 
 440   st->print_cr("promoInfo:");
 441   _promoInfo.print_on(st);
 442 
 443   st->print_cr("_smallLinearAllocBlock");
 444   _smallLinearAllocBlock.print_on(st);
 445 
 446   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 447 
 448   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 449                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 450 }
 451 
 452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 453 const {
 454   reportIndexedFreeListStatistics();
 455   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 456   gclog_or_tty->print_cr("---------------------------");
 457   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 458   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 459     _indexedFreeList[i].print_on(gclog_or_tty);
 460     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 461          fc = fc->next()) {
 462       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 463                           p2i(fc), p2i((HeapWord*)fc + i),
 464                           fc->cantCoalesce() ? "\t CC" : "");
 465     }
 466   }
 467 }
 468 
 469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 470 const {
 471   _promoInfo.print_on(st);
 472 }
 473 
 474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 475 const {
 476   _dictionary->report_statistics();
 477   st->print_cr("Layout of Freelists in Tree");
 478   st->print_cr("---------------------------");
 479   _dictionary->print_free_lists(st);
 480 }
 481 
 482 class BlkPrintingClosure: public BlkClosure {
 483   const CMSCollector*             _collector;
 484   const CompactibleFreeListSpace* _sp;
 485   const CMSBitMap*                _live_bit_map;
 486   const bool                      _post_remark;
 487   outputStream*                   _st;
 488 public:
 489   BlkPrintingClosure(const CMSCollector* collector,
 490                      const CompactibleFreeListSpace* sp,
 491                      const CMSBitMap* live_bit_map,
 492                      outputStream* st):
 493     _collector(collector),
 494     _sp(sp),
 495     _live_bit_map(live_bit_map),
 496     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 497     _st(st) { }
 498   size_t do_blk(HeapWord* addr);
 499 };
 500 
 501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 502   size_t sz = _sp->block_size_no_stall(addr, _collector);
 503   assert(sz != 0, "Should always be able to compute a size");
 504   if (_sp->block_is_obj(addr)) {
 505     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 506     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 507       p2i(addr),
 508       dead ? "dead" : "live",
 509       sz,
 510       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 511     if (CMSPrintObjectsInDump && !dead) {
 512       oop(addr)->print_on(_st);
 513       _st->print_cr("--------------------------------------");
 514     }
 515   } else { // free block
 516     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 517       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 518     if (CMSPrintChunksInDump) {
 519       ((FreeChunk*)addr)->print_on(_st);
 520       _st->print_cr("--------------------------------------");
 521     }
 522   }
 523   return sz;
 524 }
 525 
 526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 527   outputStream* st) {
 528   st->print_cr("\n=========================");
 529   st->print_cr("Block layout in CMS Heap:");
 530   st->print_cr("=========================");
 531   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 532   blk_iterate(&bpcl);
 533 
 534   st->print_cr("\n=======================================");
 535   st->print_cr("Order & Layout of Promotion Info Blocks");
 536   st->print_cr("=======================================");
 537   print_promo_info_blocks(st);
 538 
 539   st->print_cr("\n===========================");
 540   st->print_cr("Order of Indexed Free Lists");
 541   st->print_cr("=========================");
 542   print_indexed_free_lists(st);
 543 
 544   st->print_cr("\n=================================");
 545   st->print_cr("Order of Free Lists in Dictionary");
 546   st->print_cr("=================================");
 547   print_dictionary_free_lists(st);
 548 }
 549 
 550 
 551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 552   assert_lock_strong(&_freelistLock);
 553   assert(PrintFLSStatistics != 0, "Reporting error");
 554   _dictionary->report_statistics();
 555   if (PrintFLSStatistics > 1) {
 556     reportIndexedFreeListStatistics();
 557     size_t total_size = totalSizeInIndexedFreeLists() +
 558                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 559     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 560   }
 561 }
 562 
 563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 564   assert_lock_strong(&_freelistLock);
 565   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 566                       "--------------------------------\n");
 567   size_t total_size = totalSizeInIndexedFreeLists();
 568   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 569   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 570   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 571   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 572   if (free_blocks != 0) {
 573     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 574   }
 575 }
 576 
 577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 578   size_t res = 0;
 579   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 580     debug_only(
 581       ssize_t recount = 0;
 582       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 583          fc = fc->next()) {
 584         recount += 1;
 585       }
 586       assert(recount == _indexedFreeList[i].count(),
 587         "Incorrect count in list");
 588     )
 589     res += _indexedFreeList[i].count();
 590   }
 591   return res;
 592 }
 593 
 594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 595   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 596     if (_indexedFreeList[i].head() != NULL) {
 597       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 598       return (size_t)i;
 599     }
 600   }
 601   return 0;
 602 }
 603 
 604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 605   HeapWord* prevEnd = end();
 606   assert(prevEnd != value, "unnecessary set_end call");
 607   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 608         "New end is below unallocated block");
 609   _end = value;
 610   if (prevEnd != NULL) {
 611     // Resize the underlying block offset table.
 612     _bt.resize(pointer_delta(value, bottom()));
 613     if (value <= prevEnd) {
 614       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 615              "New end is below unallocated block");
 616     } else {
 617       // Now, take this new chunk and add it to the free blocks.
 618       // Note that the BOT has not yet been updated for this block.
 619       size_t newFcSize = pointer_delta(value, prevEnd);
 620       // XXX This is REALLY UGLY and should be fixed up. XXX
 621       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 622         // Mark the boundary of the new block in BOT
 623         _bt.mark_block(prevEnd, value);
 624         // put it all in the linAB
 625         MutexLockerEx x(parDictionaryAllocLock(),
 626                         Mutex::_no_safepoint_check_flag);
 627         _smallLinearAllocBlock._ptr = prevEnd;
 628         _smallLinearAllocBlock._word_size = newFcSize;
 629         repairLinearAllocBlock(&_smallLinearAllocBlock);
 630         // Births of chunks put into a LinAB are not recorded.  Births
 631         // of chunks as they are allocated out of a LinAB are.
 632       } else {
 633         // Add the block to the free lists, if possible coalescing it
 634         // with the last free block, and update the BOT and census data.
 635         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 636       }
 637     }
 638   }
 639 }
 640 
 641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 642   CompactibleFreeListSpace* _cfls;
 643   CMSCollector* _collector;
 644 protected:
 645   // Override.
 646 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 647   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 648                                        HeapWord* bottom, HeapWord* top, \
 649                                        ClosureType* cl);                \
 650       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 651                                        HeapWord* bottom, HeapWord* top, \
 652                                        ClosureType* cl);                \
 653     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 654                                        HeapWord* bottom, HeapWord* top, \
 655                                        ClosureType* cl)
 656   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 657   walk_mem_region_with_cl_DECL(FilteringClosure);
 658 
 659 public:
 660   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 661                       CMSCollector* collector,
 662                       ExtendedOopClosure* cl,
 663                       CardTableModRefBS::PrecisionStyle precision,
 664                       HeapWord* boundary) :
 665     Filtering_DCTOC(sp, cl, precision, boundary),
 666     _cfls(sp), _collector(collector) {}
 667 };
 668 
 669 // We de-virtualize the block-related calls below, since we know that our
 670 // space is a CompactibleFreeListSpace.
 671 
 672 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 673 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 674                                                  HeapWord* bottom,              \
 675                                                  HeapWord* top,                 \
 676                                                  ClosureType* cl) {             \
 677    bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;                 \
 678    if (is_par) {                                                                \
 679      assert(GenCollectedHeap::heap()->n_par_threads() ==                        \
 680             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
 681      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 682    } else {                                                                     \
 683      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 684    }                                                                            \
 685 }                                                                               \
 686 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 687                                                       HeapWord* bottom,         \
 688                                                       HeapWord* top,            \
 689                                                       ClosureType* cl) {        \
 690   /* Skip parts that are before "mr", in case "block_start" sent us             \
 691      back too far. */                                                           \
 692   HeapWord* mr_start = mr.start();                                              \
 693   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 694   HeapWord* next = bottom + bot_size;                                           \
 695   while (next < mr_start) {                                                     \
 696     bottom = next;                                                              \
 697     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 698     next = bottom + bot_size;                                                   \
 699   }                                                                             \
 700                                                                                 \
 701   while (bottom < top) {                                                        \
 702     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 703         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 704                     oop(bottom)) &&                                             \
 705         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 706       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 707       bottom += _cfls->adjustObjectSize(word_sz);                               \
 708     } else {                                                                    \
 709       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 710     }                                                                           \
 711   }                                                                             \
 712 }                                                                               \
 713 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 714                                                         HeapWord* bottom,       \
 715                                                         HeapWord* top,          \
 716                                                         ClosureType* cl) {      \
 717   /* Skip parts that are before "mr", in case "block_start" sent us             \
 718      back too far. */                                                           \
 719   HeapWord* mr_start = mr.start();                                              \
 720   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 721   HeapWord* next = bottom + bot_size;                                           \
 722   while (next < mr_start) {                                                     \
 723     bottom = next;                                                              \
 724     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 725     next = bottom + bot_size;                                                   \
 726   }                                                                             \
 727                                                                                 \
 728   while (bottom < top) {                                                        \
 729     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 730         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 731                     oop(bottom)) &&                                             \
 732         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 733       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 734       bottom += _cfls->adjustObjectSize(word_sz);                               \
 735     } else {                                                                    \
 736       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 737     }                                                                           \
 738   }                                                                             \
 739 }
 740 
 741 // (There are only two of these, rather than N, because the split is due
 742 // only to the introduction of the FilteringClosure, a local part of the
 743 // impl of this abstraction.)
 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 745 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 746 
 747 DirtyCardToOopClosure*
 748 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 749                                       CardTableModRefBS::PrecisionStyle precision,
 750                                       HeapWord* boundary) {
 751   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 752 }
 753 
 754 
 755 // Note on locking for the space iteration functions:
 756 // since the collector's iteration activities are concurrent with
 757 // allocation activities by mutators, absent a suitable mutual exclusion
 758 // mechanism the iterators may go awry. For instance a block being iterated
 759 // may suddenly be allocated or divided up and part of it allocated and
 760 // so on.
 761 
 762 // Apply the given closure to each block in the space.
 763 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 764   assert_lock_strong(freelistLock());
 765   HeapWord *cur, *limit;
 766   for (cur = bottom(), limit = end(); cur < limit;
 767        cur += cl->do_blk_careful(cur));
 768 }
 769 
 770 // Apply the given closure to each block in the space.
 771 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 772   assert_lock_strong(freelistLock());
 773   HeapWord *cur, *limit;
 774   for (cur = bottom(), limit = end(); cur < limit;
 775        cur += cl->do_blk(cur));
 776 }
 777 
 778 // Apply the given closure to each oop in the space.
 779 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 780   assert_lock_strong(freelistLock());
 781   HeapWord *cur, *limit;
 782   size_t curSize;
 783   for (cur = bottom(), limit = end(); cur < limit;
 784        cur += curSize) {
 785     curSize = block_size(cur);
 786     if (block_is_obj(cur)) {
 787       oop(cur)->oop_iterate(cl);
 788     }
 789   }
 790 }
 791 
 792 // NOTE: In the following methods, in order to safely be able to
 793 // apply the closure to an object, we need to be sure that the
 794 // object has been initialized. We are guaranteed that an object
 795 // is initialized if we are holding the Heap_lock with the
 796 // world stopped.
 797 void CompactibleFreeListSpace::verify_objects_initialized() const {
 798   if (is_init_completed()) {
 799     assert_locked_or_safepoint(Heap_lock);
 800     if (Universe::is_fully_initialized()) {
 801       guarantee(SafepointSynchronize::is_at_safepoint(),
 802                 "Required for objects to be initialized");
 803     }
 804   } // else make a concession at vm start-up
 805 }
 806 
 807 // Apply the given closure to each object in the space
 808 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 809   assert_lock_strong(freelistLock());
 810   NOT_PRODUCT(verify_objects_initialized());
 811   HeapWord *cur, *limit;
 812   size_t curSize;
 813   for (cur = bottom(), limit = end(); cur < limit;
 814        cur += curSize) {
 815     curSize = block_size(cur);
 816     if (block_is_obj(cur)) {
 817       blk->do_object(oop(cur));
 818     }
 819   }
 820 }
 821 
 822 // Apply the given closure to each live object in the space
 823 //   The usage of CompactibleFreeListSpace
 824 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
 825 // objects in the space with references to objects that are no longer
 826 // valid.  For example, an object may reference another object
 827 // that has already been sweep up (collected).  This method uses
 828 // obj_is_alive() to determine whether it is safe to apply the closure to
 829 // an object.  See obj_is_alive() for details on how liveness of an
 830 // object is decided.
 831 
 832 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 833   assert_lock_strong(freelistLock());
 834   NOT_PRODUCT(verify_objects_initialized());
 835   HeapWord *cur, *limit;
 836   size_t curSize;
 837   for (cur = bottom(), limit = end(); cur < limit;
 838        cur += curSize) {
 839     curSize = block_size(cur);
 840     if (block_is_obj(cur) && obj_is_alive(cur)) {
 841       blk->do_object(oop(cur));
 842     }
 843   }
 844 }
 845 
 846 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 847                                                   UpwardsObjectClosure* cl) {
 848   assert_locked(freelistLock());
 849   NOT_PRODUCT(verify_objects_initialized());
 850   assert(!mr.is_empty(), "Should be non-empty");
 851   // We use MemRegion(bottom(), end()) rather than used_region() below
 852   // because the two are not necessarily equal for some kinds of
 853   // spaces, in particular, certain kinds of free list spaces.
 854   // We could use the more complicated but more precise:
 855   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 856   // but the slight imprecision seems acceptable in the assertion check.
 857   assert(MemRegion(bottom(), end()).contains(mr),
 858          "Should be within used space");
 859   HeapWord* prev = cl->previous();   // max address from last time
 860   if (prev >= mr.end()) { // nothing to do
 861     return;
 862   }
 863   // This assert will not work when we go from cms space to perm
 864   // space, and use same closure. Easy fix deferred for later. XXX YSR
 865   // assert(prev == NULL || contains(prev), "Should be within space");
 866 
 867   bool last_was_obj_array = false;
 868   HeapWord *blk_start_addr, *region_start_addr;
 869   if (prev > mr.start()) {
 870     region_start_addr = prev;
 871     blk_start_addr    = prev;
 872     // The previous invocation may have pushed "prev" beyond the
 873     // last allocated block yet there may be still be blocks
 874     // in this region due to a particular coalescing policy.
 875     // Relax the assertion so that the case where the unallocated
 876     // block is maintained and "prev" is beyond the unallocated
 877     // block does not cause the assertion to fire.
 878     assert((BlockOffsetArrayUseUnallocatedBlock &&
 879             (!is_in(prev))) ||
 880            (blk_start_addr == block_start(region_start_addr)), "invariant");
 881   } else {
 882     region_start_addr = mr.start();
 883     blk_start_addr    = block_start(region_start_addr);
 884   }
 885   HeapWord* region_end_addr = mr.end();
 886   MemRegion derived_mr(region_start_addr, region_end_addr);
 887   while (blk_start_addr < region_end_addr) {
 888     const size_t size = block_size(blk_start_addr);
 889     if (block_is_obj(blk_start_addr)) {
 890       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 891     } else {
 892       last_was_obj_array = false;
 893     }
 894     blk_start_addr += size;
 895   }
 896   if (!last_was_obj_array) {
 897     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 898            "Should be within (closed) used space");
 899     assert(blk_start_addr > prev, "Invariant");
 900     cl->set_previous(blk_start_addr); // min address for next time
 901   }
 902 }
 903 
 904 // Callers of this iterator beware: The closure application should
 905 // be robust in the face of uninitialized objects and should (always)
 906 // return a correct size so that the next addr + size below gives us a
 907 // valid block boundary. [See for instance,
 908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 909 // in ConcurrentMarkSweepGeneration.cpp.]
 910 HeapWord*
 911 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 912   ObjectClosureCareful* cl) {
 913   assert_lock_strong(freelistLock());
 914   // Can't use used_region() below because it may not necessarily
 915   // be the same as [bottom(),end()); although we could
 916   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 917   // that appears too cumbersome, so we just do the simpler check
 918   // in the assertion below.
 919   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 920          "mr should be non-empty and within used space");
 921   HeapWord *addr, *end;
 922   size_t size;
 923   for (addr = block_start_careful(mr.start()), end  = mr.end();
 924        addr < end; addr += size) {
 925     FreeChunk* fc = (FreeChunk*)addr;
 926     if (fc->is_free()) {
 927       // Since we hold the free list lock, which protects direct
 928       // allocation in this generation by mutators, a free object
 929       // will remain free throughout this iteration code.
 930       size = fc->size();
 931     } else {
 932       // Note that the object need not necessarily be initialized,
 933       // because (for instance) the free list lock does NOT protect
 934       // object initialization. The closure application below must
 935       // therefore be correct in the face of uninitialized objects.
 936       size = cl->do_object_careful_m(oop(addr), mr);
 937       if (size == 0) {
 938         // An unparsable object found. Signal early termination.
 939         return addr;
 940       }
 941     }
 942   }
 943   return NULL;
 944 }
 945 
 946 
 947 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 948   NOT_PRODUCT(verify_objects_initialized());
 949   return _bt.block_start(p);
 950 }
 951 
 952 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 953   return _bt.block_start_careful(p);
 954 }
 955 
 956 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 957   NOT_PRODUCT(verify_objects_initialized());
 958   // This must be volatile, or else there is a danger that the compiler
 959   // will compile the code below into a sometimes-infinite loop, by keeping
 960   // the value read the first time in a register.
 961   while (true) {
 962     // We must do this until we get a consistent view of the object.
 963     if (FreeChunk::indicatesFreeChunk(p)) {
 964       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 965       size_t res = fc->size();
 966 
 967       // Bugfix for systems with weak memory model (PPC64/IA64). The
 968       // block's free bit was set and we have read the size of the
 969       // block. Acquire and check the free bit again. If the block is
 970       // still free, the read size is correct.
 971       OrderAccess::acquire();
 972 
 973       // If the object is still a free chunk, return the size, else it
 974       // has been allocated so try again.
 975       if (FreeChunk::indicatesFreeChunk(p)) {
 976         assert(res != 0, "Block size should not be 0");
 977         return res;
 978       }
 979     } else {
 980       // must read from what 'p' points to in each loop.
 981       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 982       if (k != NULL) {
 983         assert(k->is_klass(), "Should really be klass oop.");
 984         oop o = (oop)p;
 985         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 986 
 987         // Bugfix for systems with weak memory model (PPC64/IA64).
 988         // The object o may be an array. Acquire to make sure that the array
 989         // size (third word) is consistent.
 990         OrderAccess::acquire();
 991 
 992         size_t res = o->size_given_klass(k);
 993         res = adjustObjectSize(res);
 994         assert(res != 0, "Block size should not be 0");
 995         return res;
 996       }
 997     }
 998   }
 999 }
1000 
1001 // TODO: Now that is_parsable is gone, we should combine these two functions.
1002 // A variant of the above that uses the Printezis bits for
1003 // unparsable but allocated objects. This avoids any possible
1004 // stalls waiting for mutators to initialize objects, and is
1005 // thus potentially faster than the variant above. However,
1006 // this variant may return a zero size for a block that is
1007 // under mutation and for which a consistent size cannot be
1008 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1009 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1010                                                      const CMSCollector* c)
1011 const {
1012   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1013   // This must be volatile, or else there is a danger that the compiler
1014   // will compile the code below into a sometimes-infinite loop, by keeping
1015   // the value read the first time in a register.
1016   DEBUG_ONLY(uint loops = 0;)
1017   while (true) {
1018     // We must do this until we get a consistent view of the object.
1019     if (FreeChunk::indicatesFreeChunk(p)) {
1020       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1021       size_t res = fc->size();
1022 
1023       // Bugfix for systems with weak memory model (PPC64/IA64). The
1024       // free bit of the block was set and we have read the size of
1025       // the block. Acquire and check the free bit again. If the
1026       // block is still free, the read size is correct.
1027       OrderAccess::acquire();
1028 
1029       if (FreeChunk::indicatesFreeChunk(p)) {
1030         assert(res != 0, "Block size should not be 0");
1031         assert(loops == 0, "Should be 0");
1032         return res;
1033       }
1034     } else {
1035       // must read from what 'p' points to in each loop.
1036       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1037       // We trust the size of any object that has a non-NULL
1038       // klass and (for those in the perm gen) is parsable
1039       // -- irrespective of its conc_safe-ty.
1040       if (k != NULL) {
1041         assert(k->is_klass(), "Should really be klass oop.");
1042         oop o = (oop)p;
1043         assert(o->is_oop(), "Should be an oop");
1044 
1045         // Bugfix for systems with weak memory model (PPC64/IA64).
1046         // The object o may be an array. Acquire to make sure that the array
1047         // size (third word) is consistent.
1048         OrderAccess::acquire();
1049 
1050         size_t res = o->size_given_klass(k);
1051         res = adjustObjectSize(res);
1052         assert(res != 0, "Block size should not be 0");
1053         return res;
1054       } else {
1055         // May return 0 if P-bits not present.
1056         return c->block_size_if_printezis_bits(p);
1057       }
1058     }
1059     assert(loops == 0, "Can loop at most once");
1060     DEBUG_ONLY(loops++;)
1061   }
1062 }
1063 
1064 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1065   NOT_PRODUCT(verify_objects_initialized());
1066   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1067   FreeChunk* fc = (FreeChunk*)p;
1068   if (fc->is_free()) {
1069     return fc->size();
1070   } else {
1071     // Ignore mark word because this may be a recently promoted
1072     // object whose mark word is used to chain together grey
1073     // objects (the last one would have a null value).
1074     assert(oop(p)->is_oop(true), "Should be an oop");
1075     return adjustObjectSize(oop(p)->size());
1076   }
1077 }
1078 
1079 // This implementation assumes that the property of "being an object" is
1080 // stable.  But being a free chunk may not be (because of parallel
1081 // promotion.)
1082 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1083   FreeChunk* fc = (FreeChunk*)p;
1084   assert(is_in_reserved(p), "Should be in space");
1085   if (FreeChunk::indicatesFreeChunk(p)) return false;
1086   Klass* k = oop(p)->klass_or_null();
1087   if (k != NULL) {
1088     // Ignore mark word because it may have been used to
1089     // chain together promoted objects (the last one
1090     // would have a null value).
1091     assert(oop(p)->is_oop(true), "Should be an oop");
1092     return true;
1093   } else {
1094     return false;  // Was not an object at the start of collection.
1095   }
1096 }
1097 
1098 // Check if the object is alive. This fact is checked either by consulting
1099 // the main marking bitmap in the sweeping phase or, if it's a permanent
1100 // generation and we're not in the sweeping phase, by checking the
1101 // perm_gen_verify_bit_map where we store the "deadness" information if
1102 // we did not sweep the perm gen in the most recent previous GC cycle.
1103 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1104   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1105          "Else races are possible");
1106   assert(block_is_obj(p), "The address should point to an object");
1107 
1108   // If we're sweeping, we use object liveness information from the main bit map
1109   // for both perm gen and old gen.
1110   // We don't need to lock the bitmap (live_map or dead_map below), because
1111   // EITHER we are in the middle of the sweeping phase, and the
1112   // main marking bit map (live_map below) is locked,
1113   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1114   // is stable, because it's mutated only in the sweeping phase.
1115   // NOTE: This method is also used by jmap where, if class unloading is
1116   // off, the results can return "false" for legitimate perm objects,
1117   // when we are not in the midst of a sweeping phase, which can result
1118   // in jmap not reporting certain perm gen objects. This will be moot
1119   // if/when the perm gen goes away in the future.
1120   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1121     CMSBitMap* live_map = _collector->markBitMap();
1122     return live_map->par_isMarked((HeapWord*) p);
1123   }
1124   return true;
1125 }
1126 
1127 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1128   FreeChunk* fc = (FreeChunk*)p;
1129   assert(is_in_reserved(p), "Should be in space");
1130   assert(_bt.block_start(p) == p, "Should be a block boundary");
1131   if (!fc->is_free()) {
1132     // Ignore mark word because it may have been used to
1133     // chain together promoted objects (the last one
1134     // would have a null value).
1135     assert(oop(p)->is_oop(true), "Should be an oop");
1136     return true;
1137   }
1138   return false;
1139 }
1140 
1141 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1142 // approximate answer if you don't hold the freelistlock when you call this.
1143 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1144   size_t size = 0;
1145   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1146     debug_only(
1147       // We may be calling here without the lock in which case we
1148       // won't do this modest sanity check.
1149       if (freelistLock()->owned_by_self()) {
1150         size_t total_list_size = 0;
1151         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1152           fc = fc->next()) {
1153           total_list_size += i;
1154         }
1155         assert(total_list_size == i * _indexedFreeList[i].count(),
1156                "Count in list is incorrect");
1157       }
1158     )
1159     size += i * _indexedFreeList[i].count();
1160   }
1161   return size;
1162 }
1163 
1164 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1165   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1166   return allocate(size);
1167 }
1168 
1169 HeapWord*
1170 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1171   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1172 }
1173 
1174 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1175   assert_lock_strong(freelistLock());
1176   HeapWord* res = NULL;
1177   assert(size == adjustObjectSize(size),
1178          "use adjustObjectSize() before calling into allocate()");
1179 
1180   if (_adaptive_freelists) {
1181     res = allocate_adaptive_freelists(size);
1182   } else {  // non-adaptive free lists
1183     res = allocate_non_adaptive_freelists(size);
1184   }
1185 
1186   if (res != NULL) {
1187     // check that res does lie in this space!
1188     assert(is_in_reserved(res), "Not in this space!");
1189     assert(is_aligned((void*)res), "alignment check");
1190 
1191     FreeChunk* fc = (FreeChunk*)res;
1192     fc->markNotFree();
1193     assert(!fc->is_free(), "shouldn't be marked free");
1194     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1195     // Verify that the block offset table shows this to
1196     // be a single block, but not one which is unallocated.
1197     _bt.verify_single_block(res, size);
1198     _bt.verify_not_unallocated(res, size);
1199     // mangle a just allocated object with a distinct pattern.
1200     debug_only(fc->mangleAllocated(size));
1201   }
1202 
1203   return res;
1204 }
1205 
1206 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1207   HeapWord* res = NULL;
1208   // try and use linear allocation for smaller blocks
1209   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1210     // if successful, the following also adjusts block offset table
1211     res = getChunkFromSmallLinearAllocBlock(size);
1212   }
1213   // Else triage to indexed lists for smaller sizes
1214   if (res == NULL) {
1215     if (size < SmallForDictionary) {
1216       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1217     } else {
1218       // else get it from the big dictionary; if even this doesn't
1219       // work we are out of luck.
1220       res = (HeapWord*)getChunkFromDictionaryExact(size);
1221     }
1222   }
1223 
1224   return res;
1225 }
1226 
1227 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1228   assert_lock_strong(freelistLock());
1229   HeapWord* res = NULL;
1230   assert(size == adjustObjectSize(size),
1231          "use adjustObjectSize() before calling into allocate()");
1232 
1233   // Strategy
1234   //   if small
1235   //     exact size from small object indexed list if small
1236   //     small or large linear allocation block (linAB) as appropriate
1237   //     take from lists of greater sized chunks
1238   //   else
1239   //     dictionary
1240   //     small or large linear allocation block if it has the space
1241   // Try allocating exact size from indexTable first
1242   if (size < IndexSetSize) {
1243     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1244     if(res != NULL) {
1245       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1246         "Not removed from free list");
1247       // no block offset table adjustment is necessary on blocks in
1248       // the indexed lists.
1249 
1250     // Try allocating from the small LinAB
1251     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1252         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1253         // if successful, the above also adjusts block offset table
1254         // Note that this call will refill the LinAB to
1255         // satisfy the request.  This is different that
1256         // evm.
1257         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1258     } else {
1259       // Raid the exact free lists larger than size, even if they are not
1260       // overpopulated.
1261       res = (HeapWord*) getChunkFromGreater(size);
1262     }
1263   } else {
1264     // Big objects get allocated directly from the dictionary.
1265     res = (HeapWord*) getChunkFromDictionaryExact(size);
1266     if (res == NULL) {
1267       // Try hard not to fail since an allocation failure will likely
1268       // trigger a synchronous GC.  Try to get the space from the
1269       // allocation blocks.
1270       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1271     }
1272   }
1273 
1274   return res;
1275 }
1276 
1277 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1278 // when promoting obj.
1279 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1280   // Depending on the object size, expansion may require refilling either a
1281   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1282   // is added because the dictionary may over-allocate to avoid fragmentation.
1283   size_t space = obj_size;
1284   if (!_adaptive_freelists) {
1285     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1286   }
1287   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1288   return space;
1289 }
1290 
1291 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1292   FreeChunk* ret;
1293 
1294   assert(numWords >= MinChunkSize, "Size is less than minimum");
1295   assert(linearAllocationWouldFail() || bestFitFirst(),
1296     "Should not be here");
1297 
1298   size_t i;
1299   size_t currSize = numWords + MinChunkSize;
1300   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1301   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1302     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1303     if (fl->head()) {
1304       ret = getFromListGreater(fl, numWords);
1305       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1306       return ret;
1307     }
1308   }
1309 
1310   currSize = MAX2((size_t)SmallForDictionary,
1311                   (size_t)(numWords + MinChunkSize));
1312 
1313   /* Try to get a chunk that satisfies request, while avoiding
1314      fragmentation that can't be handled. */
1315   {
1316     ret =  dictionary()->get_chunk(currSize);
1317     if (ret != NULL) {
1318       assert(ret->size() - numWords >= MinChunkSize,
1319              "Chunk is too small");
1320       _bt.allocated((HeapWord*)ret, ret->size());
1321       /* Carve returned chunk. */
1322       (void) splitChunkAndReturnRemainder(ret, numWords);
1323       /* Label this as no longer a free chunk. */
1324       assert(ret->is_free(), "This chunk should be free");
1325       ret->link_prev(NULL);
1326     }
1327     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1328     return ret;
1329   }
1330   ShouldNotReachHere();
1331 }
1332 
1333 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1334   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1335   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1336 }
1337 
1338 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1339   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1340          (_smallLinearAllocBlock._word_size == fc->size()),
1341          "Linear allocation block shows incorrect size");
1342   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1343           (_smallLinearAllocBlock._word_size == fc->size()));
1344 }
1345 
1346 // Check if the purported free chunk is present either as a linear
1347 // allocation block, the size-indexed table of (smaller) free blocks,
1348 // or the larger free blocks kept in the binary tree dictionary.
1349 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1350   if (verify_chunk_is_linear_alloc_block(fc)) {
1351     return true;
1352   } else if (fc->size() < IndexSetSize) {
1353     return verifyChunkInIndexedFreeLists(fc);
1354   } else {
1355     return dictionary()->verify_chunk_in_free_list(fc);
1356   }
1357 }
1358 
1359 #ifndef PRODUCT
1360 void CompactibleFreeListSpace::assert_locked() const {
1361   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1362 }
1363 
1364 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1365   CMSLockVerifier::assert_locked(lock);
1366 }
1367 #endif
1368 
1369 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1370   // In the parallel case, the main thread holds the free list lock
1371   // on behalf the parallel threads.
1372   FreeChunk* fc;
1373   {
1374     // If GC is parallel, this might be called by several threads.
1375     // This should be rare enough that the locking overhead won't affect
1376     // the sequential code.
1377     MutexLockerEx x(parDictionaryAllocLock(),
1378                     Mutex::_no_safepoint_check_flag);
1379     fc = getChunkFromDictionary(size);
1380   }
1381   if (fc != NULL) {
1382     fc->dontCoalesce();
1383     assert(fc->is_free(), "Should be free, but not coalescable");
1384     // Verify that the block offset table shows this to
1385     // be a single block, but not one which is unallocated.
1386     _bt.verify_single_block((HeapWord*)fc, fc->size());
1387     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1388   }
1389   return fc;
1390 }
1391 
1392 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1393   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1394   assert_locked();
1395 
1396   // if we are tracking promotions, then first ensure space for
1397   // promotion (including spooling space for saving header if necessary).
1398   // then allocate and copy, then track promoted info if needed.
1399   // When tracking (see PromotionInfo::track()), the mark word may
1400   // be displaced and in this case restoration of the mark word
1401   // occurs in the (oop_since_save_marks_)iterate phase.
1402   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1403     return NULL;
1404   }
1405   // Call the allocate(size_t, bool) form directly to avoid the
1406   // additional call through the allocate(size_t) form.  Having
1407   // the compile inline the call is problematic because allocate(size_t)
1408   // is a virtual method.
1409   HeapWord* res = allocate(adjustObjectSize(obj_size));
1410   if (res != NULL) {
1411     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1412     // if we should be tracking promotions, do so.
1413     if (_promoInfo.tracking()) {
1414         _promoInfo.track((PromotedObject*)res);
1415     }
1416   }
1417   return oop(res);
1418 }
1419 
1420 HeapWord*
1421 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1422   assert_locked();
1423   assert(size >= MinChunkSize, "minimum chunk size");
1424   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1425     "maximum from smallLinearAllocBlock");
1426   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1427 }
1428 
1429 HeapWord*
1430 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1431                                                        size_t size) {
1432   assert_locked();
1433   assert(size >= MinChunkSize, "too small");
1434   HeapWord* res = NULL;
1435   // Try to do linear allocation from blk, making sure that
1436   if (blk->_word_size == 0) {
1437     // We have probably been unable to fill this either in the prologue or
1438     // when it was exhausted at the last linear allocation. Bail out until
1439     // next time.
1440     assert(blk->_ptr == NULL, "consistency check");
1441     return NULL;
1442   }
1443   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1444   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1445   if (res != NULL) return res;
1446 
1447   // about to exhaust this linear allocation block
1448   if (blk->_word_size == size) { // exactly satisfied
1449     res = blk->_ptr;
1450     _bt.allocated(res, blk->_word_size);
1451   } else if (size + MinChunkSize <= blk->_refillSize) {
1452     size_t sz = blk->_word_size;
1453     // Update _unallocated_block if the size is such that chunk would be
1454     // returned to the indexed free list.  All other chunks in the indexed
1455     // free lists are allocated from the dictionary so that _unallocated_block
1456     // has already been adjusted for them.  Do it here so that the cost
1457     // for all chunks added back to the indexed free lists.
1458     if (sz < SmallForDictionary) {
1459       _bt.allocated(blk->_ptr, sz);
1460     }
1461     // Return the chunk that isn't big enough, and then refill below.
1462     addChunkToFreeLists(blk->_ptr, sz);
1463     split_birth(sz);
1464     // Don't keep statistics on adding back chunk from a LinAB.
1465   } else {
1466     // A refilled block would not satisfy the request.
1467     return NULL;
1468   }
1469 
1470   blk->_ptr = NULL; blk->_word_size = 0;
1471   refillLinearAllocBlock(blk);
1472   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1473          "block was replenished");
1474   if (res != NULL) {
1475     split_birth(size);
1476     repairLinearAllocBlock(blk);
1477   } else if (blk->_ptr != NULL) {
1478     res = blk->_ptr;
1479     size_t blk_size = blk->_word_size;
1480     blk->_word_size -= size;
1481     blk->_ptr  += size;
1482     split_birth(size);
1483     repairLinearAllocBlock(blk);
1484     // Update BOT last so that other (parallel) GC threads see a consistent
1485     // view of the BOT and free blocks.
1486     // Above must occur before BOT is updated below.
1487     OrderAccess::storestore();
1488     _bt.split_block(res, blk_size, size);  // adjust block offset table
1489   }
1490   return res;
1491 }
1492 
1493 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1494                                         LinearAllocBlock* blk,
1495                                         size_t size) {
1496   assert_locked();
1497   assert(size >= MinChunkSize, "too small");
1498 
1499   HeapWord* res = NULL;
1500   // This is the common case.  Keep it simple.
1501   if (blk->_word_size >= size + MinChunkSize) {
1502     assert(blk->_ptr != NULL, "consistency check");
1503     res = blk->_ptr;
1504     // Note that the BOT is up-to-date for the linAB before allocation.  It
1505     // indicates the start of the linAB.  The split_block() updates the
1506     // BOT for the linAB after the allocation (indicates the start of the
1507     // next chunk to be allocated).
1508     size_t blk_size = blk->_word_size;
1509     blk->_word_size -= size;
1510     blk->_ptr  += size;
1511     split_birth(size);
1512     repairLinearAllocBlock(blk);
1513     // Update BOT last so that other (parallel) GC threads see a consistent
1514     // view of the BOT and free blocks.
1515     // Above must occur before BOT is updated below.
1516     OrderAccess::storestore();
1517     _bt.split_block(res, blk_size, size);  // adjust block offset table
1518     _bt.allocated(res, size);
1519   }
1520   return res;
1521 }
1522 
1523 FreeChunk*
1524 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1525   assert_locked();
1526   assert(size < SmallForDictionary, "just checking");
1527   FreeChunk* res;
1528   res = _indexedFreeList[size].get_chunk_at_head();
1529   if (res == NULL) {
1530     res = getChunkFromIndexedFreeListHelper(size);
1531   }
1532   _bt.verify_not_unallocated((HeapWord*) res, size);
1533   assert(res == NULL || res->size() == size, "Incorrect block size");
1534   return res;
1535 }
1536 
1537 FreeChunk*
1538 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1539   bool replenish) {
1540   assert_locked();
1541   FreeChunk* fc = NULL;
1542   if (size < SmallForDictionary) {
1543     assert(_indexedFreeList[size].head() == NULL ||
1544       _indexedFreeList[size].surplus() <= 0,
1545       "List for this size should be empty or under populated");
1546     // Try best fit in exact lists before replenishing the list
1547     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1548       // Replenish list.
1549       //
1550       // Things tried that failed.
1551       //   Tried allocating out of the two LinAB's first before
1552       // replenishing lists.
1553       //   Tried small linAB of size 256 (size in indexed list)
1554       // and replenishing indexed lists from the small linAB.
1555       //
1556       FreeChunk* newFc = NULL;
1557       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1558       if (replenish_size < SmallForDictionary) {
1559         // Do not replenish from an underpopulated size.
1560         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1561             _indexedFreeList[replenish_size].head() != NULL) {
1562           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1563         } else if (bestFitFirst()) {
1564           newFc = bestFitSmall(replenish_size);
1565         }
1566       }
1567       if (newFc == NULL && replenish_size > size) {
1568         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1569         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1570       }
1571       // Note: The stats update re split-death of block obtained above
1572       // will be recorded below precisely when we know we are going to
1573       // be actually splitting it into more than one pieces below.
1574       if (newFc != NULL) {
1575         if  (replenish || CMSReplenishIntermediate) {
1576           // Replenish this list and return one block to caller.
1577           size_t i;
1578           FreeChunk *curFc, *nextFc;
1579           size_t num_blk = newFc->size() / size;
1580           assert(num_blk >= 1, "Smaller than requested?");
1581           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1582           if (num_blk > 1) {
1583             // we are sure we will be splitting the block just obtained
1584             // into multiple pieces; record the split-death of the original
1585             splitDeath(replenish_size);
1586           }
1587           // carve up and link blocks 0, ..., num_blk - 2
1588           // The last chunk is not added to the lists but is returned as the
1589           // free chunk.
1590           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1591                i = 0;
1592                i < (num_blk - 1);
1593                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1594                i++) {
1595             curFc->set_size(size);
1596             // Don't record this as a return in order to try and
1597             // determine the "returns" from a GC.
1598             _bt.verify_not_unallocated((HeapWord*) fc, size);
1599             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1600             _bt.mark_block((HeapWord*)curFc, size);
1601             split_birth(size);
1602             // Don't record the initial population of the indexed list
1603             // as a split birth.
1604           }
1605 
1606           // check that the arithmetic was OK above
1607           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1608             "inconsistency in carving newFc");
1609           curFc->set_size(size);
1610           _bt.mark_block((HeapWord*)curFc, size);
1611           split_birth(size);
1612           fc = curFc;
1613         } else {
1614           // Return entire block to caller
1615           fc = newFc;
1616         }
1617       }
1618     }
1619   } else {
1620     // Get a free chunk from the free chunk dictionary to be returned to
1621     // replenish the indexed free list.
1622     fc = getChunkFromDictionaryExact(size);
1623   }
1624   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1625   return fc;
1626 }
1627 
1628 FreeChunk*
1629 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1630   assert_locked();
1631   FreeChunk* fc = _dictionary->get_chunk(size,
1632                                          FreeBlockDictionary<FreeChunk>::atLeast);
1633   if (fc == NULL) {
1634     return NULL;
1635   }
1636   _bt.allocated((HeapWord*)fc, fc->size());
1637   if (fc->size() >= size + MinChunkSize) {
1638     fc = splitChunkAndReturnRemainder(fc, size);
1639   }
1640   assert(fc->size() >= size, "chunk too small");
1641   assert(fc->size() < size + MinChunkSize, "chunk too big");
1642   _bt.verify_single_block((HeapWord*)fc, fc->size());
1643   return fc;
1644 }
1645 
1646 FreeChunk*
1647 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1648   assert_locked();
1649   FreeChunk* fc = _dictionary->get_chunk(size,
1650                                          FreeBlockDictionary<FreeChunk>::atLeast);
1651   if (fc == NULL) {
1652     return fc;
1653   }
1654   _bt.allocated((HeapWord*)fc, fc->size());
1655   if (fc->size() == size) {
1656     _bt.verify_single_block((HeapWord*)fc, size);
1657     return fc;
1658   }
1659   assert(fc->size() > size, "get_chunk() guarantee");
1660   if (fc->size() < size + MinChunkSize) {
1661     // Return the chunk to the dictionary and go get a bigger one.
1662     returnChunkToDictionary(fc);
1663     fc = _dictionary->get_chunk(size + MinChunkSize,
1664                                 FreeBlockDictionary<FreeChunk>::atLeast);
1665     if (fc == NULL) {
1666       return NULL;
1667     }
1668     _bt.allocated((HeapWord*)fc, fc->size());
1669   }
1670   assert(fc->size() >= size + MinChunkSize, "tautology");
1671   fc = splitChunkAndReturnRemainder(fc, size);
1672   assert(fc->size() == size, "chunk is wrong size");
1673   _bt.verify_single_block((HeapWord*)fc, size);
1674   return fc;
1675 }
1676 
1677 void
1678 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1679   assert_locked();
1680 
1681   size_t size = chunk->size();
1682   _bt.verify_single_block((HeapWord*)chunk, size);
1683   // adjust _unallocated_block downward, as necessary
1684   _bt.freed((HeapWord*)chunk, size);
1685   _dictionary->return_chunk(chunk);
1686 #ifndef PRODUCT
1687   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1688     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1689     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1690     tl->verify_stats();
1691   }
1692 #endif // PRODUCT
1693 }
1694 
1695 void
1696 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1697   assert_locked();
1698   size_t size = fc->size();
1699   _bt.verify_single_block((HeapWord*) fc, size);
1700   _bt.verify_not_unallocated((HeapWord*) fc, size);
1701   if (_adaptive_freelists) {
1702     _indexedFreeList[size].return_chunk_at_tail(fc);
1703   } else {
1704     _indexedFreeList[size].return_chunk_at_head(fc);
1705   }
1706 #ifndef PRODUCT
1707   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1708      _indexedFreeList[size].verify_stats();
1709   }
1710 #endif // PRODUCT
1711 }
1712 
1713 // Add chunk to end of last block -- if it's the largest
1714 // block -- and update BOT and census data. We would
1715 // of course have preferred to coalesce it with the
1716 // last block, but it's currently less expensive to find the
1717 // largest block than it is to find the last.
1718 void
1719 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1720   HeapWord* chunk, size_t     size) {
1721   // check that the chunk does lie in this space!
1722   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1723   // One of the parallel gc task threads may be here
1724   // whilst others are allocating.
1725   Mutex* lock = &_parDictionaryAllocLock;
1726   FreeChunk* ec;
1727   {
1728     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1729     ec = dictionary()->find_largest_dict();  // get largest block
1730     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1731       // It's a coterminal block - we can coalesce.
1732       size_t old_size = ec->size();
1733       coalDeath(old_size);
1734       removeChunkFromDictionary(ec);
1735       size += old_size;
1736     } else {
1737       ec = (FreeChunk*)chunk;
1738     }
1739   }
1740   ec->set_size(size);
1741   debug_only(ec->mangleFreed(size));
1742   if (size < SmallForDictionary) {
1743     lock = _indexedFreeListParLocks[size];
1744   }
1745   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1746   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1747   // record the birth under the lock since the recording involves
1748   // manipulation of the list on which the chunk lives and
1749   // if the chunk is allocated and is the last on the list,
1750   // the list can go away.
1751   coalBirth(size);
1752 }
1753 
1754 void
1755 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1756                                               size_t     size) {
1757   // check that the chunk does lie in this space!
1758   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1759   assert_locked();
1760   _bt.verify_single_block(chunk, size);
1761 
1762   FreeChunk* fc = (FreeChunk*) chunk;
1763   fc->set_size(size);
1764   debug_only(fc->mangleFreed(size));
1765   if (size < SmallForDictionary) {
1766     returnChunkToFreeList(fc);
1767   } else {
1768     returnChunkToDictionary(fc);
1769   }
1770 }
1771 
1772 void
1773 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1774   size_t size, bool coalesced) {
1775   assert_locked();
1776   assert(chunk != NULL, "null chunk");
1777   if (coalesced) {
1778     // repair BOT
1779     _bt.single_block(chunk, size);
1780   }
1781   addChunkToFreeLists(chunk, size);
1782 }
1783 
1784 // We _must_ find the purported chunk on our free lists;
1785 // we assert if we don't.
1786 void
1787 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1788   size_t size = fc->size();
1789   assert_locked();
1790   debug_only(verifyFreeLists());
1791   if (size < SmallForDictionary) {
1792     removeChunkFromIndexedFreeList(fc);
1793   } else {
1794     removeChunkFromDictionary(fc);
1795   }
1796   _bt.verify_single_block((HeapWord*)fc, size);
1797   debug_only(verifyFreeLists());
1798 }
1799 
1800 void
1801 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1802   size_t size = fc->size();
1803   assert_locked();
1804   assert(fc != NULL, "null chunk");
1805   _bt.verify_single_block((HeapWord*)fc, size);
1806   _dictionary->remove_chunk(fc);
1807   // adjust _unallocated_block upward, as necessary
1808   _bt.allocated((HeapWord*)fc, size);
1809 }
1810 
1811 void
1812 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1813   assert_locked();
1814   size_t size = fc->size();
1815   _bt.verify_single_block((HeapWord*)fc, size);
1816   NOT_PRODUCT(
1817     if (FLSVerifyIndexTable) {
1818       verifyIndexedFreeList(size);
1819     }
1820   )
1821   _indexedFreeList[size].remove_chunk(fc);
1822   NOT_PRODUCT(
1823     if (FLSVerifyIndexTable) {
1824       verifyIndexedFreeList(size);
1825     }
1826   )
1827 }
1828 
1829 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1830   /* A hint is the next larger size that has a surplus.
1831      Start search at a size large enough to guarantee that
1832      the excess is >= MIN_CHUNK. */
1833   size_t start = align_object_size(numWords + MinChunkSize);
1834   if (start < IndexSetSize) {
1835     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1836     size_t    hint = _indexedFreeList[start].hint();
1837     while (hint < IndexSetSize) {
1838       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1839       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1840       if (fl->surplus() > 0 && fl->head() != NULL) {
1841         // Found a list with surplus, reset original hint
1842         // and split out a free chunk which is returned.
1843         _indexedFreeList[start].set_hint(hint);
1844         FreeChunk* res = getFromListGreater(fl, numWords);
1845         assert(res == NULL || res->is_free(),
1846           "Should be returning a free chunk");
1847         return res;
1848       }
1849       hint = fl->hint(); /* keep looking */
1850     }
1851     /* None found. */
1852     it[start].set_hint(IndexSetSize);
1853   }
1854   return NULL;
1855 }
1856 
1857 /* Requires fl->size >= numWords + MinChunkSize */
1858 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1859   size_t numWords) {
1860   FreeChunk *curr = fl->head();
1861   size_t oldNumWords = curr->size();
1862   assert(numWords >= MinChunkSize, "Word size is too small");
1863   assert(curr != NULL, "List is empty");
1864   assert(oldNumWords >= numWords + MinChunkSize,
1865         "Size of chunks in the list is too small");
1866 
1867   fl->remove_chunk(curr);
1868   // recorded indirectly by splitChunkAndReturnRemainder -
1869   // smallSplit(oldNumWords, numWords);
1870   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1871   // Does anything have to be done for the remainder in terms of
1872   // fixing the card table?
1873   assert(new_chunk == NULL || new_chunk->is_free(),
1874     "Should be returning a free chunk");
1875   return new_chunk;
1876 }
1877 
1878 FreeChunk*
1879 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1880   size_t new_size) {
1881   assert_locked();
1882   size_t size = chunk->size();
1883   assert(size > new_size, "Split from a smaller block?");
1884   assert(is_aligned(chunk), "alignment problem");
1885   assert(size == adjustObjectSize(size), "alignment problem");
1886   size_t rem_sz = size - new_size;
1887   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1888   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1889   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1890   assert(is_aligned(ffc), "alignment problem");
1891   ffc->set_size(rem_sz);
1892   ffc->link_next(NULL);
1893   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1894   // Above must occur before BOT is updated below.
1895   // adjust block offset table
1896   OrderAccess::storestore();
1897   assert(chunk->is_free() && ffc->is_free(), "Error");
1898   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1899   if (rem_sz < SmallForDictionary) {
1900     bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
1901     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1902     assert(!is_par ||
1903            (GenCollectedHeap::heap()->n_par_threads() ==
1904             GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
1905     returnChunkToFreeList(ffc);
1906     split(size, rem_sz);
1907     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1908   } else {
1909     returnChunkToDictionary(ffc);
1910     split(size, rem_sz);
1911   }
1912   chunk->set_size(new_size);
1913   return chunk;
1914 }
1915 
1916 void
1917 CompactibleFreeListSpace::sweep_completed() {
1918   // Now that space is probably plentiful, refill linear
1919   // allocation blocks as needed.
1920   refillLinearAllocBlocksIfNeeded();
1921 }
1922 
1923 void
1924 CompactibleFreeListSpace::gc_prologue() {
1925   assert_locked();
1926   if (PrintFLSStatistics != 0) {
1927     gclog_or_tty->print("Before GC:\n");
1928     reportFreeListStatistics();
1929   }
1930   refillLinearAllocBlocksIfNeeded();
1931 }
1932 
1933 void
1934 CompactibleFreeListSpace::gc_epilogue() {
1935   assert_locked();
1936   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1937     if (_smallLinearAllocBlock._word_size == 0)
1938       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1939   }
1940   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1941   _promoInfo.stopTrackingPromotions();
1942   repairLinearAllocationBlocks();
1943   // Print Space's stats
1944   if (PrintFLSStatistics != 0) {
1945     gclog_or_tty->print("After GC:\n");
1946     reportFreeListStatistics();
1947   }
1948 }
1949 
1950 // Iteration support, mostly delegated from a CMS generation
1951 
1952 void CompactibleFreeListSpace::save_marks() {
1953   assert(Thread::current()->is_VM_thread(),
1954          "Global variable should only be set when single-threaded");
1955   // Mark the "end" of the used space at the time of this call;
1956   // note, however, that promoted objects from this point
1957   // on are tracked in the _promoInfo below.
1958   set_saved_mark_word(unallocated_block());
1959 #ifdef ASSERT
1960   // Check the sanity of save_marks() etc.
1961   MemRegion ur    = used_region();
1962   MemRegion urasm = used_region_at_save_marks();
1963   assert(ur.contains(urasm),
1964          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1965                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1966                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1967 #endif
1968   // inform allocator that promotions should be tracked.
1969   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1970   _promoInfo.startTrackingPromotions();
1971 }
1972 
1973 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1974   assert(_promoInfo.tracking(), "No preceding save_marks?");
1975   assert(GenCollectedHeap::heap()->n_par_threads() == 0,
1976          "Shouldn't be called if using parallel gc.");
1977   return _promoInfo.noPromotions();
1978 }
1979 
1980 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1981                                                                             \
1982 void CompactibleFreeListSpace::                                             \
1983 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1984   assert(GenCollectedHeap::heap()->n_par_threads() == 0,                    \
1985          "Shouldn't be called (yet) during parallel part of gc.");          \
1986   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1987   /*                                                                        \
1988    * This also restores any displaced headers and removes the elements from \
1989    * the iteration set as they are processed, so that we have a clean slate \
1990    * at the end of the iteration. Note, thus, that if new objects are       \
1991    * promoted as a result of the iteration they are iterated over as well.  \
1992    */                                                                       \
1993   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1994 }
1995 
1996 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1997 
1998 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1999   return _smallLinearAllocBlock._word_size == 0;
2000 }
2001 
2002 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2003   // Fix up linear allocation blocks to look like free blocks
2004   repairLinearAllocBlock(&_smallLinearAllocBlock);
2005 }
2006 
2007 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2008   assert_locked();
2009   if (blk->_ptr != NULL) {
2010     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2011            "Minimum block size requirement");
2012     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2013     fc->set_size(blk->_word_size);
2014     fc->link_prev(NULL);   // mark as free
2015     fc->dontCoalesce();
2016     assert(fc->is_free(), "just marked it free");
2017     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2018   }
2019 }
2020 
2021 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2022   assert_locked();
2023   if (_smallLinearAllocBlock._ptr == NULL) {
2024     assert(_smallLinearAllocBlock._word_size == 0,
2025       "Size of linAB should be zero if the ptr is NULL");
2026     // Reset the linAB refill and allocation size limit.
2027     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2028   }
2029   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2030 }
2031 
2032 void
2033 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2034   assert_locked();
2035   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2036          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2037          "blk invariant");
2038   if (blk->_ptr == NULL) {
2039     refillLinearAllocBlock(blk);
2040   }
2041   if (PrintMiscellaneous && Verbose) {
2042     if (blk->_word_size == 0) {
2043       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2044     }
2045   }
2046 }
2047 
2048 void
2049 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2050   assert_locked();
2051   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2052          "linear allocation block should be empty");
2053   FreeChunk* fc;
2054   if (blk->_refillSize < SmallForDictionary &&
2055       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2056     // A linAB's strategy might be to use small sizes to reduce
2057     // fragmentation but still get the benefits of allocation from a
2058     // linAB.
2059   } else {
2060     fc = getChunkFromDictionary(blk->_refillSize);
2061   }
2062   if (fc != NULL) {
2063     blk->_ptr  = (HeapWord*)fc;
2064     blk->_word_size = fc->size();
2065     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2066   }
2067 }
2068 
2069 // Support for concurrent collection policy decisions.
2070 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2071   // In the future we might want to add in fragmentation stats --
2072   // including erosion of the "mountain" into this decision as well.
2073   return !adaptive_freelists() && linearAllocationWouldFail();
2074 }
2075 
2076 // Support for compaction
2077 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2078   scan_and_forward(this, cp);
2079   // Prepare_for_compaction() uses the space between live objects
2080   // so that later phase can skip dead space quickly.  So verification
2081   // of the free lists doesn't work after.
2082 }
2083 
2084 void CompactibleFreeListSpace::adjust_pointers() {
2085   // In other versions of adjust_pointers(), a bail out
2086   // based on the amount of live data in the generation
2087   // (i.e., if 0, bail out) may be used.
2088   // Cannot test used() == 0 here because the free lists have already
2089   // been mangled by the compaction.
2090 
2091   scan_and_adjust_pointers(this);
2092   // See note about verification in prepare_for_compaction().
2093 }
2094 
2095 void CompactibleFreeListSpace::compact() {
2096   scan_and_compact(this);
2097 }
2098 
2099 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2100 // where fbs is free block sizes
2101 double CompactibleFreeListSpace::flsFrag() const {
2102   size_t itabFree = totalSizeInIndexedFreeLists();
2103   double frag = 0.0;
2104   size_t i;
2105 
2106   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2107     double sz  = i;
2108     frag      += _indexedFreeList[i].count() * (sz * sz);
2109   }
2110 
2111   double totFree = itabFree +
2112                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2113   if (totFree > 0) {
2114     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2115             (totFree * totFree));
2116     frag = (double)1.0  - frag;
2117   } else {
2118     assert(frag == 0.0, "Follows from totFree == 0");
2119   }
2120   return frag;
2121 }
2122 
2123 void CompactibleFreeListSpace::beginSweepFLCensus(
2124   float inter_sweep_current,
2125   float inter_sweep_estimate,
2126   float intra_sweep_estimate) {
2127   assert_locked();
2128   size_t i;
2129   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2130     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2131     if (PrintFLSStatistics > 1) {
2132       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2133     }
2134     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2135     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2136     fl->set_before_sweep(fl->count());
2137     fl->set_bfr_surp(fl->surplus());
2138   }
2139   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2140                                     inter_sweep_current,
2141                                     inter_sweep_estimate,
2142                                     intra_sweep_estimate);
2143 }
2144 
2145 void CompactibleFreeListSpace::setFLSurplus() {
2146   assert_locked();
2147   size_t i;
2148   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2149     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2150     fl->set_surplus(fl->count() -
2151                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2152   }
2153 }
2154 
2155 void CompactibleFreeListSpace::setFLHints() {
2156   assert_locked();
2157   size_t i;
2158   size_t h = IndexSetSize;
2159   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2160     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2161     fl->set_hint(h);
2162     if (fl->surplus() > 0) {
2163       h = i;
2164     }
2165   }
2166 }
2167 
2168 void CompactibleFreeListSpace::clearFLCensus() {
2169   assert_locked();
2170   size_t i;
2171   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2172     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2173     fl->set_prev_sweep(fl->count());
2174     fl->set_coal_births(0);
2175     fl->set_coal_deaths(0);
2176     fl->set_split_births(0);
2177     fl->set_split_deaths(0);
2178   }
2179 }
2180 
2181 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2182   if (PrintFLSStatistics > 0) {
2183     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2184     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2185                            p2i(largestAddr));
2186   }
2187   setFLSurplus();
2188   setFLHints();
2189   if (PrintGC && PrintFLSCensus > 0) {
2190     printFLCensus(sweep_count);
2191   }
2192   clearFLCensus();
2193   assert_locked();
2194   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2195 }
2196 
2197 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2198   if (size < SmallForDictionary) {
2199     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2200     return (fl->coal_desired() < 0) ||
2201            ((int)fl->count() > fl->coal_desired());
2202   } else {
2203     return dictionary()->coal_dict_over_populated(size);
2204   }
2205 }
2206 
2207 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2208   assert(size < SmallForDictionary, "Size too large for indexed list");
2209   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2210   fl->increment_coal_births();
2211   fl->increment_surplus();
2212 }
2213 
2214 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2215   assert(size < SmallForDictionary, "Size too large for indexed list");
2216   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2217   fl->increment_coal_deaths();
2218   fl->decrement_surplus();
2219 }
2220 
2221 void CompactibleFreeListSpace::coalBirth(size_t size) {
2222   if (size  < SmallForDictionary) {
2223     smallCoalBirth(size);
2224   } else {
2225     dictionary()->dict_census_update(size,
2226                                    false /* split */,
2227                                    true /* birth */);
2228   }
2229 }
2230 
2231 void CompactibleFreeListSpace::coalDeath(size_t size) {
2232   if(size  < SmallForDictionary) {
2233     smallCoalDeath(size);
2234   } else {
2235     dictionary()->dict_census_update(size,
2236                                    false /* split */,
2237                                    false /* birth */);
2238   }
2239 }
2240 
2241 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2242   assert(size < SmallForDictionary, "Size too large for indexed list");
2243   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2244   fl->increment_split_births();
2245   fl->increment_surplus();
2246 }
2247 
2248 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2249   assert(size < SmallForDictionary, "Size too large for indexed list");
2250   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2251   fl->increment_split_deaths();
2252   fl->decrement_surplus();
2253 }
2254 
2255 void CompactibleFreeListSpace::split_birth(size_t size) {
2256   if (size  < SmallForDictionary) {
2257     smallSplitBirth(size);
2258   } else {
2259     dictionary()->dict_census_update(size,
2260                                    true /* split */,
2261                                    true /* birth */);
2262   }
2263 }
2264 
2265 void CompactibleFreeListSpace::splitDeath(size_t size) {
2266   if (size  < SmallForDictionary) {
2267     smallSplitDeath(size);
2268   } else {
2269     dictionary()->dict_census_update(size,
2270                                    true /* split */,
2271                                    false /* birth */);
2272   }
2273 }
2274 
2275 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2276   size_t to2 = from - to1;
2277   splitDeath(from);
2278   split_birth(to1);
2279   split_birth(to2);
2280 }
2281 
2282 void CompactibleFreeListSpace::print() const {
2283   print_on(tty);
2284 }
2285 
2286 void CompactibleFreeListSpace::prepare_for_verify() {
2287   assert_locked();
2288   repairLinearAllocationBlocks();
2289   // Verify that the SpoolBlocks look like free blocks of
2290   // appropriate sizes... To be done ...
2291 }
2292 
2293 class VerifyAllBlksClosure: public BlkClosure {
2294  private:
2295   const CompactibleFreeListSpace* _sp;
2296   const MemRegion                 _span;
2297   HeapWord*                       _last_addr;
2298   size_t                          _last_size;
2299   bool                            _last_was_obj;
2300   bool                            _last_was_live;
2301 
2302  public:
2303   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2304     MemRegion span) :  _sp(sp), _span(span),
2305                        _last_addr(NULL), _last_size(0),
2306                        _last_was_obj(false), _last_was_live(false) { }
2307 
2308   virtual size_t do_blk(HeapWord* addr) {
2309     size_t res;
2310     bool   was_obj  = false;
2311     bool   was_live = false;
2312     if (_sp->block_is_obj(addr)) {
2313       was_obj = true;
2314       oop p = oop(addr);
2315       guarantee(p->is_oop(), "Should be an oop");
2316       res = _sp->adjustObjectSize(p->size());
2317       if (_sp->obj_is_alive(addr)) {
2318         was_live = true;
2319         p->verify();
2320       }
2321     } else {
2322       FreeChunk* fc = (FreeChunk*)addr;
2323       res = fc->size();
2324       if (FLSVerifyLists && !fc->cantCoalesce()) {
2325         guarantee(_sp->verify_chunk_in_free_list(fc),
2326                   "Chunk should be on a free list");
2327       }
2328     }
2329     if (res == 0) {
2330       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2331       gclog_or_tty->print_cr(
2332         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2333         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2334         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2335         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2336       _sp->print_on(gclog_or_tty);
2337       guarantee(false, "Seppuku!");
2338     }
2339     _last_addr = addr;
2340     _last_size = res;
2341     _last_was_obj  = was_obj;
2342     _last_was_live = was_live;
2343     return res;
2344   }
2345 };
2346 
2347 class VerifyAllOopsClosure: public OopClosure {
2348  private:
2349   const CMSCollector*             _collector;
2350   const CompactibleFreeListSpace* _sp;
2351   const MemRegion                 _span;
2352   const bool                      _past_remark;
2353   const CMSBitMap*                _bit_map;
2354 
2355  protected:
2356   void do_oop(void* p, oop obj) {
2357     if (_span.contains(obj)) { // the interior oop points into CMS heap
2358       if (!_span.contains(p)) { // reference from outside CMS heap
2359         // Should be a valid object; the first disjunct below allows
2360         // us to sidestep an assertion in block_is_obj() that insists
2361         // that p be in _sp. Note that several generations (and spaces)
2362         // are spanned by _span (CMS heap) above.
2363         guarantee(!_sp->is_in_reserved(obj) ||
2364                   _sp->block_is_obj((HeapWord*)obj),
2365                   "Should be an object");
2366         guarantee(obj->is_oop(), "Should be an oop");
2367         obj->verify();
2368         if (_past_remark) {
2369           // Remark has been completed, the object should be marked
2370           _bit_map->isMarked((HeapWord*)obj);
2371         }
2372       } else { // reference within CMS heap
2373         if (_past_remark) {
2374           // Remark has been completed -- so the referent should have
2375           // been marked, if referring object is.
2376           if (_bit_map->isMarked(_collector->block_start(p))) {
2377             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2378           }
2379         }
2380       }
2381     } else if (_sp->is_in_reserved(p)) {
2382       // the reference is from FLS, and points out of FLS
2383       guarantee(obj->is_oop(), "Should be an oop");
2384       obj->verify();
2385     }
2386   }
2387 
2388   template <class T> void do_oop_work(T* p) {
2389     T heap_oop = oopDesc::load_heap_oop(p);
2390     if (!oopDesc::is_null(heap_oop)) {
2391       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2392       do_oop(p, obj);
2393     }
2394   }
2395 
2396  public:
2397   VerifyAllOopsClosure(const CMSCollector* collector,
2398     const CompactibleFreeListSpace* sp, MemRegion span,
2399     bool past_remark, CMSBitMap* bit_map) :
2400     _collector(collector), _sp(sp), _span(span),
2401     _past_remark(past_remark), _bit_map(bit_map) { }
2402 
2403   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2404   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2405 };
2406 
2407 void CompactibleFreeListSpace::verify() const {
2408   assert_lock_strong(&_freelistLock);
2409   verify_objects_initialized();
2410   MemRegion span = _collector->_span;
2411   bool past_remark = (_collector->abstract_state() ==
2412                       CMSCollector::Sweeping);
2413 
2414   ResourceMark rm;
2415   HandleMark  hm;
2416 
2417   // Check integrity of CFL data structures
2418   _promoInfo.verify();
2419   _dictionary->verify();
2420   if (FLSVerifyIndexTable) {
2421     verifyIndexedFreeLists();
2422   }
2423   // Check integrity of all objects and free blocks in space
2424   {
2425     VerifyAllBlksClosure cl(this, span);
2426     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2427   }
2428   // Check that all references in the heap to FLS
2429   // are to valid objects in FLS or that references in
2430   // FLS are to valid objects elsewhere in the heap
2431   if (FLSVerifyAllHeapReferences)
2432   {
2433     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2434       _collector->markBitMap());
2435 
2436     // Iterate over all oops in the heap. Uses the _no_header version
2437     // since we are not interested in following the klass pointers.
2438     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2439   }
2440 
2441   if (VerifyObjectStartArray) {
2442     // Verify the block offset table
2443     _bt.verify();
2444   }
2445 }
2446 
2447 #ifndef PRODUCT
2448 void CompactibleFreeListSpace::verifyFreeLists() const {
2449   if (FLSVerifyLists) {
2450     _dictionary->verify();
2451     verifyIndexedFreeLists();
2452   } else {
2453     if (FLSVerifyDictionary) {
2454       _dictionary->verify();
2455     }
2456     if (FLSVerifyIndexTable) {
2457       verifyIndexedFreeLists();
2458     }
2459   }
2460 }
2461 #endif
2462 
2463 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2464   size_t i = 0;
2465   for (; i < IndexSetStart; i++) {
2466     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2467   }
2468   for (; i < IndexSetSize; i++) {
2469     verifyIndexedFreeList(i);
2470   }
2471 }
2472 
2473 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2474   FreeChunk* fc   =  _indexedFreeList[size].head();
2475   FreeChunk* tail =  _indexedFreeList[size].tail();
2476   size_t    num = _indexedFreeList[size].count();
2477   size_t      n = 0;
2478   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2479             "Slot should have been empty");
2480   for (; fc != NULL; fc = fc->next(), n++) {
2481     guarantee(fc->size() == size, "Size inconsistency");
2482     guarantee(fc->is_free(), "!free?");
2483     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2484     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2485   }
2486   guarantee(n == num, "Incorrect count");
2487 }
2488 
2489 #ifndef PRODUCT
2490 void CompactibleFreeListSpace::check_free_list_consistency() const {
2491   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2492     "Some sizes can't be allocated without recourse to"
2493     " linear allocation buffers");
2494   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2495     "else MIN_TREE_CHUNK_SIZE is wrong");
2496   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2497   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2498 }
2499 #endif
2500 
2501 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2502   assert_lock_strong(&_freelistLock);
2503   AdaptiveFreeList<FreeChunk> total;
2504   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2505   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2506   size_t total_free = 0;
2507   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2508     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2509     total_free += fl->count() * fl->size();
2510     if (i % (40*IndexSetStride) == 0) {
2511       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2512     }
2513     fl->print_on(gclog_or_tty);
2514     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2515     total.set_surplus(    total.surplus()     + fl->surplus()    );
2516     total.set_desired(    total.desired()     + fl->desired()    );
2517     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2518     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2519     total.set_count(      total.count()       + fl->count()      );
2520     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2521     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2522     total.set_split_births(total.split_births() + fl->split_births());
2523     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2524   }
2525   total.print_on(gclog_or_tty, "TOTAL");
2526   gclog_or_tty->print_cr("Total free in indexed lists "
2527                          SIZE_FORMAT " words", total_free);
2528   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2529     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2530             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2531     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2532   _dictionary->print_dict_census();
2533 }
2534 
2535 ///////////////////////////////////////////////////////////////////////////
2536 // CFLS_LAB
2537 ///////////////////////////////////////////////////////////////////////////
2538 
2539 #define VECTOR_257(x)                                                                                  \
2540   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2541   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2542      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2543      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2544      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2545      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2546      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2547      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2548      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2549      x }
2550 
2551 // Initialize with default setting for CMS, _not_
2552 // generic OldPLABSize, whose static default is different; if overridden at the
2553 // command-line, this will get reinitialized via a call to
2554 // modify_initialization() below.
2555 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2556   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
2557 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2558 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2559 
2560 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2561   _cfls(cfls)
2562 {
2563   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2564   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2565        i < CompactibleFreeListSpace::IndexSetSize;
2566        i += CompactibleFreeListSpace::IndexSetStride) {
2567     _indexedFreeList[i].set_size(i);
2568     _num_blocks[i] = 0;
2569   }
2570 }
2571 
2572 static bool _CFLS_LAB_modified = false;
2573 
2574 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2575   assert(!_CFLS_LAB_modified, "Call only once");
2576   _CFLS_LAB_modified = true;
2577   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2578        i < CompactibleFreeListSpace::IndexSetSize;
2579        i += CompactibleFreeListSpace::IndexSetStride) {
2580     _blocks_to_claim[i].modify(n, wt, true /* force */);
2581   }
2582 }
2583 
2584 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2585   FreeChunk* res;
2586   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2587   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2588     // This locking manages sync with other large object allocations.
2589     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2590                     Mutex::_no_safepoint_check_flag);
2591     res = _cfls->getChunkFromDictionaryExact(word_sz);
2592     if (res == NULL) return NULL;
2593   } else {
2594     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2595     if (fl->count() == 0) {
2596       // Attempt to refill this local free list.
2597       get_from_global_pool(word_sz, fl);
2598       // If it didn't work, give up.
2599       if (fl->count() == 0) return NULL;
2600     }
2601     res = fl->get_chunk_at_head();
2602     assert(res != NULL, "Why was count non-zero?");
2603   }
2604   res->markNotFree();
2605   assert(!res->is_free(), "shouldn't be marked free");
2606   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2607   // mangle a just allocated object with a distinct pattern.
2608   debug_only(res->mangleAllocated(word_sz));
2609   return (HeapWord*)res;
2610 }
2611 
2612 // Get a chunk of blocks of the right size and update related
2613 // book-keeping stats
2614 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2615   // Get the #blocks we want to claim
2616   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2617   assert(n_blks > 0, "Error");
2618   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2619   // In some cases, when the application has a phase change,
2620   // there may be a sudden and sharp shift in the object survival
2621   // profile, and updating the counts at the end of a scavenge
2622   // may not be quick enough, giving rise to large scavenge pauses
2623   // during these phase changes. It is beneficial to detect such
2624   // changes on-the-fly during a scavenge and avoid such a phase-change
2625   // pothole. The following code is a heuristic attempt to do that.
2626   // It is protected by a product flag until we have gained
2627   // enough experience with this heuristic and fine-tuned its behavior.
2628   // WARNING: This might increase fragmentation if we overreact to
2629   // small spikes, so some kind of historical smoothing based on
2630   // previous experience with the greater reactivity might be useful.
2631   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2632   // default.
2633   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2634     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2635     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2636     n_blks = MIN2(n_blks, CMSOldPLABMax);
2637   }
2638   assert(n_blks > 0, "Error");
2639   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2640   // Update stats table entry for this block size
2641   _num_blocks[word_sz] += fl->count();
2642 }
2643 
2644 void CFLS_LAB::compute_desired_plab_size() {
2645   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2646        i < CompactibleFreeListSpace::IndexSetSize;
2647        i += CompactibleFreeListSpace::IndexSetStride) {
2648     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2649            "Counter inconsistency");
2650     if (_global_num_workers[i] > 0) {
2651       // Need to smooth wrt historical average
2652       if (ResizeOldPLAB) {
2653         _blocks_to_claim[i].sample(
2654           MAX2(CMSOldPLABMin,
2655           MIN2(CMSOldPLABMax,
2656                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2657       }
2658       // Reset counters for next round
2659       _global_num_workers[i] = 0;
2660       _global_num_blocks[i] = 0;
2661       if (PrintOldPLAB) {
2662         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2663                                i, (size_t)_blocks_to_claim[i].average());
2664       }
2665     }
2666   }
2667 }
2668 
2669 // If this is changed in the future to allow parallel
2670 // access, one would need to take the FL locks and,
2671 // depending on how it is used, stagger access from
2672 // parallel threads to reduce contention.
2673 void CFLS_LAB::retire(int tid) {
2674   // We run this single threaded with the world stopped;
2675   // so no need for locks and such.
2676   NOT_PRODUCT(Thread* t = Thread::current();)
2677   assert(Thread::current()->is_VM_thread(), "Error");
2678   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2679        i < CompactibleFreeListSpace::IndexSetSize;
2680        i += CompactibleFreeListSpace::IndexSetStride) {
2681     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2682            "Can't retire more than what we obtained");
2683     if (_num_blocks[i] > 0) {
2684       size_t num_retire =  _indexedFreeList[i].count();
2685       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2686       {
2687         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2688         //                Mutex::_no_safepoint_check_flag);
2689 
2690         // Update globals stats for num_blocks used
2691         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2692         _global_num_workers[i]++;
2693         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2694         if (num_retire > 0) {
2695           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2696           // Reset this list.
2697           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2698           _indexedFreeList[i].set_size(i);
2699         }
2700       }
2701       if (PrintOldPLAB) {
2702         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2703                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2704       }
2705       // Reset stats for next round
2706       _num_blocks[i]         = 0;
2707     }
2708   }
2709 }
2710 
2711 // Used by par_get_chunk_of_blocks() for the chunks from the
2712 // indexed_free_lists.  Looks for a chunk with size that is a multiple
2713 // of "word_sz" and if found, splits it into "word_sz" chunks and add
2714 // to the free list "fl".  "n" is the maximum number of chunks to
2715 // be added to "fl".
2716 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2717 
2718   // We'll try all multiples of word_sz in the indexed set, starting with
2719   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2720   // then try getting a big chunk and splitting it.
2721   {
2722     bool found;
2723     int  k;
2724     size_t cur_sz;
2725     for (k = 1, cur_sz = k * word_sz, found = false;
2726          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2727          (CMSSplitIndexedFreeListBlocks || k <= 1);
2728          k++, cur_sz = k * word_sz) {
2729       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2730       fl_for_cur_sz.set_size(cur_sz);
2731       {
2732         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2733                         Mutex::_no_safepoint_check_flag);
2734         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2735         if (gfl->count() != 0) {
2736           // nn is the number of chunks of size cur_sz that
2737           // we'd need to split k-ways each, in order to create
2738           // "n" chunks of size word_sz each.
2739           const size_t nn = MAX2(n/k, (size_t)1);
2740           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2741           found = true;
2742           if (k > 1) {
2743             // Update split death stats for the cur_sz-size blocks list:
2744             // we increment the split death count by the number of blocks
2745             // we just took from the cur_sz-size blocks list and which
2746             // we will be splitting below.
2747             ssize_t deaths = gfl->split_deaths() +
2748                              fl_for_cur_sz.count();
2749             gfl->set_split_deaths(deaths);
2750           }
2751         }
2752       }
2753       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2754       if (found) {
2755         if (k == 1) {
2756           fl->prepend(&fl_for_cur_sz);
2757         } else {
2758           // Divide each block on fl_for_cur_sz up k ways.
2759           FreeChunk* fc;
2760           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2761             // Must do this in reverse order, so that anybody attempting to
2762             // access the main chunk sees it as a single free block until we
2763             // change it.
2764             size_t fc_size = fc->size();
2765             assert(fc->is_free(), "Error");
2766             for (int i = k-1; i >= 0; i--) {
2767               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2768               assert((i != 0) ||
2769                         ((fc == ffc) && ffc->is_free() &&
2770                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2771                         "Counting error");
2772               ffc->set_size(word_sz);
2773               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2774               ffc->link_next(NULL);
2775               // Above must occur before BOT is updated below.
2776               OrderAccess::storestore();
2777               // splitting from the right, fc_size == i * word_sz
2778               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2779               fc_size -= word_sz;
2780               assert(fc_size == i*word_sz, "Error");
2781               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2782               _bt.verify_single_block((HeapWord*)fc, fc_size);
2783               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2784               // Push this on "fl".
2785               fl->return_chunk_at_head(ffc);
2786             }
2787             // TRAP
2788             assert(fl->tail()->next() == NULL, "List invariant.");
2789           }
2790         }
2791         // Update birth stats for this block size.
2792         size_t num = fl->count();
2793         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2794                         Mutex::_no_safepoint_check_flag);
2795         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2796         _indexedFreeList[word_sz].set_split_births(births);
2797         return true;
2798       }
2799     }
2800     return found;
2801   }
2802 }
2803 
2804 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2805 
2806   FreeChunk* fc = NULL;
2807   FreeChunk* rem_fc = NULL;
2808   size_t rem;
2809   {
2810     MutexLockerEx x(parDictionaryAllocLock(),
2811                     Mutex::_no_safepoint_check_flag);
2812     while (n > 0) {
2813       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2814                                   FreeBlockDictionary<FreeChunk>::atLeast);
2815       if (fc != NULL) {
2816         break;
2817       } else {
2818         n--;
2819       }
2820     }
2821     if (fc == NULL) return NULL;
2822     // Otherwise, split up that block.
2823     assert((ssize_t)n >= 1, "Control point invariant");
2824     assert(fc->is_free(), "Error: should be a free block");
2825     _bt.verify_single_block((HeapWord*)fc, fc->size());
2826     const size_t nn = fc->size() / word_sz;
2827     n = MIN2(nn, n);
2828     assert((ssize_t)n >= 1, "Control point invariant");
2829     rem = fc->size() - n * word_sz;
2830     // If there is a remainder, and it's too small, allocate one fewer.
2831     if (rem > 0 && rem < MinChunkSize) {
2832       n--; rem += word_sz;
2833     }
2834     // Note that at this point we may have n == 0.
2835     assert((ssize_t)n >= 0, "Control point invariant");
2836 
2837     // If n is 0, the chunk fc that was found is not large
2838     // enough to leave a viable remainder.  We are unable to
2839     // allocate even one block.  Return fc to the
2840     // dictionary and return, leaving "fl" empty.
2841     if (n == 0) {
2842       returnChunkToDictionary(fc);
2843       return NULL;
2844     }
2845 
2846     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2847     dictionary()->dict_census_update(fc->size(),
2848                                      true /*split*/,
2849                                      false /*birth*/);
2850 
2851     // First return the remainder, if any.
2852     // Note that we hold the lock until we decide if we're going to give
2853     // back the remainder to the dictionary, since a concurrent allocation
2854     // may otherwise see the heap as empty.  (We're willing to take that
2855     // hit if the block is a small block.)
2856     if (rem > 0) {
2857       size_t prefix_size = n * word_sz;
2858       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2859       rem_fc->set_size(rem);
2860       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2861       rem_fc->link_next(NULL);
2862       // Above must occur before BOT is updated below.
2863       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2864       OrderAccess::storestore();
2865       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2866       assert(fc->is_free(), "Error");
2867       fc->set_size(prefix_size);
2868       if (rem >= IndexSetSize) {
2869         returnChunkToDictionary(rem_fc);
2870         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2871         rem_fc = NULL;
2872       }
2873       // Otherwise, return it to the small list below.
2874     }
2875   }
2876   if (rem_fc != NULL) {
2877     MutexLockerEx x(_indexedFreeListParLocks[rem],
2878                     Mutex::_no_safepoint_check_flag);
2879     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2880     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2881     smallSplitBirth(rem);
2882   }
2883   assert(n * word_sz == fc->size(),
2884     err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable by "
2885     SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
2886     fc->size(), n, word_sz));
2887   return fc;
2888 }
2889 
2890 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2891 
2892   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2893 
2894   if (fc == NULL) {
2895     return;
2896   }
2897 
2898   size_t n = fc->size() / word_sz;
2899 
2900   assert((ssize_t)n > 0, "Consistency");
2901   // Now do the splitting up.
2902   // Must do this in reverse order, so that anybody attempting to
2903   // access the main chunk sees it as a single free block until we
2904   // change it.
2905   size_t fc_size = n * word_sz;
2906   // All but first chunk in this loop
2907   for (ssize_t i = n-1; i > 0; i--) {
2908     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2909     ffc->set_size(word_sz);
2910     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2911     ffc->link_next(NULL);
2912     // Above must occur before BOT is updated below.
2913     OrderAccess::storestore();
2914     // splitting from the right, fc_size == (n - i + 1) * wordsize
2915     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2916     fc_size -= word_sz;
2917     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2918     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2919     _bt.verify_single_block((HeapWord*)fc, fc_size);
2920     // Push this on "fl".
2921     fl->return_chunk_at_head(ffc);
2922   }
2923   // First chunk
2924   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2925   // The blocks above should show their new sizes before the first block below
2926   fc->set_size(word_sz);
2927   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2928   fc->link_next(NULL);
2929   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2930   _bt.verify_single_block((HeapWord*)fc, fc->size());
2931   fl->return_chunk_at_head(fc);
2932 
2933   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2934   {
2935     // Update the stats for this block size.
2936     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2937                     Mutex::_no_safepoint_check_flag);
2938     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2939     _indexedFreeList[word_sz].set_split_births(births);
2940     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2941     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2942   }
2943 
2944   // TRAP
2945   assert(fl->tail()->next() == NULL, "List invariant.");
2946 }
2947 
2948 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2949   assert(fl->count() == 0, "Precondition.");
2950   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2951          "Precondition");
2952 
2953   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2954     // Got it
2955     return;
2956   }
2957 
2958   // Otherwise, we'll split a block from the dictionary.
2959   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2960 }
2961 
2962 // Set up the space's par_seq_tasks structure for work claiming
2963 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2964 // XXX Need to suitably abstract and generalize this and the next
2965 // method into one.
2966 void
2967 CompactibleFreeListSpace::
2968 initialize_sequential_subtasks_for_rescan(int n_threads) {
2969   // The "size" of each task is fixed according to rescan_task_size.
2970   assert(n_threads > 0, "Unexpected n_threads argument");
2971   const size_t task_size = rescan_task_size();
2972   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2973   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2974   assert(n_tasks == 0 ||
2975          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2976           (used_region().start() + n_tasks*task_size >= used_region().end())),
2977          "n_tasks calculation incorrect");
2978   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2979   assert(!pst->valid(), "Clobbering existing data?");
2980   // Sets the condition for completion of the subtask (how many threads
2981   // need to finish in order to be done).
2982   pst->set_n_threads(n_threads);
2983   pst->set_n_tasks((int)n_tasks);
2984 }
2985 
2986 // Set up the space's par_seq_tasks structure for work claiming
2987 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2988 void
2989 CompactibleFreeListSpace::
2990 initialize_sequential_subtasks_for_marking(int n_threads,
2991                                            HeapWord* low) {
2992   // The "size" of each task is fixed according to rescan_task_size.
2993   assert(n_threads > 0, "Unexpected n_threads argument");
2994   const size_t task_size = marking_task_size();
2995   assert(task_size > CardTableModRefBS::card_size_in_words &&
2996          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2997          "Otherwise arithmetic below would be incorrect");
2998   MemRegion span = _gen->reserved();
2999   if (low != NULL) {
3000     if (span.contains(low)) {
3001       // Align low down to  a card boundary so that
3002       // we can use block_offset_careful() on span boundaries.
3003       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3004                                  CardTableModRefBS::card_size);
3005       // Clip span prefix at aligned_low
3006       span = span.intersection(MemRegion(aligned_low, span.end()));
3007     } else if (low > span.end()) {
3008       span = MemRegion(low, low);  // Null region
3009     } // else use entire span
3010   }
3011   assert(span.is_empty() ||
3012          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3013         "span should start at a card boundary");
3014   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3015   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3016   assert(n_tasks == 0 ||
3017          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3018           (span.start() + n_tasks*task_size >= span.end())),
3019          "n_tasks calculation incorrect");
3020   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3021   assert(!pst->valid(), "Clobbering existing data?");
3022   // Sets the condition for completion of the subtask (how many threads
3023   // need to finish in order to be done).
3024   pst->set_n_threads(n_threads);
3025   pst->set_n_tasks((int)n_tasks);
3026 }