1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/globals.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/vmThread.hpp"
  43 #include "utilities/copy.hpp"
  44 
  45 /////////////////////////////////////////////////////////////////////////
  46 //// CompactibleFreeListSpace
  47 /////////////////////////////////////////////////////////////////////////
  48 
  49 // Highest rank used for the free list locks.
  50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  51 
  52 // Defaults are 0 so things will break badly if incorrectly initialized.
  53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  55 
  56 size_t MinChunkSize = 0;
  57 
  58 void CompactibleFreeListSpace::set_cms_values() {
  59   // Set CMS global values
  60   assert(MinChunkSize == 0, "already set");
  61 
  62   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  63   // for chunks to contain a FreeChunk.
  64   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  65   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
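       // Illustrative example only (the concrete values are platform-dependent
       // assumptions): on a 64-bit VM with 8-byte object alignment, if
       // sizeof(FreeChunk) were 24 bytes, then align_size_up(24, 8) == 24 and
       // MinChunkSize would be 24 / BytesPerWord == 3 heap words.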
  66 
  67   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  68   IndexSetStart  = MinChunkSize;
  69   IndexSetStride = MinObjAlignment;
  70 }
  71 
  72 // Constructor
  73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  74   MemRegion mr, bool use_adaptive_freelists,
  75   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  76   _dictionaryChoice(dictionaryChoice),
  77   _adaptive_freelists(use_adaptive_freelists),
  78   _bt(bs, mr),
  79   // The free list locks are in the range of values taken by _lockRank.
  80   // This range currently is [_leaf+2, _leaf+3].
  81   // Note: this requires that CFLspace c'tors
  82   // are called serially in the order in which the locks
  83   // are acquired in the program text. This is true today.
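       // Note that _lockRank is a static that is post-decremented below, so
       // each successively constructed CompactibleFreeListSpace receives a
       // free list lock of the next lower rank.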
  84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true),
  87   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  88                     CMSRescanMultiple),
  89   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSConcMarkMultiple),
  91   _collector(NULL)
  92 {
  93   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  94          "FreeChunk is larger than expected");
  95   _bt.set_space(this);
  96   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  97   // We have all of "mr", all of which we place in the dictionary
  98   // as one big chunk. We'll need to decide here which of several
  99   // possible alternative dictionary implementations to use. For
 100   // now the choice is easy, since we have only one working
 101   // implementation, namely, the simple binary tree (splaying
 102   // temporarily disabled).
 103   switch (dictionaryChoice) {
 104     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 105       _dictionary = new AFLBinaryTreeDictionary(mr);
 106       break;
 107     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 108     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 109     default:
 110       warning("dictionaryChoice: selected option not understood; using"
 111               " default BinaryTreeDictionary implementation instead.");
 112   }
 113   assert(_dictionary != NULL, "CMS dictionary initialization");
 114   // The indexed free lists are initially all empty and are lazily
 115   // filled in on demand. Initialize the array elements to NULL.
 116   initializeIndexedFreeListArray();
 117 
 118   // Not using adaptive free lists assumes that allocation is first
 119   // from the linAB's.  Also, a CMS perm gen which can be compacted
 120   // has to have the klass's klassKlass allocated at a lower
 121   // address in the heap than the klass, so that the klassKlass is
 122   // moved to its new location before the klass is moved.
 123   // Set the _refillSize for the linear allocation blocks.
 124   if (!use_adaptive_freelists) {
 125     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 126                                            FreeBlockDictionary<FreeChunk>::atLeast);
 127     // The small linAB initially has all the space and will allocate
 128     // a chunk of any size.
 129     HeapWord* addr = (HeapWord*) fc;
 130     _smallLinearAllocBlock.set(addr, fc->size() ,
 131       1024*SmallForLinearAlloc, fc->size());
 132     // Note that _unallocated_block is not updated here.
 133     // Allocations from the linear allocation block should
 134     // update it.
 135   } else {
 136     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 137                                SmallForLinearAlloc);
 138   }
 139   // CMSIndexedFreeListReplenish should be at least 1
 140   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 141   _promoInfo.setSpace(this);
 142   if (UseCMSBestFit) {
 143     _fitStrategy = FreeBlockBestFitFirst;
 144   } else {
 145     _fitStrategy = FreeBlockStrategyNone;
 146   }
 147   check_free_list_consistency();
 148 
 149   // Initialize locks for parallel case.
 150 
 151   if (CollectedHeap::use_parallel_gc_threads()) {
 152     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 153       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 154                                               "a freelist par lock",
 155                                               true);
 156       if (_indexedFreeListParLocks[i] == NULL)
 157         vm_exit_during_initialization("Could not allocate a par lock");
 158       DEBUG_ONLY(
 159         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 160       )
 161     }
 162     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 163   }
 164 }
 165 
 166 // Like CompactibleSpace forward() but always calls cross_threshold() to
 167 // update the block offset table.  Removed initialize_threshold call because
 168 // CFLS does not use a block offset array for contiguous spaces.
 169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 170                                     CompactPoint* cp, HeapWord* compact_top) {
 171   // q is alive
 172   // First check if we should switch compaction space
 173   assert(this == cp->space, "'this' should be current compaction space.");
 174   size_t compaction_max_size = pointer_delta(end(), compact_top);
 175   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 176     "virtual adjustObjectSize_v() method is not correct");
 177   size_t adjusted_size = adjustObjectSize(size);
 178   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 179          "no small fragments allowed");
 180   assert(minimum_free_block_size() == MinChunkSize,
 181          "for de-virtualized reference below");
 182   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 183   if (adjusted_size + MinChunkSize > compaction_max_size &&
 184       adjusted_size != compaction_max_size) {
 185     do {
 186       // switch to next compaction space
 187       cp->space->set_compaction_top(compact_top);
 188       cp->space = cp->space->next_compaction_space();
 189       if (cp->space == NULL) {
 190         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 191         assert(cp->gen != NULL, "compaction must succeed");
 192         cp->space = cp->gen->first_compaction_space();
 193         assert(cp->space != NULL, "generation must have a first compaction space");
 194       }
 195       compact_top = cp->space->bottom();
 196       cp->space->set_compaction_top(compact_top);
 197       // The correct adjusted_size may not be the same as that for this method
 198       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 199       // Use the virtual method here; it was avoided above to save the
 200       // virtual dispatch.
 201       adjusted_size = cp->space->adjust_object_size_v(size);
 202       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 203       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 204     } while (adjusted_size > compaction_max_size);
 205   }
 206 
 207   // store the forwarding pointer into the mark word
 208   if ((HeapWord*)q != compact_top) {
 209     q->forward_to(oop(compact_top));
 210     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 211   } else {
 212     // if the object isn't moving we can just set the mark to the default
 213     // mark and handle it specially later on.
 214     q->init_mark();
 215     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 216   }
 217 
 218   compact_top += adjusted_size;
 219 
 220   // we need to update the offset table so that the beginnings of objects can be
 221   // found during scavenge.  Note that we are updating the offset table based on
 222   // where the object will be once the compaction phase finishes.
 223 
 224   // Always call cross_threshold().  A contiguous space calls it only when
 225   // the compaction_top exceeds the current threshold, but that shortcut does
 226   // not apply to a non-contiguous space.
 227   cp->threshold =
 228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 229   return compact_top;
 230 }
 231 
 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 233 // and use of single_block instead of alloc_block.  The name here is not really
 234 // appropriate - maybe a more general name could be invented for both the
 235 // contiguous and noncontiguous spaces.
 236 
 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 238   _bt.single_block(start, the_end);
 239   return end();
 240 }
 241 
 242 // Initialize the indexed free lists; each starts out empty (head and tail NULL).
 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 244   for (size_t i = 0; i < IndexSetSize; i++) {
 245     // Note that on platforms where objects are double word aligned,
 246     // the odd array elements are not used.  It is convenient, however,
 247     // to map directly from the object size to the array element.
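         // For instance, on a platform where IndexSetStride == 2 (objects
         // double-word aligned), only the even indices are ever populated;
         // the index i is the chunk size in heap words.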
 248     _indexedFreeList[i].reset(IndexSetSize);
 249     _indexedFreeList[i].set_size(i);
 250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 254   }
 255 }
 256 
 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 258   for (size_t i = 1; i < IndexSetSize; i++) {
 259     assert(_indexedFreeList[i].size() == (size_t) i,
 260       "Indexed free list sizes are incorrect");
 261     _indexedFreeList[i].reset(IndexSetSize);
 262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 266   }
 267 }
 268 
 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
 270   resetIndexedFreeListArray();
 271   dictionary()->reset();
 272   if (BlockOffsetArrayUseUnallocatedBlock) {
 273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 274     // Everything's allocated until proven otherwise.
 275     _bt.set_unallocated_block(end());
 276   }
 277   if (!mr.is_empty()) {
 278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 279     _bt.single_block(mr.start(), mr.word_size());
 280     FreeChunk* fc = (FreeChunk*) mr.start();
 281     fc->set_size(mr.word_size());
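         // Chunks of at least IndexSetSize words go back to the dictionary;
         // smaller chunks go onto the indexed free list for their exact size.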
 282     if (mr.word_size() >= IndexSetSize ) {
 283       returnChunkToDictionary(fc);
 284     } else {
 285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 287     }
 288   }
 289   _promoInfo.reset();
 290   _smallLinearAllocBlock._ptr = NULL;
 291   _smallLinearAllocBlock._word_size = 0;
 292 }
 293 
 294 void CompactibleFreeListSpace::reset_after_compaction() {
 295   // Reset the space to the new reality - one free chunk.
 296   MemRegion mr(compaction_top(), end());
 297   reset(mr);
 298   // Now refill the linear allocation block(s) if possible.
 299   if (_adaptive_freelists) {
 300     refillLinearAllocBlocksIfNeeded();
 301   } else {
 302     // Place as much of mr in the linAB as we can get,
 303     // provided it was big enough to go into the dictionary.
 304     FreeChunk* fc = dictionary()->find_largest_dict();
 305     if (fc != NULL) {
 306       assert(fc->size() == mr.word_size(),
 307              "Why was the chunk broken up?");
 308       removeChunkFromDictionary(fc);
 309       HeapWord* addr = (HeapWord*) fc;
 310       _smallLinearAllocBlock.set(addr, fc->size() ,
 311         1024*SmallForLinearAlloc, fc->size());
 312       // Note that _unallocated_block is not updated here.
 313     }
 314   }
 315 }
 316 
 317 // Walks the entire dictionary, returning a coterminal
 318 // chunk, if it exists. Use with caution since it involves
 319 // a potentially complete walk of a potentially large tree.
 320 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 321 
 322   assert_lock_strong(&_freelistLock);
 323 
 324   return dictionary()->find_chunk_ends_at(end());
 325 }
 326 
 327 
 328 #ifndef PRODUCT
 329 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 330   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 331     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 332   }
 333 }
 334 
 335 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 336   size_t sum = 0;
 337   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 338     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 339   }
 340   return sum;
 341 }
 342 
 343 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 344   size_t count = 0;
 345   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 346     debug_only(
 347       ssize_t total_list_count = 0;
 348       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 349          fc = fc->next()) {
 350         total_list_count++;
 351       }
 352       assert(total_list_count ==  _indexedFreeList[i].count(),
 353         "Count in list is incorrect");
 354     )
 355     count += _indexedFreeList[i].count();
 356   }
 357   return count;
 358 }
 359 
 360 size_t CompactibleFreeListSpace::totalCount() {
 361   size_t num = totalCountInIndexedFreeLists();
 362   num +=  dictionary()->total_count();
 363   if (_smallLinearAllocBlock._word_size != 0) {
 364     num++;
 365   }
 366   return num;
 367 }
 368 #endif
 369 
 370 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 371   FreeChunk* fc = (FreeChunk*) p;
 372   return fc->is_free();
 373 }
 374 
 375 size_t CompactibleFreeListSpace::used() const {
 376   return capacity() - free();
 377 }
 378 
 379 size_t CompactibleFreeListSpace::free() const {
 380   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 381   // if you do this while the structures are in flux you
 382   // may get an approximate answer only; for instance
 383   // because there is concurrent allocation either
 384   // directly by mutators or for promotion during a GC.
 385   // It's "MT-safe", however, in the sense that you are guaranteed
 386   // not to crash and burn, for instance, because of walking
 387   // pointers that could disappear as you were walking them.
 388   // The approximation is because the various components
 389   // that are read below are not read atomically (and,
 390   // further, the computation of totalSizeInIndexedFreeLists()
 391   // is itself a non-atomic computation). The normal use of
 392   // this is during a resize operation at the end of GC
 393   // and at that time you are guaranteed to get the
 394   // correct actual value. However, for instance, this is
 395   // also read completely asynchronously by the "perf-sampler"
 396   // that supports jvmstat, and you are apt to see the values
 397   // flicker in such cases.
 398   assert(_dictionary != NULL, "No _dictionary?");
 399   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 400           totalSizeInIndexedFreeLists() +
 401           _smallLinearAllocBlock._word_size) * HeapWordSize;
 402 }
 403 
 404 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 405   assert(_dictionary != NULL, "No _dictionary?");
 406   assert_locked();
 407   size_t res = _dictionary->max_chunk_size();
 408   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 409                        (size_t) SmallForLinearAlloc - 1));
 410   // XXX the following could potentially be pretty slow;
 411   // should one, pessimally for the rare cases when res
 412   // calculated above is less than IndexSetSize,
 413   // just return res calculated above? My reasoning was that
 414   // those cases will be so rare that the extra time spent doesn't
 415   // really matter....
 416   // Note: do not change the loop test i >= res + IndexSetStride
 417   // to i > res below, because i is unsigned and res may be zero.
 418   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 419        i -= IndexSetStride) {
 420     if (_indexedFreeList[i].head() != NULL) {
 421       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 422       return i;
 423     }
 424   }
 425   return res;
 426 }
 427 
 428 void LinearAllocBlock::print_on(outputStream* st) const {
 429   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 430             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 431             _ptr, _word_size, _refillSize, _allocation_size_limit);
 432 }
 433 
 434 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 435   st->print_cr("COMPACTIBLE FREELIST SPACE");
 436   st->print_cr(" Space:");
 437   Space::print_on(st);
 438 
 439   st->print_cr("promoInfo:");
 440   _promoInfo.print_on(st);
 441 
 442   st->print_cr("_smallLinearAllocBlock");
 443   _smallLinearAllocBlock.print_on(st);
 444 
 445   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 446 
 447   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 448                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 449 }
 450 
 451 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 452 const {
 453   reportIndexedFreeListStatistics();
 454   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 455   gclog_or_tty->print_cr("---------------------------");
 456   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 457   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 458     _indexedFreeList[i].print_on(gclog_or_tty);
 459     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 460          fc = fc->next()) {
 461       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 462                           fc, (HeapWord*)fc + i,
 463                           fc->cantCoalesce() ? "\t CC" : "");
 464     }
 465   }
 466 }
 467 
 468 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 469 const {
 470   _promoInfo.print_on(st);
 471 }
 472 
 473 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 474 const {
 475   _dictionary->report_statistics();
 476   st->print_cr("Layout of Freelists in Tree");
 477   st->print_cr("---------------------------");
 478   _dictionary->print_free_lists(st);
 479 }
 480 
 481 class BlkPrintingClosure: public BlkClosure {
 482   const CMSCollector*             _collector;
 483   const CompactibleFreeListSpace* _sp;
 484   const CMSBitMap*                _live_bit_map;
 485   const bool                      _post_remark;
 486   outputStream*                   _st;
 487 public:
 488   BlkPrintingClosure(const CMSCollector* collector,
 489                      const CompactibleFreeListSpace* sp,
 490                      const CMSBitMap* live_bit_map,
 491                      outputStream* st):
 492     _collector(collector),
 493     _sp(sp),
 494     _live_bit_map(live_bit_map),
 495     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 496     _st(st) { }
 497   size_t do_blk(HeapWord* addr);
 498 };
 499 
 500 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 501   size_t sz = _sp->block_size_no_stall(addr, _collector);
 502   assert(sz != 0, "Should always be able to compute a size");
 503   if (_sp->block_is_obj(addr)) {
 504     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 505     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 506       addr,
 507       dead ? "dead" : "live",
 508       sz,
 509       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 510     if (CMSPrintObjectsInDump && !dead) {
 511       oop(addr)->print_on(_st);
 512       _st->print_cr("--------------------------------------");
 513     }
 514   } else { // free block
 515     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 516       addr, sz, CMSPrintChunksInDump ? ":" : ".");
 517     if (CMSPrintChunksInDump) {
 518       ((FreeChunk*)addr)->print_on(_st);
 519       _st->print_cr("--------------------------------------");
 520     }
 521   }
 522   return sz;
 523 }
 524 
 525 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 526   outputStream* st) {
 527   st->print_cr("\n=========================");
 528   st->print_cr("Block layout in CMS Heap:");
 529   st->print_cr("=========================");
 530   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 531   blk_iterate(&bpcl);
 532 
 533   st->print_cr("\n=======================================");
 534   st->print_cr("Order & Layout of Promotion Info Blocks");
 535   st->print_cr("=======================================");
 536   print_promo_info_blocks(st);
 537 
 538   st->print_cr("\n===========================");
 539   st->print_cr("Order of Indexed Free Lists");
 540   st->print_cr("===========================");
 541   print_indexed_free_lists(st);
 542 
 543   st->print_cr("\n=================================");
 544   st->print_cr("Order of Free Lists in Dictionary");
 545   st->print_cr("=================================");
 546   print_dictionary_free_lists(st);
 547 }
 548 
 549 
 550 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 551   assert_lock_strong(&_freelistLock);
 552   assert(PrintFLSStatistics != 0, "Reporting error");
 553   _dictionary->report_statistics();
 554   if (PrintFLSStatistics > 1) {
 555     reportIndexedFreeListStatistics();
 556     size_t total_size = totalSizeInIndexedFreeLists() +
 557                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 558     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 559   }
 560 }
 561 
 562 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 563   assert_lock_strong(&_freelistLock);
 564   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 565                       "--------------------------------\n");
 566   size_t total_size = totalSizeInIndexedFreeLists();
 567   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 568   gclog_or_tty->print("Total Free Space: %d\n", total_size);
 569   gclog_or_tty->print("Max   Chunk Size: %d\n", maxChunkSizeInIndexedFreeLists());
 570   gclog_or_tty->print("Number of Blocks: %d\n", free_blocks);
 571   if (free_blocks != 0) {
 572     gclog_or_tty->print("Av.  Block  Size: %d\n", total_size/free_blocks);
 573   }
 574 }
 575 
 576 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 577   size_t res = 0;
 578   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 579     debug_only(
 580       ssize_t recount = 0;
 581       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 582          fc = fc->next()) {
 583         recount += 1;
 584       }
 585       assert(recount == _indexedFreeList[i].count(),
 586         "Incorrect count in list");
 587     )
 588     res += _indexedFreeList[i].count();
 589   }
 590   return res;
 591 }
 592 
 593 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 594   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 595     if (_indexedFreeList[i].head() != NULL) {
 596       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 597       return (size_t)i;
 598     }
 599   }
 600   return 0;
 601 }
 602 
 603 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 604   HeapWord* prevEnd = end();
 605   assert(prevEnd != value, "unnecessary set_end call");
 606   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 607         "New end is below unallocated block");
 608   _end = value;
 609   if (prevEnd != NULL) {
 610     // Resize the underlying block offset table.
 611     _bt.resize(pointer_delta(value, bottom()));
 612     if (value <= prevEnd) {
 613       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 614              "New end is below unallocated block");
 615     } else {
 616       // Now, take this new chunk and add it to the free blocks.
 617       // Note that the BOT has not yet been updated for this block.
 618       size_t newFcSize = pointer_delta(value, prevEnd);
 619       // XXX This is REALLY UGLY and should be fixed up. XXX
 620       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 621         // Mark the boundary of the new block in BOT
 622         _bt.mark_block(prevEnd, value);
 623         // put it all in the linAB
 624         if (ParallelGCThreads == 0) {
 625           _smallLinearAllocBlock._ptr = prevEnd;
 626           _smallLinearAllocBlock._word_size = newFcSize;
 627           repairLinearAllocBlock(&_smallLinearAllocBlock);
 628         } else { // ParallelGCThreads > 0
 629           MutexLockerEx x(parDictionaryAllocLock(),
 630                           Mutex::_no_safepoint_check_flag);
 631           _smallLinearAllocBlock._ptr = prevEnd;
 632           _smallLinearAllocBlock._word_size = newFcSize;
 633           repairLinearAllocBlock(&_smallLinearAllocBlock);
 634         }
 635         // Births of chunks put into a LinAB are not recorded.  Births
 636         // of chunks as they are allocated out of a LinAB are.
 637       } else {
 638         // Add the block to the free lists, if possible coalescing it
 639         // with the last free block, and update the BOT and census data.
 640         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 641       }
 642     }
 643   }
 644 }
 645 
 646 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 647   CompactibleFreeListSpace* _cfls;
 648   CMSCollector* _collector;
 649 protected:
 650   // Override.
 651 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 652   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 653                                        HeapWord* bottom, HeapWord* top, \
 654                                        ClosureType* cl);                \
 655       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 656                                        HeapWord* bottom, HeapWord* top, \
 657                                        ClosureType* cl);                \
 658     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 659                                        HeapWord* bottom, HeapWord* top, \
 660                                        ClosureType* cl)
 661   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 662   walk_mem_region_with_cl_DECL(FilteringClosure);
 663 
 664 public:
 665   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 666                       CMSCollector* collector,
 667                       ExtendedOopClosure* cl,
 668                       CardTableModRefBS::PrecisionStyle precision,
 669                       HeapWord* boundary) :
 670     Filtering_DCTOC(sp, cl, precision, boundary),
 671     _cfls(sp), _collector(collector) {}
 672 };
 673 
 674 // We de-virtualize the block-related calls below, since we know that our
 675 // space is a CompactibleFreeListSpace.
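     // The explicit CompactibleFreeListSpace:: and CMSCollector:: qualifiers
     // in the macro below force statically bound calls, so no virtual
     // dispatch is paid per block walked.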
 676 
 677 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 678 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 679                                                  HeapWord* bottom,              \
 680                                                  HeapWord* top,                 \
 681                                                  ClosureType* cl) {             \
 682    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 683    if (is_par) {                                                                \
 684      assert(SharedHeap::heap()->n_par_threads() ==                              \
 685             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 686      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 687    } else {                                                                     \
 688      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 689    }                                                                            \
 690 }                                                                               \
 691 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 692                                                       HeapWord* bottom,         \
 693                                                       HeapWord* top,            \
 694                                                       ClosureType* cl) {        \
 695   /* Skip parts that are before "mr", in case "block_start" sent us             \
 696      back too far. */                                                           \
 697   HeapWord* mr_start = mr.start();                                              \
 698   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 699   HeapWord* next = bottom + bot_size;                                           \
 700   while (next < mr_start) {                                                     \
 701     bottom = next;                                                              \
 702     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 703     next = bottom + bot_size;                                                   \
 704   }                                                                             \
 705                                                                                 \
 706   while (bottom < top) {                                                        \
 707     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 708         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 709                     oop(bottom)) &&                                             \
 710         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 711       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 712       bottom += _cfls->adjustObjectSize(word_sz);                               \
 713     } else {                                                                    \
 714       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 715     }                                                                           \
 716   }                                                                             \
 717 }                                                                               \
 718 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 719                                                         HeapWord* bottom,       \
 720                                                         HeapWord* top,          \
 721                                                         ClosureType* cl) {      \
 722   /* Skip parts that are before "mr", in case "block_start" sent us             \
 723      back too far. */                                                           \
 724   HeapWord* mr_start = mr.start();                                              \
 725   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 726   HeapWord* next = bottom + bot_size;                                           \
 727   while (next < mr_start) {                                                     \
 728     bottom = next;                                                              \
 729     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 730     next = bottom + bot_size;                                                   \
 731   }                                                                             \
 732                                                                                 \
 733   while (bottom < top) {                                                        \
 734     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 735         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 736                     oop(bottom)) &&                                             \
 737         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 738       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 739       bottom += _cfls->adjustObjectSize(word_sz);                               \
 740     } else {                                                                    \
 741       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 742     }                                                                           \
 743   }                                                                             \
 744 }
 745 
 746 // (There are only two of these, rather than N, because the split is due
 747 // only to the introduction of the FilteringClosure, a local part of the
 748 // impl of this abstraction.)
 749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 751 
 752 DirtyCardToOopClosure*
 753 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 754                                       CardTableModRefBS::PrecisionStyle precision,
 755                                       HeapWord* boundary) {
 756   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 757 }
 758 
 759 
 760 // Note on locking for the space iteration functions:
 761 // since the collector's iteration activities are concurrent with
 762 // allocation activities by mutators, absent a suitable mutual exclusion
 763 // mechanism the iterators may go awry. For instance, a block being iterated
 764 // may suddenly be allocated or divided up and part of it allocated and
 765 // so on.
 766 
 767 // Apply the given closure to each block in the space.
 768 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 769   assert_lock_strong(freelistLock());
 770   HeapWord *cur, *limit;
 771   for (cur = bottom(), limit = end(); cur < limit;
 772        cur += cl->do_blk_careful(cur));
 773 }
 774 
 775 // Apply the given closure to each block in the space.
 776 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 777   assert_lock_strong(freelistLock());
 778   HeapWord *cur, *limit;
 779   for (cur = bottom(), limit = end(); cur < limit;
 780        cur += cl->do_blk(cur));
 781 }
 782 
 783 // Apply the given closure to each oop in the space.
 784 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 785   assert_lock_strong(freelistLock());
 786   HeapWord *cur, *limit;
 787   size_t curSize;
 788   for (cur = bottom(), limit = end(); cur < limit;
 789        cur += curSize) {
 790     curSize = block_size(cur);
 791     if (block_is_obj(cur)) {
 792       oop(cur)->oop_iterate(cl);
 793     }
 794   }
 795 }
 796 
 797 // Apply the given closure to each oop in the intersection of the space and the given memory region.
 798 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
 799   assert_lock_strong(freelistLock());
 800   if (is_empty()) {
 801     return;
 802   }
 803   MemRegion cur = MemRegion(bottom(), end());
 804   mr = mr.intersection(cur);
 805   if (mr.is_empty()) {
 806     return;
 807   }
 808   if (mr.equals(cur)) {
 809     oop_iterate(cl);
 810     return;
 811   }
 812   assert(mr.end() <= end(), "just took an intersection above");
 813   HeapWord* obj_addr = block_start(mr.start());
 814   HeapWord* t = mr.end();
 815 
 816   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 817   if (block_is_obj(obj_addr)) {
 818     // Handle first object specially.
 819     oop obj = oop(obj_addr);
 820     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 821   } else {
 822     FreeChunk* fc = (FreeChunk*)obj_addr;
 823     obj_addr += fc->size();
 824   }
 825   while (obj_addr < t) {
 826     HeapWord* obj = obj_addr;
 827     obj_addr += block_size(obj_addr);
 828     // If "obj_addr" is not greater than t, the end of the region,
 829     // then the entire object "obj" is within the region.
 830     if (obj_addr <= t) {
 831       if (block_is_obj(obj)) {
 832         oop(obj)->oop_iterate(cl);
 833       }
 834     } else {
 835       // "obj" extends beyond end of region
 836       if (block_is_obj(obj)) {
 837         oop(obj)->oop_iterate(&smr_blk);
 838       }
 839       break;
 840     }
 841   }
 842 }
 843 
 844 // NOTE: In the following methods, in order to safely be able to
 845 // apply the closure to an object, we need to be sure that the
 846 // object has been initialized. We are guaranteed that an object
 847 // is initialized if we are holding the Heap_lock with the
 848 // world stopped.
 849 void CompactibleFreeListSpace::verify_objects_initialized() const {
 850   if (is_init_completed()) {
 851     assert_locked_or_safepoint(Heap_lock);
 852     if (Universe::is_fully_initialized()) {
 853       guarantee(SafepointSynchronize::is_at_safepoint(),
 854                 "Required for objects to be initialized");
 855     }
 856   } // else make a concession at vm start-up
 857 }
 858 
 859 // Apply the given closure to each object in the space
 860 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 861   assert_lock_strong(freelistLock());
 862   NOT_PRODUCT(verify_objects_initialized());
 863   HeapWord *cur, *limit;
 864   size_t curSize;
 865   for (cur = bottom(), limit = end(); cur < limit;
 866        cur += curSize) {
 867     curSize = block_size(cur);
 868     if (block_is_obj(cur)) {
 869       blk->do_object(oop(cur));
 870     }
 871   }
 872 }
 873 
 874 // Apply the given closure to each live object in the space.
 875 //   Because CompactibleFreeListSpace is used
 876 // by the ConcurrentMarkSweepGeneration for concurrent GCs, the space may
 877 // contain objects with references to objects that are no longer
 878 // valid.  For example, an object may reference another object
 879 // that has already been swept up (collected).  This method uses
 880 // obj_is_alive() to determine whether it is safe to apply the closure to
 881 // an object.  See obj_is_alive() for details on how the liveness of an
 882 // object is decided.
 883 
 884 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 885   assert_lock_strong(freelistLock());
 886   NOT_PRODUCT(verify_objects_initialized());
 887   HeapWord *cur, *limit;
 888   size_t curSize;
 889   for (cur = bottom(), limit = end(); cur < limit;
 890        cur += curSize) {
 891     curSize = block_size(cur);
 892     if (block_is_obj(cur) && obj_is_alive(cur)) {
 893       blk->do_object(oop(cur));
 894     }
 895   }
 896 }
 897 
 898 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 899                                                   UpwardsObjectClosure* cl) {
 900   assert_locked(freelistLock());
 901   NOT_PRODUCT(verify_objects_initialized());
 902   Space::object_iterate_mem(mr, cl);
 903 }
 904 
 905 // Callers of this iterator beware: The closure application should
 906 // be robust in the face of uninitialized objects and should (always)
 907 // return a correct size so that the next addr + size below gives us a
 908 // valid block boundary. [See for instance,
 909 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 910 // in ConcurrentMarkSweepGeneration.cpp.]
 911 HeapWord*
 912 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 913   assert_lock_strong(freelistLock());
 914   HeapWord *addr, *last;
 915   size_t size;
 916   for (addr = bottom(), last  = end();
 917        addr < last; addr += size) {
 918     FreeChunk* fc = (FreeChunk*)addr;
 919     if (fc->is_free()) {
 920       // Since we hold the free list lock, which protects direct
 921       // allocation in this generation by mutators, a free object
 922       // will remain free throughout this iteration code.
 923       size = fc->size();
 924     } else {
 925       // Note that the object need not necessarily be initialized,
 926       // because (for instance) the free list lock does NOT protect
 927       // object initialization. The closure application below must
 928       // therefore be correct in the face of uninitialized objects.
 929       size = cl->do_object_careful(oop(addr));
 930       if (size == 0) {
 931         // An unparsable object found. Signal early termination.
 932         return addr;
 933       }
 934     }
 935   }
 936   return NULL;
 937 }
 938 
 939 // Callers of this iterator beware: The closure application should
 940 // be robust in the face of uninitialized objects and should (always)
 941 // return a correct size so that the next addr + size below gives us a
 942 // valid block boundary. [See for instance,
 943 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 944 // in ConcurrentMarkSweepGeneration.cpp.]
 945 HeapWord*
 946 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 947   ObjectClosureCareful* cl) {
 948   assert_lock_strong(freelistLock());
 949   // Can't use used_region() below because it may not necessarily
 950   // be the same as [bottom(),end()); although we could
 951   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 952   // that appears too cumbersome, so we just do the simpler check
 953   // in the assertion below.
 954   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 955          "mr should be non-empty and within used space");
 956   HeapWord *addr, *end;
 957   size_t size;
 958   for (addr = block_start_careful(mr.start()), end  = mr.end();
 959        addr < end; addr += size) {
 960     FreeChunk* fc = (FreeChunk*)addr;
 961     if (fc->is_free()) {
 962       // Since we hold the free list lock, which protects direct
 963       // allocation in this generation by mutators, a free object
 964       // will remain free throughout this iteration code.
 965       size = fc->size();
 966     } else {
 967       // Note that the object need not necessarily be initialized,
 968       // because (for instance) the free list lock does NOT protect
 969       // object initialization. The closure application below must
 970       // therefore be correct in the face of uninitialized objects.
 971       size = cl->do_object_careful_m(oop(addr), mr);
 972       if (size == 0) {
 973         // An unparsable object found. Signal early termination.
 974         return addr;
 975       }
 976     }
 977   }
 978   return NULL;
 979 }
 980 
 981 
 982 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 983   NOT_PRODUCT(verify_objects_initialized());
 984   return _bt.block_start(p);
 985 }
 986 
 987 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 988   return _bt.block_start_careful(p);
 989 }
 990 
 991 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 992   NOT_PRODUCT(verify_objects_initialized());
 993   // This must be volatile, or else there is a danger that the compiler
 994   // will compile the code below into a sometimes-infinite loop, by keeping
 995   // the value read the first time in a register.
 996   while (true) {
 997     // We must do this until we get a consistent view of the object.
 998     if (FreeChunk::indicatesFreeChunk(p)) {
 999       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1000       size_t res = fc->size();
1001       // If the object is still a free chunk, return the size, else it
1002       // has been allocated so try again.
1003       if (FreeChunk::indicatesFreeChunk(p)) {
1004         assert(res != 0, "Block size should not be 0");
1005         return res;
1006       }
1007     } else {
1008       // must read from what 'p' points to in each loop.
1009       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1010       if (k != NULL) {
1011         assert(k->is_klass(), "Should really be klass oop.");
1012         oop o = (oop)p;
1013         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1014         size_t res = o->size_given_klass(k);
1015         res = adjustObjectSize(res);
1016         assert(res != 0, "Block size should not be 0");
1017         return res;
1018       }
1019     }
1020   }
1021 }
1022 
1023 // TODO: Now that is_parsable is gone, we should combine these two functions.
1024 // A variant of the above that uses the Printezis bits for
1025 // unparsable but allocated objects. This avoids any possible
1026 // stalls waiting for mutators to initialize objects, and is
1027 // thus potentially faster than the variant above. However,
1028 // this variant may return a zero size for a block that is
1029 // under mutation and for which a consistent size cannot be
1030 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
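     // In debug builds, the DEBUG_ONLY 'loops' counter below additionally
     // asserts that the retry loop is taken at most once.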
1031 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1032                                                      const CMSCollector* c)
1033 const {
1034   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1035   // This must be volatile, or else there is a danger that the compiler
1036   // will compile the code below into a sometimes-infinite loop, by keeping
1037   // the value read the first time in a register.
1038   DEBUG_ONLY(uint loops = 0;)
1039   while (true) {
1040     // We must do this until we get a consistent view of the object.
1041     if (FreeChunk::indicatesFreeChunk(p)) {
1042       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1043       size_t res = fc->size();
1044       if (FreeChunk::indicatesFreeChunk(p)) {
1045         assert(res != 0, "Block size should not be 0");
1046         assert(loops == 0, "Should be 0");
1047         return res;
1048       }
1049     } else {
1050       // must read from what 'p' points to in each loop.
1051       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1052       // We trust the size of any object that has a non-NULL
1053       // klass and (for those in the perm gen) is parsable
1054       // -- irrespective of its conc_safe-ty.
1055       if (k != NULL) {
1056         assert(k->is_klass(), "Should really be klass oop.");
1057         oop o = (oop)p;
1058         assert(o->is_oop(), "Should be an oop");
1059         size_t res = o->size_given_klass(k);
1060         res = adjustObjectSize(res);
1061         assert(res != 0, "Block size should not be 0");
1062         return res;
1063       } else {
1064         // May return 0 if P-bits not present.
1065         return c->block_size_if_printezis_bits(p);
1066       }
1067     }
1068     assert(loops == 0, "Can loop at most once");
1069     DEBUG_ONLY(loops++;)
1070   }
1071 }
1072 
1073 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1074   NOT_PRODUCT(verify_objects_initialized());
1075   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1076   FreeChunk* fc = (FreeChunk*)p;
1077   if (fc->is_free()) {
1078     return fc->size();
1079   } else {
1080     // Ignore mark word because this may be a recently promoted
1081     // object whose mark word is used to chain together grey
1082     // objects (the last one would have a null value).
1083     assert(oop(p)->is_oop(true), "Should be an oop");
1084     return adjustObjectSize(oop(p)->size());
1085   }
1086 }
1087 
1088 // This implementation assumes that the property of "being an object" is
1089 // stable.  But being a free chunk may not be (because of parallel
1090 // promotion).
1091 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1092   FreeChunk* fc = (FreeChunk*)p;
1093   assert(is_in_reserved(p), "Should be in space");
1094   // When doing a mark-sweep-compact of the CMS generation, this
1095   // assertion may fail because prepare_for_compaction() uses
1096   // space that is garbage to maintain information on ranges of
1097   // live objects so that these live ranges can be moved as a whole.
1098   // Comment out this assertion until that problem can be solved
1099   // (i.e., the block start calculation may look at objects
1100   // at addresses below "p" in finding the object that contains "p",
1101   // and those objects (if garbage) may have been modified to hold
1102   // live range information).
1103   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1104   //        "Should be a block boundary");
1105   if (FreeChunk::indicatesFreeChunk(p)) return false;
1106   Klass* k = oop(p)->klass_or_null();
1107   if (k != NULL) {
1108     // Ignore mark word because it may have been used to
1109     // chain together promoted objects (the last one
1110     // would have a null value).
1111     assert(oop(p)->is_oop(true), "Should be an oop");
1112     return true;
1113   } else {
1114     return false;  // Was not an object at the start of collection.
1115   }
1116 }
1117 
1118 // Check if the object is alive. This fact is checked either by consulting
1119 // the main marking bitmap in the sweeping phase or, if it's a permanent
1120 // generation and we're not in the sweeping phase, by checking the
1121 // perm_gen_verify_bit_map where we store the "deadness" information if
1122 // we did not sweep the perm gen in the most recent previous GC cycle.
1123 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1124   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1125          "Else races are possible");
1126   assert(block_is_obj(p), "The address should point to an object");
1127 
1128   // If we're sweeping, we use object liveness information from the main bit map
1129   // for both perm gen and old gen.
1130   // We don't need to lock the bitmap (live_map or dead_map below), because
1131   // EITHER we are in the middle of the sweeping phase, and the
1132   // main marking bit map (live_map below) is locked,
1133   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1134   // is stable, because it's mutated only in the sweeping phase.
1135   // NOTE: This method is also used by jmap where, if class unloading is
1136   // off, the results can return "false" for legitimate perm objects,
1137   // when we are not in the midst of a sweeping phase, which can result
1138   // in jmap not reporting certain perm gen objects. This will be moot
1139   // if/when the perm gen goes away in the future.
1140   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1141     CMSBitMap* live_map = _collector->markBitMap();
1142     return live_map->par_isMarked((HeapWord*) p);
1143   }
1144   return true;
1145 }
1146 
1147 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1148   FreeChunk* fc = (FreeChunk*)p;
1149   assert(is_in_reserved(p), "Should be in space");
1150   assert(_bt.block_start(p) == p, "Should be a block boundary");
1151   if (!fc->is_free()) {
1152     // Ignore mark word because it may have been used to
1153     // chain together promoted objects (the last one
1154     // would have a null value).
1155     assert(oop(p)->is_oop(true), "Should be an oop");
1156     return true;
1157   }
1158   return false;
1159 }
1160 
1161 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1162 // approximate answer if you don't hold the freelistLock when you call this.
1163 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1164   size_t size = 0;
1165   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1166     debug_only(
1167       // We may be calling here without the lock in which case we
1168       // won't do this modest sanity check.
1169       if (freelistLock()->owned_by_self()) {
1170         size_t total_list_size = 0;
1171         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1172           fc = fc->next()) {
1173           total_list_size += i;
1174         }
1175         assert(total_list_size == i * _indexedFreeList[i].count(),
1176                "Count in list is incorrect");
1177       }
1178     )
1179     size += i * _indexedFreeList[i].count();
1180   }
1181   return size;
1182 }
1183 
1184 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1185   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1186   return allocate(size);
1187 }
1188 
1189 HeapWord*
1190 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1191   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1192 }
1193 
1194 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1195   assert_lock_strong(freelistLock());
1196   HeapWord* res = NULL;
1197   assert(size == adjustObjectSize(size),
1198          "use adjustObjectSize() before calling into allocate()");
1199 
1200   if (_adaptive_freelists) {
1201     res = allocate_adaptive_freelists(size);
1202   } else {  // non-adaptive free lists
1203     res = allocate_non_adaptive_freelists(size);
1204   }
1205 
1206   if (res != NULL) {
1207     // check that res does lie in this space!
1208     assert(is_in_reserved(res), "Not in this space!");
1209     assert(is_aligned((void*)res), "alignment check");
1210 
1211     FreeChunk* fc = (FreeChunk*)res;
1212     fc->markNotFree();
1213     assert(!fc->is_free(), "shouldn't be marked free");
1214     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1215     // Verify that the block offset table shows this to
1216     // be a single block, but not one which is unallocated.
1217     _bt.verify_single_block(res, size);
1218     _bt.verify_not_unallocated(res, size);
1219     // mangle a just allocated object with a distinct pattern.
1220     debug_only(fc->mangleAllocated(size));
1221   }
1222 
1223   return res;
1224 }
1225 
1226 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1227   HeapWord* res = NULL;
1228   // try and use linear allocation for smaller blocks
1229   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1230     // if successful, the following also adjusts block offset table
1231     res = getChunkFromSmallLinearAllocBlock(size);
1232   }
1233   // Else triage to indexed lists for smaller sizes
1234   if (res == NULL) {
1235     if (size < SmallForDictionary) {
1236       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1237     } else {
1238       // else get it from the big dictionary; if even this doesn't
1239       // work we are out of luck.
1240       res = (HeapWord*)getChunkFromDictionaryExact(size);
1241     }
1242   }
1243 
1244   return res;
1245 }
1246 
1247 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1248   assert_lock_strong(freelistLock());
1249   HeapWord* res = NULL;
1250   assert(size == adjustObjectSize(size),
1251          "use adjustObjectSize() before calling into allocate()");
1252 
1253   // Strategy
1254   //   if small
  //     exact size from small object indexed list
1256   //     small or large linear allocation block (linAB) as appropriate
1257   //     take from lists of greater sized chunks
1258   //   else
1259   //     dictionary
1260   //     small or large linear allocation block if it has the space
1261   // Try allocating exact size from indexTable first
1262   if (size < IndexSetSize) {
1263     res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
1265       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1266         "Not removed from free list");
1267       // no block offset table adjustment is necessary on blocks in
1268       // the indexed lists.
1269 
1270     // Try allocating from the small LinAB
1271     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1272         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1273         // if successful, the above also adjusts block offset table
1274         // Note that this call will refill the LinAB to
        // satisfy the request.  This is different from evm.
1277         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1278     } else {
1279       // Raid the exact free lists larger than size, even if they are not
1280       // overpopulated.
1281       res = (HeapWord*) getChunkFromGreater(size);
1282     }
1283   } else {
1284     // Big objects get allocated directly from the dictionary.
1285     res = (HeapWord*) getChunkFromDictionaryExact(size);
1286     if (res == NULL) {
1287       // Try hard not to fail since an allocation failure will likely
1288       // trigger a synchronous GC.  Try to get the space from the
1289       // allocation blocks.
1290       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1291     }
1292   }
1293 
1294   return res;
1295 }
1296 
1297 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1298 // when promoting obj.
1299 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1300   // Depending on the object size, expansion may require refilling either a
1301   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1302   // is added because the dictionary may over-allocate to avoid fragmentation.
1303   size_t space = obj_size;
1304   if (!_adaptive_freelists) {
1305     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1306   }
1307   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1308   return space;
1309 }
1310 
1311 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1312   FreeChunk* ret;
1313 
1314   assert(numWords >= MinChunkSize, "Size is less than minimum");
1315   assert(linearAllocationWouldFail() || bestFitFirst(),
1316     "Should not be here");
1317 
1318   size_t i;
1319   size_t currSize = numWords + MinChunkSize;
1320   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1321   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1322     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1323     if (fl->head()) {
1324       ret = getFromListGreater(fl, numWords);
1325       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1326       return ret;
1327     }
1328   }
1329 
1330   currSize = MAX2((size_t)SmallForDictionary,
1331                   (size_t)(numWords + MinChunkSize));
1332 
1333   /* Try to get a chunk that satisfies request, while avoiding
1334      fragmentation that can't be handled. */
1335   {
1336     ret =  dictionary()->get_chunk(currSize);
1337     if (ret != NULL) {
1338       assert(ret->size() - numWords >= MinChunkSize,
1339              "Chunk is too small");
1340       _bt.allocated((HeapWord*)ret, ret->size());
1341       /* Carve returned chunk. */
1342       (void) splitChunkAndReturnRemainder(ret, numWords);
1343       /* Label this as no longer a free chunk. */
1344       assert(ret->is_free(), "This chunk should be free");
1345       ret->link_prev(NULL);
1346     }
1347     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1348     return ret;
1349   }
1350   ShouldNotReachHere();
1351 }
1352 
1353 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1354   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1355   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1356 }
1357 
1358 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1359   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1360          (_smallLinearAllocBlock._word_size == fc->size()),
1361          "Linear allocation block shows incorrect size");
1362   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1363           (_smallLinearAllocBlock._word_size == fc->size()));
1364 }
1365 
1366 // Check if the purported free chunk is present either as a linear
1367 // allocation block, the size-indexed table of (smaller) free blocks,
1368 // or the larger free blocks kept in the binary tree dictionary.
1369 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1370   if (verify_chunk_is_linear_alloc_block(fc)) {
1371     return true;
1372   } else if (fc->size() < IndexSetSize) {
1373     return verifyChunkInIndexedFreeLists(fc);
1374   } else {
1375     return dictionary()->verify_chunk_in_free_list(fc);
1376   }
1377 }
1378 
1379 #ifndef PRODUCT
1380 void CompactibleFreeListSpace::assert_locked() const {
1381   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1382 }
1383 
1384 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1385   CMSLockVerifier::assert_locked(lock);
1386 }
1387 #endif
1388 
1389 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1390   // In the parallel case, the main thread holds the free list lock
  // on behalf of the parallel threads.
1392   FreeChunk* fc;
1393   {
1394     // If GC is parallel, this might be called by several threads.
1395     // This should be rare enough that the locking overhead won't affect
1396     // the sequential code.
1397     MutexLockerEx x(parDictionaryAllocLock(),
1398                     Mutex::_no_safepoint_check_flag);
1399     fc = getChunkFromDictionary(size);
1400   }
1401   if (fc != NULL) {
1402     fc->dontCoalesce();
1403     assert(fc->is_free(), "Should be free, but not coalescable");
1404     // Verify that the block offset table shows this to
1405     // be a single block, but not one which is unallocated.
1406     _bt.verify_single_block((HeapWord*)fc, fc->size());
1407     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1408   }
1409   return fc;
1410 }
1411 
1412 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1413   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1414   assert_locked();
1415 
1416   // if we are tracking promotions, then first ensure space for
1417   // promotion (including spooling space for saving header if necessary).
1418   // then allocate and copy, then track promoted info if needed.
1419   // When tracking (see PromotionInfo::track()), the mark word may
1420   // be displaced and in this case restoration of the mark word
1421   // occurs in the (oop_since_save_marks_)iterate phase.
1422   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1423     return NULL;
1424   }
1425   // Call the allocate(size_t, bool) form directly to avoid the
1426   // additional call through the allocate(size_t) form.  Having
  // the compiler inline the call is problematic because allocate(size_t)
1428   // is a virtual method.
1429   HeapWord* res = allocate(adjustObjectSize(obj_size));
1430   if (res != NULL) {
1431     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1432     // if we should be tracking promotions, do so.
1433     if (_promoInfo.tracking()) {
1434         _promoInfo.track((PromotedObject*)res);
1435     }
1436   }
1437   return oop(res);
1438 }
1439 
1440 HeapWord*
1441 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1442   assert_locked();
1443   assert(size >= MinChunkSize, "minimum chunk size");
1444   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1445     "maximum from smallLinearAllocBlock");
1446   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1447 }
1448 
1449 HeapWord*
1450 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1451                                                        size_t size) {
1452   assert_locked();
1453   assert(size >= MinChunkSize, "too small");
1454   HeapWord* res = NULL;
  // Try to do linear allocation from blk, making sure that the block
  // has not already been exhausted.
1456   if (blk->_word_size == 0) {
1457     // We have probably been unable to fill this either in the prologue or
1458     // when it was exhausted at the last linear allocation. Bail out until
1459     // next time.
1460     assert(blk->_ptr == NULL, "consistency check");
1461     return NULL;
1462   }
1463   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1464   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1465   if (res != NULL) return res;
1466 
1467   // about to exhaust this linear allocation block
1468   if (blk->_word_size == size) { // exactly satisfied
1469     res = blk->_ptr;
1470     _bt.allocated(res, blk->_word_size);
1471   } else if (size + MinChunkSize <= blk->_refillSize) {
1472     size_t sz = blk->_word_size;
    // Update _unallocated_block if the size is such that the chunk would
    // be returned to the indexed free list.  All other chunks in the
    // indexed free lists are allocated from the dictionary, so
    // _unallocated_block has already been adjusted for them.  Do it here
    // so that the cost is incurred for all chunks added back to the
    // indexed free lists.
1478     if (sz < SmallForDictionary) {
1479       _bt.allocated(blk->_ptr, sz);
1480     }
1481     // Return the chunk that isn't big enough, and then refill below.
1482     addChunkToFreeLists(blk->_ptr, sz);
1483     split_birth(sz);
1484     // Don't keep statistics on adding back chunk from a LinAB.
1485   } else {
1486     // A refilled block would not satisfy the request.
1487     return NULL;
1488   }
1489 
1490   blk->_ptr = NULL; blk->_word_size = 0;
1491   refillLinearAllocBlock(blk);
1492   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1493          "block was replenished");
1494   if (res != NULL) {
1495     split_birth(size);
1496     repairLinearAllocBlock(blk);
1497   } else if (blk->_ptr != NULL) {
1498     res = blk->_ptr;
1499     size_t blk_size = blk->_word_size;
1500     blk->_word_size -= size;
1501     blk->_ptr  += size;
1502     split_birth(size);
1503     repairLinearAllocBlock(blk);
1504     // Update BOT last so that other (parallel) GC threads see a consistent
1505     // view of the BOT and free blocks.
1506     // Above must occur before BOT is updated below.
1507     OrderAccess::storestore();
1508     _bt.split_block(res, blk_size, size);  // adjust block offset table
1509   }
1510   return res;
1511 }
1512 
1513 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1514                                         LinearAllocBlock* blk,
1515                                         size_t size) {
1516   assert_locked();
1517   assert(size >= MinChunkSize, "too small");
1518 
1519   HeapWord* res = NULL;
1520   // This is the common case.  Keep it simple.
1521   if (blk->_word_size >= size + MinChunkSize) {
1522     assert(blk->_ptr != NULL, "consistency check");
1523     res = blk->_ptr;
1524     // Note that the BOT is up-to-date for the linAB before allocation.  It
1525     // indicates the start of the linAB.  The split_block() updates the
1526     // BOT for the linAB after the allocation (indicates the start of the
1527     // next chunk to be allocated).
1528     size_t blk_size = blk->_word_size;
1529     blk->_word_size -= size;
1530     blk->_ptr  += size;
1531     split_birth(size);
1532     repairLinearAllocBlock(blk);
1533     // Update BOT last so that other (parallel) GC threads see a consistent
1534     // view of the BOT and free blocks.
1535     // Above must occur before BOT is updated below.
1536     OrderAccess::storestore();
1537     _bt.split_block(res, blk_size, size);  // adjust block offset table
1538     _bt.allocated(res, size);
1539   }
1540   return res;
1541 }
1542 
1543 FreeChunk*
1544 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1545   assert_locked();
1546   assert(size < SmallForDictionary, "just checking");
1547   FreeChunk* res;
1548   res = _indexedFreeList[size].get_chunk_at_head();
1549   if (res == NULL) {
1550     res = getChunkFromIndexedFreeListHelper(size);
1551   }
1552   _bt.verify_not_unallocated((HeapWord*) res, size);
1553   assert(res == NULL || res->size() == size, "Incorrect block size");
1554   return res;
1555 }
1556 
1557 FreeChunk*
1558 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1559   bool replenish) {
1560   assert_locked();
1561   FreeChunk* fc = NULL;
1562   if (size < SmallForDictionary) {
1563     assert(_indexedFreeList[size].head() == NULL ||
1564       _indexedFreeList[size].surplus() <= 0,
1565       "List for this size should be empty or under populated");
1566     // Try best fit in exact lists before replenishing the list
1567     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1568       // Replenish list.
1569       //
1570       // Things tried that failed.
1571       //   Tried allocating out of the two LinAB's first before
1572       // replenishing lists.
1573       //   Tried small linAB of size 256 (size in indexed list)
1574       // and replenishing indexed lists from the small linAB.
1575       //
1576       FreeChunk* newFc = NULL;
1577       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1578       if (replenish_size < SmallForDictionary) {
1579         // Do not replenish from an underpopulated size.
1580         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1581             _indexedFreeList[replenish_size].head() != NULL) {
1582           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1583         } else if (bestFitFirst()) {
1584           newFc = bestFitSmall(replenish_size);
1585         }
1586       }
1587       if (newFc == NULL && replenish_size > size) {
1588         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1589         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1590       }
      // Note: The stats update re the split-death of the block obtained
      // above is recorded below, precisely when we know we are actually
      // going to split it into more than one piece.
1594       if (newFc != NULL) {
1595         if  (replenish || CMSReplenishIntermediate) {
1596           // Replenish this list and return one block to caller.
1597           size_t i;
1598           FreeChunk *curFc, *nextFc;
1599           size_t num_blk = newFc->size() / size;
1600           assert(num_blk >= 1, "Smaller than requested?");
1601           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1602           if (num_blk > 1) {
1603             // we are sure we will be splitting the block just obtained
1604             // into multiple pieces; record the split-death of the original
1605             splitDeath(replenish_size);
1606           }
1607           // carve up and link blocks 0, ..., num_blk - 2
1608           // The last chunk is not added to the lists but is returned as the
1609           // free chunk.
1610           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1611                i = 0;
1612                i < (num_blk - 1);
1613                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1614                i++) {
1615             curFc->set_size(size);
1616             // Don't record this as a return in order to try and
1617             // determine the "returns" from a GC.
            _bt.verify_not_unallocated((HeapWord*) curFc, size);
1619             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1620             _bt.mark_block((HeapWord*)curFc, size);
1621             split_birth(size);
1622             // Don't record the initial population of the indexed list
1623             // as a split birth.
1624           }
1625 
1626           // check that the arithmetic was OK above
1627           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1628             "inconsistency in carving newFc");
1629           curFc->set_size(size);
1630           _bt.mark_block((HeapWord*)curFc, size);
1631           split_birth(size);
1632           fc = curFc;
1633         } else {
1634           // Return entire block to caller
1635           fc = newFc;
1636         }
1637       }
1638     }
1639   } else {
1640     // Get a free chunk from the free chunk dictionary to be returned to
1641     // replenish the indexed free list.
1642     fc = getChunkFromDictionaryExact(size);
1643   }
1644   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1645   return fc;
1646 }
1647 
1648 FreeChunk*
1649 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1650   assert_locked();
1651   FreeChunk* fc = _dictionary->get_chunk(size,
1652                                          FreeBlockDictionary<FreeChunk>::atLeast);
1653   if (fc == NULL) {
1654     return NULL;
1655   }
1656   _bt.allocated((HeapWord*)fc, fc->size());
1657   if (fc->size() >= size + MinChunkSize) {
1658     fc = splitChunkAndReturnRemainder(fc, size);
1659   }
1660   assert(fc->size() >= size, "chunk too small");
1661   assert(fc->size() < size + MinChunkSize, "chunk too big");
1662   _bt.verify_single_block((HeapWord*)fc, fc->size());
1663   return fc;
1664 }
1665 
1666 FreeChunk*
1667 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1668   assert_locked();
1669   FreeChunk* fc = _dictionary->get_chunk(size,
1670                                          FreeBlockDictionary<FreeChunk>::atLeast);
1671   if (fc == NULL) {
1672     return fc;
1673   }
1674   _bt.allocated((HeapWord*)fc, fc->size());
1675   if (fc->size() == size) {
1676     _bt.verify_single_block((HeapWord*)fc, size);
1677     return fc;
1678   }
1679   assert(fc->size() > size, "get_chunk() guarantee");
1680   if (fc->size() < size + MinChunkSize) {
1681     // Return the chunk to the dictionary and go get a bigger one.
1682     returnChunkToDictionary(fc);
1683     fc = _dictionary->get_chunk(size + MinChunkSize,
1684                                 FreeBlockDictionary<FreeChunk>::atLeast);
1685     if (fc == NULL) {
1686       return NULL;
1687     }
1688     _bt.allocated((HeapWord*)fc, fc->size());
1689   }
1690   assert(fc->size() >= size + MinChunkSize, "tautology");
1691   fc = splitChunkAndReturnRemainder(fc, size);
1692   assert(fc->size() == size, "chunk is wrong size");
1693   _bt.verify_single_block((HeapWord*)fc, size);
1694   return fc;
1695 }
1696 
1697 void
1698 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1699   assert_locked();
1700 
1701   size_t size = chunk->size();
1702   _bt.verify_single_block((HeapWord*)chunk, size);
1703   // adjust _unallocated_block downward, as necessary
1704   _bt.freed((HeapWord*)chunk, size);
1705   _dictionary->return_chunk(chunk);
1706 #ifndef PRODUCT
1707   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1708     TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
1709     TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
1710     tl->verify_stats();
1711   }
1712 #endif // PRODUCT
1713 }
1714 
1715 void
1716 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1717   assert_locked();
1718   size_t size = fc->size();
1719   _bt.verify_single_block((HeapWord*) fc, size);
1720   _bt.verify_not_unallocated((HeapWord*) fc, size);
1721   if (_adaptive_freelists) {
1722     _indexedFreeList[size].return_chunk_at_tail(fc);
1723   } else {
1724     _indexedFreeList[size].return_chunk_at_head(fc);
1725   }
1726 #ifndef PRODUCT
1727   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1728      _indexedFreeList[size].verify_stats();
1729   }
1730 #endif // PRODUCT
1731 }
1732 
1733 // Add chunk to end of last block -- if it's the largest
1734 // block -- and update BOT and census data. We would
1735 // of course have preferred to coalesce it with the
1736 // last block, but it's currently less expensive to find the
1737 // largest block than it is to find the last.
1738 void
1739 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1740   HeapWord* chunk, size_t     size) {
1741   // check that the chunk does lie in this space!
1742   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1743   // One of the parallel gc task threads may be here
1744   // whilst others are allocating.
1745   Mutex* lock = NULL;
1746   if (ParallelGCThreads != 0) {
1747     lock = &_parDictionaryAllocLock;
1748   }
1749   FreeChunk* ec;
1750   {
1751     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1752     ec = dictionary()->find_largest_dict();  // get largest block
1753     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1754       // It's a coterminal block - we can coalesce.
1755       size_t old_size = ec->size();
1756       coalDeath(old_size);
1757       removeChunkFromDictionary(ec);
1758       size += old_size;
1759     } else {
1760       ec = (FreeChunk*)chunk;
1761     }
1762   }
1763   ec->set_size(size);
1764   debug_only(ec->mangleFreed(size));
1765   if (size < SmallForDictionary) {
1766     lock = _indexedFreeListParLocks[size];
1767   }
1768   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1769   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1770   // record the birth under the lock since the recording involves
1771   // manipulation of the list on which the chunk lives and
1772   // if the chunk is allocated and is the last on the list,
1773   // the list can go away.
1774   coalBirth(size);
1775 }
1776 
1777 void
1778 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1779                                               size_t     size) {
1780   // check that the chunk does lie in this space!
1781   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1782   assert_locked();
1783   _bt.verify_single_block(chunk, size);
1784 
1785   FreeChunk* fc = (FreeChunk*) chunk;
1786   fc->set_size(size);
1787   debug_only(fc->mangleFreed(size));
1788   if (size < SmallForDictionary) {
1789     returnChunkToFreeList(fc);
1790   } else {
1791     returnChunkToDictionary(fc);
1792   }
1793 }
1794 
1795 void
1796 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1797   size_t size, bool coalesced) {
1798   assert_locked();
1799   assert(chunk != NULL, "null chunk");
1800   if (coalesced) {
1801     // repair BOT
1802     _bt.single_block(chunk, size);
1803   }
1804   addChunkToFreeLists(chunk, size);
1805 }
1806 
1807 // We _must_ find the purported chunk on our free lists;
1808 // we assert if we don't.
1809 void
1810 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1811   size_t size = fc->size();
1812   assert_locked();
1813   debug_only(verifyFreeLists());
1814   if (size < SmallForDictionary) {
1815     removeChunkFromIndexedFreeList(fc);
1816   } else {
1817     removeChunkFromDictionary(fc);
1818   }
1819   _bt.verify_single_block((HeapWord*)fc, size);
1820   debug_only(verifyFreeLists());
1821 }
1822 
1823 void
1824 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1825   size_t size = fc->size();
1826   assert_locked();
1827   assert(fc != NULL, "null chunk");
1828   _bt.verify_single_block((HeapWord*)fc, size);
1829   _dictionary->remove_chunk(fc);
1830   // adjust _unallocated_block upward, as necessary
1831   _bt.allocated((HeapWord*)fc, size);
1832 }
1833 
1834 void
1835 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1836   assert_locked();
1837   size_t size = fc->size();
1838   _bt.verify_single_block((HeapWord*)fc, size);
1839   NOT_PRODUCT(
1840     if (FLSVerifyIndexTable) {
1841       verifyIndexedFreeList(size);
1842     }
1843   )
1844   _indexedFreeList[size].remove_chunk(fc);
1845   NOT_PRODUCT(
1846     if (FLSVerifyIndexTable) {
1847       verifyIndexedFreeList(size);
1848     }
1849   )
1850 }
1851 
1852 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1853   /* A hint is the next larger size that has a surplus.
1854      Start search at a size large enough to guarantee that
     the excess is >= MinChunkSize. */
1856   size_t start = align_object_size(numWords + MinChunkSize);
1857   if (start < IndexSetSize) {
1858     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1859     size_t    hint = _indexedFreeList[start].hint();
1860     while (hint < IndexSetSize) {
1861       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1862       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1863       if (fl->surplus() > 0 && fl->head() != NULL) {
1864         // Found a list with surplus, reset original hint
1865         // and split out a free chunk which is returned.
1866         _indexedFreeList[start].set_hint(hint);
1867         FreeChunk* res = getFromListGreater(fl, numWords);
1868         assert(res == NULL || res->is_free(),
1869           "Should be returning a free chunk");
1870         return res;
1871       }
1872       hint = fl->hint(); /* keep looking */
1873     }
1874     /* None found. */
1875     it[start].set_hint(IndexSetSize);
1876   }
1877   return NULL;
1878 }
1879 
1880 /* Requires fl->size >= numWords + MinChunkSize */
1881 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1882   size_t numWords) {
1883   FreeChunk *curr = fl->head();
  assert(curr != NULL, "List is empty");
  size_t oldNumWords = curr->size();
  assert(numWords >= MinChunkSize, "Word size is too small");
1887   assert(oldNumWords >= numWords + MinChunkSize,
1888         "Size of chunks in the list is too small");
1889 
1890   fl->remove_chunk(curr);
1891   // recorded indirectly by splitChunkAndReturnRemainder -
1892   // smallSplit(oldNumWords, numWords);
1893   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1894   // Does anything have to be done for the remainder in terms of
1895   // fixing the card table?
1896   assert(new_chunk == NULL || new_chunk->is_free(),
1897     "Should be returning a free chunk");
1898   return new_chunk;
1899 }
1900 
1901 FreeChunk*
1902 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1903   size_t new_size) {
1904   assert_locked();
1905   size_t size = chunk->size();
1906   assert(size > new_size, "Split from a smaller block?");
1907   assert(is_aligned(chunk), "alignment problem");
1908   assert(size == adjustObjectSize(size), "alignment problem");
1909   size_t rem_size = size - new_size;
1910   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1911   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1912   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1913   assert(is_aligned(ffc), "alignment problem");
1914   ffc->set_size(rem_size);
1915   ffc->link_next(NULL);
1916   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1917   // Above must occur before BOT is updated below.
1918   // adjust block offset table
1919   OrderAccess::storestore();
1920   assert(chunk->is_free() && ffc->is_free(), "Error");
1921   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1922   if (rem_size < SmallForDictionary) {
1923     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1924     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1925     assert(!is_par ||
1926            (SharedHeap::heap()->n_par_threads() ==
1927             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1928     returnChunkToFreeList(ffc);
1929     split(size, rem_size);
1930     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1931   } else {
1932     returnChunkToDictionary(ffc);
    split(size, rem_size);
1934   }
1935   chunk->set_size(new_size);
1936   return chunk;
1937 }
1938 
1939 void
1940 CompactibleFreeListSpace::sweep_completed() {
1941   // Now that space is probably plentiful, refill linear
1942   // allocation blocks as needed.
1943   refillLinearAllocBlocksIfNeeded();
1944 }
1945 
1946 void
1947 CompactibleFreeListSpace::gc_prologue() {
1948   assert_locked();
1949   if (PrintFLSStatistics != 0) {
1950     gclog_or_tty->print("Before GC:\n");
1951     reportFreeListStatistics();
1952   }
1953   refillLinearAllocBlocksIfNeeded();
1954 }
1955 
1956 void
1957 CompactibleFreeListSpace::gc_epilogue() {
1958   assert_locked();
1959   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1960     if (_smallLinearAllocBlock._word_size == 0)
1961       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1962   }
1963   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1964   _promoInfo.stopTrackingPromotions();
1965   repairLinearAllocationBlocks();
1966   // Print Space's stats
1967   if (PrintFLSStatistics != 0) {
1968     gclog_or_tty->print("After GC:\n");
1969     reportFreeListStatistics();
1970   }
1971 }
1972 
1973 // Iteration support, mostly delegated from a CMS generation
1974 
1975 void CompactibleFreeListSpace::save_marks() {
1976   assert(Thread::current()->is_VM_thread(),
1977          "Global variable should only be set when single-threaded");
1978   // Mark the "end" of the used space at the time of this call;
1979   // note, however, that promoted objects from this point
1980   // on are tracked in the _promoInfo below.
1981   set_saved_mark_word(unallocated_block());
1982 #ifdef ASSERT
1983   // Check the sanity of save_marks() etc.
1984   MemRegion ur    = used_region();
1985   MemRegion urasm = used_region_at_save_marks();
1986   assert(ur.contains(urasm),
1987          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1988                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1989                  ur.start(), ur.end(), urasm.start(), urasm.end()));
1990 #endif
1991   // inform allocator that promotions should be tracked.
1992   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1993   _promoInfo.startTrackingPromotions();
1994 }
1995 
1996 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1997   assert(_promoInfo.tracking(), "No preceding save_marks?");
1998   assert(SharedHeap::heap()->n_par_threads() == 0,
1999          "Shouldn't be called if using parallel gc.");
2000   return _promoInfo.noPromotions();
2001 }
2002 
2003 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2004                                                                             \
2005 void CompactibleFreeListSpace::                                             \
2006 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2007   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2008          "Shouldn't be called (yet) during parallel part of gc.");          \
2009   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2010   /*                                                                        \
2011    * This also restores any displaced headers and removes the elements from \
2012    * the iteration set as they are processed, so that we have a clean slate \
2013    * at the end of the iteration. Note, thus, that if new objects are       \
2014    * promoted as a result of the iteration they are iterated over as well.  \
2015    */                                                                       \
2016   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2017 }
2018 
2019 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2020 
2021 
2022 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
2023   // ugghh... how would one do this efficiently for a non-contiguous space?
2024   guarantee(false, "NYI");
2025 }
2026 
2027 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2028   return _smallLinearAllocBlock._word_size == 0;
2029 }
2030 
2031 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2032   // Fix up linear allocation blocks to look like free blocks
2033   repairLinearAllocBlock(&_smallLinearAllocBlock);
2034 }
2035 
2036 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2037   assert_locked();
2038   if (blk->_ptr != NULL) {
2039     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2040            "Minimum block size requirement");
2041     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2042     fc->set_size(blk->_word_size);
2043     fc->link_prev(NULL);   // mark as free
2044     fc->dontCoalesce();
2045     assert(fc->is_free(), "just marked it free");
2046     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2047   }
2048 }
2049 
2050 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2051   assert_locked();
2052   if (_smallLinearAllocBlock._ptr == NULL) {
2053     assert(_smallLinearAllocBlock._word_size == 0,
2054       "Size of linAB should be zero if the ptr is NULL");
2055     // Reset the linAB refill and allocation size limit.
2056     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2057   }
2058   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2059 }
2060 
2061 void
2062 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2063   assert_locked();
2064   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2065          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2066          "blk invariant");
2067   if (blk->_ptr == NULL) {
2068     refillLinearAllocBlock(blk);
2069   }
2070   if (PrintMiscellaneous && Verbose) {
2071     if (blk->_word_size == 0) {
2072       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2073     }
2074   }
2075 }
2076 
2077 void
2078 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2079   assert_locked();
2080   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2081          "linear allocation block should be empty");
2082   FreeChunk* fc;
2083   if (blk->_refillSize < SmallForDictionary &&
2084       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2085     // A linAB's strategy might be to use small sizes to reduce
2086     // fragmentation but still get the benefits of allocation from a
2087     // linAB.
2088   } else {
2089     fc = getChunkFromDictionary(blk->_refillSize);
2090   }
2091   if (fc != NULL) {
2092     blk->_ptr  = (HeapWord*)fc;
2093     blk->_word_size = fc->size();
2094     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2095   }
2096 }
2097 
2098 // Support for concurrent collection policy decisions.
2099 bool CompactibleFreeListSpace::should_concurrent_collect() const {
  // In the future we might want to add in fragmentation stats --
2101   // including erosion of the "mountain" into this decision as well.
2102   return !adaptive_freelists() && linearAllocationWouldFail();
2103 }
2104 
2105 // Support for compaction
2106 
2107 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2108   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2109   // prepare_for_compaction() uses the space between live objects
  // so that the later phase can skip dead space quickly.  As a result,
  // verification of the free lists does not work afterwards.
2112 }
2113 
2114 #define obj_size(q) adjustObjectSize(oop(q)->size())
2115 #define adjust_obj_size(s) adjustObjectSize(s)
2116 
2117 void CompactibleFreeListSpace::adjust_pointers() {
2118   // In other versions of adjust_pointers(), a bail out
2119   // based on the amount of live data in the generation
2120   // (i.e., if 0, bail out) may be used.
2121   // Cannot test used() == 0 here because the free lists have already
2122   // been mangled by the compaction.
2123 
2124   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2125   // See note about verification in prepare_for_compaction().
2126 }
2127 
2128 void CompactibleFreeListSpace::compact() {
2129   SCAN_AND_COMPACT(obj_size);
2130 }
2131 
2132 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
// where fbs are the free block sizes
2134 double CompactibleFreeListSpace::flsFrag() const {
2135   size_t itabFree = totalSizeInIndexedFreeLists();
2136   double frag = 0.0;
2137   size_t i;
2138 
2139   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2140     double sz  = i;
2141     frag      += _indexedFreeList[i].count() * (sz * sz);
2142   }
2143 
2144   double totFree = itabFree +
2145                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2146   if (totFree > 0) {
2147     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2148             (totFree * totFree));
2149     frag = (double)1.0  - frag;
2150   } else {
2151     assert(frag == 0.0, "Follows from totFree == 0");
2152   }
2153   return frag;
2154 }
2155 
2156 void CompactibleFreeListSpace::beginSweepFLCensus(
2157   float inter_sweep_current,
2158   float inter_sweep_estimate,
2159   float intra_sweep_estimate) {
2160   assert_locked();
2161   size_t i;
2162   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2163     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2164     if (PrintFLSStatistics > 1) {
2165       gclog_or_tty->print("size[%d] : ", i);
2166     }
2167     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2168     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2169     fl->set_before_sweep(fl->count());
2170     fl->set_bfr_surp(fl->surplus());
2171   }
2172   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2173                                     inter_sweep_current,
2174                                     inter_sweep_estimate,
2175                                     intra_sweep_estimate);
2176 }
2177 
2178 void CompactibleFreeListSpace::setFLSurplus() {
2179   assert_locked();
2180   size_t i;
2181   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2182     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2183     fl->set_surplus(fl->count() -
2184                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2185   }
2186 }
2187 
2188 void CompactibleFreeListSpace::setFLHints() {
2189   assert_locked();
2190   size_t i;
2191   size_t h = IndexSetSize;
2192   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2193     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2194     fl->set_hint(h);
2195     if (fl->surplus() > 0) {
2196       h = i;
2197     }
2198   }
2199 }
2200 
2201 void CompactibleFreeListSpace::clearFLCensus() {
2202   assert_locked();
2203   size_t i;
2204   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2205     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2206     fl->set_prev_sweep(fl->count());
2207     fl->set_coal_births(0);
2208     fl->set_coal_deaths(0);
2209     fl->set_split_births(0);
2210     fl->set_split_deaths(0);
2211   }
2212 }
2213 
2214 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2215   if (PrintFLSStatistics > 0) {
2216     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2217     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2218                            largestAddr);
2219   }
2220   setFLSurplus();
2221   setFLHints();
2222   if (PrintGC && PrintFLSCensus > 0) {
2223     printFLCensus(sweep_count);
2224   }
2225   clearFLCensus();
2226   assert_locked();
2227   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2228 }
2229 
2230 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2231   if (size < SmallForDictionary) {
2232     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2233     return (fl->coal_desired() < 0) ||
2234            ((int)fl->count() > fl->coal_desired());
2235   } else {
2236     return dictionary()->coal_dict_over_populated(size);
2237   }
2238 }
2239 
2240 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2241   assert(size < SmallForDictionary, "Size too large for indexed list");
2242   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2243   fl->increment_coal_births();
2244   fl->increment_surplus();
2245 }
2246 
2247 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2248   assert(size < SmallForDictionary, "Size too large for indexed list");
2249   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2250   fl->increment_coal_deaths();
2251   fl->decrement_surplus();
2252 }
2253 
2254 void CompactibleFreeListSpace::coalBirth(size_t size) {
2255   if (size  < SmallForDictionary) {
2256     smallCoalBirth(size);
2257   } else {
2258     dictionary()->dict_census_update(size,
2259                                    false /* split */,
2260                                    true /* birth */);
2261   }
2262 }
2263 
2264 void CompactibleFreeListSpace::coalDeath(size_t size) {
2265   if(size  < SmallForDictionary) {
2266     smallCoalDeath(size);
2267   } else {
2268     dictionary()->dict_census_update(size,
2269                                    false /* split */,
2270                                    false /* birth */);
2271   }
2272 }
2273 
2274 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2275   assert(size < SmallForDictionary, "Size too large for indexed list");
2276   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2277   fl->increment_split_births();
2278   fl->increment_surplus();
2279 }
2280 
2281 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2282   assert(size < SmallForDictionary, "Size too large for indexed list");
2283   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2284   fl->increment_split_deaths();
2285   fl->decrement_surplus();
2286 }
2287 
2288 void CompactibleFreeListSpace::split_birth(size_t size) {
2289   if (size  < SmallForDictionary) {
2290     smallSplitBirth(size);
2291   } else {
2292     dictionary()->dict_census_update(size,
2293                                    true /* split */,
2294                                    true /* birth */);
2295   }
2296 }
2297 
2298 void CompactibleFreeListSpace::splitDeath(size_t size) {
2299   if (size  < SmallForDictionary) {
2300     smallSplitDeath(size);
2301   } else {
2302     dictionary()->dict_census_update(size,
2303                                    true /* split */,
2304                                    false /* birth */);
2305   }
2306 }
2307 
2308 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2309   size_t to2 = from - to1;
2310   splitDeath(from);
2311   split_birth(to1);
2312   split_birth(to2);
2313 }
2314 
2315 void CompactibleFreeListSpace::print() const {
2316   print_on(tty);
2317 }
2318 
2319 void CompactibleFreeListSpace::prepare_for_verify() {
2320   assert_locked();
2321   repairLinearAllocationBlocks();
2322   // Verify that the SpoolBlocks look like free blocks of
2323   // appropriate sizes... To be done ...
2324 }
2325 
2326 class VerifyAllBlksClosure: public BlkClosure {
2327  private:
2328   const CompactibleFreeListSpace* _sp;
2329   const MemRegion                 _span;
2330   HeapWord*                       _last_addr;
2331   size_t                          _last_size;
2332   bool                            _last_was_obj;
2333   bool                            _last_was_live;
2334 
2335  public:
2336   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2337     MemRegion span) :  _sp(sp), _span(span),
2338                        _last_addr(NULL), _last_size(0),
2339                        _last_was_obj(false), _last_was_live(false) { }
2340 
2341   virtual size_t do_blk(HeapWord* addr) {
2342     size_t res;
2343     bool   was_obj  = false;
2344     bool   was_live = false;
2345     if (_sp->block_is_obj(addr)) {
2346       was_obj = true;
2347       oop p = oop(addr);
2348       guarantee(p->is_oop(), "Should be an oop");
2349       res = _sp->adjustObjectSize(p->size());
2350       if (_sp->obj_is_alive(addr)) {
2351         was_live = true;
2352         p->verify();
2353       }
2354     } else {
2355       FreeChunk* fc = (FreeChunk*)addr;
2356       res = fc->size();
2357       if (FLSVerifyLists && !fc->cantCoalesce()) {
2358         guarantee(_sp->verify_chunk_in_free_list(fc),
2359                   "Chunk should be on a free list");
2360       }
2361     }
2362     if (res == 0) {
2363       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2364       gclog_or_tty->print_cr(
2365         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2366         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2367         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2368         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2369       _sp->print_on(gclog_or_tty);
2370       guarantee(false, "Seppuku!");
2371     }
2372     _last_addr = addr;
2373     _last_size = res;
2374     _last_was_obj  = was_obj;
2375     _last_was_live = was_live;
2376     return res;
2377   }
2378 };
2379 
2380 class VerifyAllOopsClosure: public OopClosure {
2381  private:
2382   const CMSCollector*             _collector;
2383   const CompactibleFreeListSpace* _sp;
2384   const MemRegion                 _span;
2385   const bool                      _past_remark;
2386   const CMSBitMap*                _bit_map;
2387 
2388  protected:
2389   void do_oop(void* p, oop obj) {
2390     if (_span.contains(obj)) { // the interior oop points into CMS heap
2391       if (!_span.contains(p)) { // reference from outside CMS heap
2392         // Should be a valid object; the first disjunct below allows
2393         // us to sidestep an assertion in block_is_obj() that insists
2394         // that p be in _sp. Note that several generations (and spaces)
2395         // are spanned by _span (CMS heap) above.
2396         guarantee(!_sp->is_in_reserved(obj) ||
2397                   _sp->block_is_obj((HeapWord*)obj),
2398                   "Should be an object");
2399         guarantee(obj->is_oop(), "Should be an oop");
2400         obj->verify();
2401         if (_past_remark) {
          // Remark has been completed, so the object should be marked.
          guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2404         }
2405       } else { // reference within CMS heap
2406         if (_past_remark) {
2407           // Remark has been completed -- so the referent should have
2408           // been marked, if referring object is.
2409           if (_bit_map->isMarked(_collector->block_start(p))) {
2410             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2411           }
2412         }
2413       }
2414     } else if (_sp->is_in_reserved(p)) {
2415       // the reference is from FLS, and points out of FLS
2416       guarantee(obj->is_oop(), "Should be an oop");
2417       obj->verify();
2418     }
2419   }
2420 
2421   template <class T> void do_oop_work(T* p) {
2422     T heap_oop = oopDesc::load_heap_oop(p);
2423     if (!oopDesc::is_null(heap_oop)) {
2424       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2425       do_oop(p, obj);
2426     }
2427   }
2428 
2429  public:
2430   VerifyAllOopsClosure(const CMSCollector* collector,
2431     const CompactibleFreeListSpace* sp, MemRegion span,
2432     bool past_remark, CMSBitMap* bit_map) :
2433     _collector(collector), _sp(sp), _span(span),
2434     _past_remark(past_remark), _bit_map(bit_map) { }
2435 
2436   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2437   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2438 };
2439 
2440 void CompactibleFreeListSpace::verify() const {
2441   assert_lock_strong(&_freelistLock);
2442   verify_objects_initialized();
2443   MemRegion span = _collector->_span;
2444   bool past_remark = (_collector->abstract_state() ==
2445                       CMSCollector::Sweeping);
2446 
2447   ResourceMark rm;
2448   HandleMark  hm;
2449 
2450   // Check integrity of CFL data structures
2451   _promoInfo.verify();
2452   _dictionary->verify();
2453   if (FLSVerifyIndexTable) {
2454     verifyIndexedFreeLists();
2455   }
2456   // Check integrity of all objects and free blocks in space
2457   {
2458     VerifyAllBlksClosure cl(this, span);
2459     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2460   }
2461   // Check that all references in the heap to FLS
2462   // are to valid objects in FLS or that references in
2463   // FLS are to valid objects elsewhere in the heap
2464   if (FLSVerifyAllHeapReferences)
2465   {
2466     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2467       _collector->markBitMap());
2468     CollectedHeap* ch = Universe::heap();
2469 
2470     // Iterate over all oops in the heap. Uses the _no_header version
2471     // since we are not interested in following the klass pointers.
2472     ch->oop_iterate_no_header(&cl);
2473   }
2474 
2475   if (VerifyObjectStartArray) {
2476     // Verify the block offset table
2477     _bt.verify();
2478   }
2479 }
2480 
2481 #ifndef PRODUCT
2482 void CompactibleFreeListSpace::verifyFreeLists() const {
2483   if (FLSVerifyLists) {
2484     _dictionary->verify();
2485     verifyIndexedFreeLists();
2486   } else {
2487     if (FLSVerifyDictionary) {
2488       _dictionary->verify();
2489     }
2490     if (FLSVerifyIndexTable) {
2491       verifyIndexedFreeLists();
2492     }
2493   }
2494 }
2495 #endif
2496 
2497 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2498   size_t i = 0;
2499   for (; i < IndexSetStart; i++) {
2500     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2501   }
2502   for (; i < IndexSetSize; i++) {
2503     verifyIndexedFreeList(i);
2504   }
2505 }
2506 
2507 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2508   FreeChunk* fc   =  _indexedFreeList[size].head();
2509   FreeChunk* tail =  _indexedFreeList[size].tail();
2510   size_t    num = _indexedFreeList[size].count();
2511   size_t      n = 0;
2512   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2513             "Slot should have been empty");
2514   for (; fc != NULL; fc = fc->next(), n++) {
2515     guarantee(fc->size() == size, "Size inconsistency");
2516     guarantee(fc->is_free(), "!free?");
2517     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2518     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2519   }
2520   guarantee(n == num, "Incorrect count");
2521 }
2522 
2523 #ifndef PRODUCT
2524 void CompactibleFreeListSpace::check_free_list_consistency() const {
2525   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
2526     "Some sizes can't be allocated without recourse to"
2527     " linear allocation buffers");
2528   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
2529     "else MIN_TREE_CHUNK_SIZE is wrong");
2530   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2531   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2532 }
2533 #endif
2534 
2535 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2536   assert_lock_strong(&_freelistLock);
2537   AdaptiveFreeList<FreeChunk> total;
2538   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2539   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2540   size_t total_free = 0;
2541   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2542     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2543     total_free += fl->count() * fl->size();
2544     if (i % (40*IndexSetStride) == 0) {
2545       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2546     }
2547     fl->print_on(gclog_or_tty);
2548     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2549     total.set_surplus(    total.surplus()     + fl->surplus()    );
2550     total.set_desired(    total.desired()     + fl->desired()    );
2551     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2552     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2553     total.set_count(      total.count()       + fl->count()      );
2554     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2555     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2556     total.set_split_births(total.split_births() + fl->split_births());
2557     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2558   }
2559   total.print_on(gclog_or_tty, "TOTAL");
2560   gclog_or_tty->print_cr("Total free in indexed lists "
2561                          SIZE_FORMAT " words", total_free);
2562   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2563     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2564             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2565     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2566   _dictionary->print_dict_census();
2567 }
2568 
2569 ///////////////////////////////////////////////////////////////////////////
2570 // CFLS_LAB
2571 ///////////////////////////////////////////////////////////////////////////
2572 
2573 #define VECTOR_257(x)                                                                                  \
2574   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2575   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2576      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2577      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2578      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2579      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2580      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2581      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2582      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2583      x }
2584 
2585 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2586 // OldPLABSize, whose static default is different; if overridden at the
2587 // command-line, this will get reinitialized via a call to
2588 // modify_initialization() below.
2589 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2590   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2591 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2592 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2593 
2594 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2595   _cfls(cfls)
2596 {
2597   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2598   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2599        i < CompactibleFreeListSpace::IndexSetSize;
2600        i += CompactibleFreeListSpace::IndexSetStride) {
2601     _indexedFreeList[i].set_size(i);
2602     _num_blocks[i] = 0;
2603   }
2604 }
2605 
2606 static bool _CFLS_LAB_modified = false;
2607 
2608 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2609   assert(!_CFLS_LAB_modified, "Call only once");
2610   _CFLS_LAB_modified = true;
2611   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2612        i < CompactibleFreeListSpace::IndexSetSize;
2613        i += CompactibleFreeListSpace::IndexSetStride) {
2614     _blocks_to_claim[i].modify(n, wt, true /* force */);
2615   }
2616 }
2617 
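     // Allocate a block of "word_sz" words for promotion.  Requests of
     // IndexSetSize words or more go straight to the shared dictionary under
     // the parallel allocation lock; smaller requests are served from this
     // worker's local indexed free list, which is refilled from the global
     // pool when it runs dry.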
2618 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2619   FreeChunk* res;
2620   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2621   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2622     // This locking manages sync with other large object allocations.
2623     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2624                     Mutex::_no_safepoint_check_flag);
2625     res = _cfls->getChunkFromDictionaryExact(word_sz);
2626     if (res == NULL) return NULL;
2627   } else {
2628     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2629     if (fl->count() == 0) {
2630       // Attempt to refill this local free list.
2631       get_from_global_pool(word_sz, fl);
2632       // If it didn't work, give up.
2633       if (fl->count() == 0) return NULL;
2634     }
2635     res = fl->get_chunk_at_head();
2636     assert(res != NULL, "Why was count non-zero?");
2637   }
2638   res->markNotFree();
2639   assert(!res->is_free(), "shouldn't be marked free");
2640   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2641   // mangle a just allocated object with a distinct pattern.
2642   debug_only(res->mangleAllocated(word_sz));
2643   return (HeapWord*)res;
2644 }
2645 
2646 // Get a chunk of blocks of the right size and update related
2647 // book-keeping stats
2648 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2649   // Get the #blocks we want to claim
2650   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2651   assert(n_blks > 0, "Error");
2652   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2653   // In some cases, when the application has a phase change,
2654   // there may be a sudden and sharp shift in the object survival
2655   // profile, and updating the counts at the end of a scavenge
2656   // may not be quick enough, giving rise to large scavenge pauses
2657   // during these phase changes. It is beneficial to detect such
2658   // changes on-the-fly during a scavenge and avoid such a phase-change
2659   // pothole. The following code is a heuristic attempt to do that.
2660   // It is protected by a product flag until we have gained
2661   // enough experience with this heuristic and fine-tuned its behaviour.
2662   // WARNING: This might increase fragmentation if we overreact to
2663   // small spikes, so some kind of historical smoothing based on
2664   // previous experience with the greater reactivity might be useful.
2665   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2666   // default.
2667   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
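         // _num_blocks[word_sz] is the demand for this size seen so far in the
         // current scavenge.  'multiple' counts how many tolerance units
         // (CMSOldPLABToleranceFactor * CMSOldPLABNumRefills * n_blks blocks)
         // that demand amounts to; each such unit inflates the claim by another
         // CMSOldPLABReactivityFactor * n_blks blocks, capped at CMSOldPLABMax.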
2668     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2669     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2670     n_blks = MIN2(n_blks, CMSOldPLABMax);
2671   }
2672   assert(n_blks > 0, "Error");
2673   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2674   // Update stats table entry for this block size
2675   _num_blocks[word_sz] += fl->count();
2676 }
2677 
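     // Recompute, for each block size, how many blocks a worker should claim
     // per refill: the demand observed across all workers during the last
     // scavenge, divided by (number of workers * CMSOldPLABNumRefills),
     // clamped to [CMSOldPLABMin, CMSOldPLABMax] and folded into the
     // historical average when ResizeOldPLAB is enabled; the per-scavenge
     // counters are reset either way.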
2678 void CFLS_LAB::compute_desired_plab_size() {
2679   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2680        i < CompactibleFreeListSpace::IndexSetSize;
2681        i += CompactibleFreeListSpace::IndexSetStride) {
2682     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2683            "Counter inconsistency");
2684     if (_global_num_workers[i] > 0) {
2685       // Need to smooth wrt historical average
2686       if (ResizeOldPLAB) {
2687         _blocks_to_claim[i].sample(
2688           MAX2((size_t)CMSOldPLABMin,
2689           MIN2((size_t)CMSOldPLABMax,
2690                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2691       }
2692       // Reset counters for next round
2693       _global_num_workers[i] = 0;
2694       _global_num_blocks[i] = 0;
2695       if (PrintOldPLAB) {
2696         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2697       }
2698     }
2699   }
2700 }
2701 
2702 // If this is changed in the future to allow parallel
2703 // access, one would need to take the FL locks and,
2704 // depending on how it is used, stagger access from
2705 // parallel threads to reduce contention.
2706 void CFLS_LAB::retire(int tid) {
2707   // We run this single threaded with the world stopped;
2708   // so no need for locks and such.
2709   NOT_PRODUCT(Thread* t = Thread::current();)
2710   assert(Thread::current()->is_VM_thread(), "Error");
2711   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2712        i < CompactibleFreeListSpace::IndexSetSize;
2713        i += CompactibleFreeListSpace::IndexSetStride) {
2714     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2715            "Can't retire more than what we obtained");
2716     if (_num_blocks[i] > 0) {
2717       size_t num_retire =  _indexedFreeList[i].count();
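           // num_retire chunks of this size remain unused on the local list;
           // the difference (_num_blocks[i] - num_retire) is what this worker
           // actually consumed and is what feeds the global demand statistics.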
2718       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2719       {
2720         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2721         //                Mutex::_no_safepoint_check_flag);
2722 
2723         // Update globals stats for num_blocks used
2724         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2725         _global_num_workers[i]++;
2726         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2727         if (num_retire > 0) {
2728           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2729           // Reset this list.
2730           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2731           _indexedFreeList[i].set_size(i);
2732         }
2733       }
2734       if (PrintOldPLAB) {
2735         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2736                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2737       }
2738       // Reset stats for next round
2739       _num_blocks[i]         = 0;
2740     }
2741   }
2742 }
2743 
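     // Fill "fl" with "n" free chunks of "word_sz" words each.  Chunks are
     // taken preferentially from the indexed free lists, splitting larger
     // indexed chunks k ways when CMSSplitIndexedFreeListBlocks allows it;
     // failing that, a single block is carved out of the dictionary and split
     // up, with any remainder returned to the dictionary or the indexed lists
     // rather than to "fl".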
2744 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2745   assert(fl->count() == 0, "Precondition.");
2746   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2747          "Precondition");
2748 
2749   // We'll try all multiples of word_sz in the indexed set, starting with
2750   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2751   // then try getting a big chunk and splitting it.
2752   {
2753     bool found;
2754     int  k;
2755     size_t cur_sz;
2756     for (k = 1, cur_sz = k * word_sz, found = false;
2757          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2758          (CMSSplitIndexedFreeListBlocks || k <= 1);
2759          k++, cur_sz = k * word_sz) {
2760       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2761       fl_for_cur_sz.set_size(cur_sz);
2762       {
2763         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2764                         Mutex::_no_safepoint_check_flag);
2765         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2766         if (gfl->count() != 0) {
2767           // nn is the number of chunks of size cur_sz that
2768           // we'd need to split k-ways each, in order to create
2769           // "n" chunks of size word_sz each.
2770           const size_t nn = MAX2(n/k, (size_t)1);
2771           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2772           found = true;
2773           if (k > 1) {
2774             // Update split death stats for the cur_sz-size blocks list:
2775             // we increment the split death count by the number of blocks
2776             // we just took from the cur_sz-size blocks list and which
2777             // we will be splitting below.
2778             ssize_t deaths = gfl->split_deaths() +
2779                              fl_for_cur_sz.count();
2780             gfl->set_split_deaths(deaths);
2781           }
2782         }
2783       }
2784       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2785       if (found) {
2786         if (k == 1) {
2787           fl->prepend(&fl_for_cur_sz);
2788         } else {
2789           // Divide each block on fl_for_cur_sz up k ways.
2790           FreeChunk* fc;
2791           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2792             // Must do this in reverse order, so that anybody attempting to
2793             // access the main chunk sees it as a single free block until we
2794             // change it.
2795             size_t fc_size = fc->size();
2796             assert(fc->is_free(), "Error");
2797             for (int i = k-1; i >= 0; i--) {
2798               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2799               assert((i != 0) ||
2800                         ((fc == ffc) && ffc->is_free() &&
2801                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2802                         "Counting error");
2803               ffc->set_size(word_sz);
2804               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2805               ffc->link_next(NULL);
2806               // Above must occur before BOT is updated below.
2807               OrderAccess::storestore();
2808               // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2809               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2810               fc_size -= word_sz;
2811               assert(fc_size == i*word_sz, "Error");
2812               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2813               _bt.verify_single_block((HeapWord*)fc, fc_size);
2814               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2815               // Push this on "fl".
2816               fl->return_chunk_at_head(ffc);
2817             }
2818             // TRAP
2819             assert(fl->tail()->next() == NULL, "List invariant.");
2820           }
2821         }
2822         // Update birth stats for this block size.
2823         size_t num = fl->count();
2824         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2825                         Mutex::_no_safepoint_check_flag);
2826         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2827         _indexedFreeList[word_sz].set_split_births(births);
2828         return;
2829       }
2830     }
2831   }
2832   // Otherwise, we'll split a block from the dictionary.
2833   FreeChunk* fc = NULL;
2834   FreeChunk* rem_fc = NULL;
2835   size_t rem;
2836   {
2837     MutexLockerEx x(parDictionaryAllocLock(),
2838                     Mutex::_no_safepoint_check_flag);
2839     while (n > 0) {
2840       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2841                                   FreeBlockDictionary<FreeChunk>::atLeast);
2842       if (fc != NULL) {
2843         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2844         dictionary()->dict_census_update(fc->size(),
2845                                        true /*split*/,
2846                                        false /*birth*/);
2847         break;
2848       } else {
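              // No block of at least MAX2(n * word_sz, dictionary minimum)
              // words was found; relax the request to one fewer chunk and retry.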
2849         n--;
2850       }
2851     }
2852     if (fc == NULL) return;
2853     // Otherwise, split up that block.
2854     assert((ssize_t)n >= 1, "Control point invariant");
2855     assert(fc->is_free(), "Error: should be a free block");
2856     _bt.verify_single_block((HeapWord*)fc, fc->size());
2857     const size_t nn = fc->size() / word_sz;
2858     n = MIN2(nn, n);
2859     assert((ssize_t)n >= 1, "Control point invariant");
2860     rem = fc->size() - n * word_sz;
2861     // If there is a remainder, and it's too small, allocate one fewer.
2862     if (rem > 0 && rem < MinChunkSize) {
2863       n--; rem += word_sz;
2864     }
2865     // Note that at this point we may have n == 0.
2866     assert((ssize_t)n >= 0, "Control point invariant");
2867 
2868     // If n is 0, the chunk fc that was found is not large
2869     // enough to leave a viable remainder.  We are unable to
2870     // allocate even one block.  Return fc to the
2871     // dictionary and return, leaving "fl" empty.
2872     if (n == 0) {
2873       returnChunkToDictionary(fc);
2874       assert(fl->count() == 0, "We never allocated any blocks");
2875       return;
2876     }
2877 
2878     // First return the remainder, if any.
2879     // Note that we hold the lock until we decide if we're going to give
2880     // back the remainder to the dictionary, since a concurrent allocation
2881     // may otherwise see the heap as empty.  (We're willing to take that
2882     // hit if the block is a small block.)
2883     if (rem > 0) {
2884       size_t prefix_size = n * word_sz;
2885       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2886       rem_fc->set_size(rem);
2887       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2888       rem_fc->link_next(NULL);
2889       // Above must occur before BOT is updated below.
2890       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2891       OrderAccess::storestore();
2892       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2893       assert(fc->is_free(), "Error");
2894       fc->set_size(prefix_size);
2895       if (rem >= IndexSetSize) {
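             // The remainder is itself too large for the indexed lists; give
             // it back to the dictionary and record the split birth there.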
2896         returnChunkToDictionary(rem_fc);
2897         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2898         rem_fc = NULL;
2899       }
2900       // Otherwise, return it to the small list below.
2901     }
2902   }
2903   if (rem_fc != NULL) {
2904     MutexLockerEx x(_indexedFreeListParLocks[rem],
2905                     Mutex::_no_safepoint_check_flag);
2906     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2907     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2908     smallSplitBirth(rem);
2909   }
2910   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2911   // Now do the splitting up.
2912   // Must do this in reverse order, so that anybody attempting to
2913   // access the main chunk sees it as a single free block until we
2914   // change it.
2915   size_t fc_size = n * word_sz;
2916   // All but first chunk in this loop
2917   for (ssize_t i = n-1; i > 0; i--) {
2918     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2919     ffc->set_size(word_sz);
2920     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2921     ffc->link_next(NULL);
2922     // Above must occur before BOT is updated below.
2923     OrderAccess::storestore();
2924     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2925     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2926     fc_size -= word_sz;
2927     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2928     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2929     _bt.verify_single_block((HeapWord*)fc, fc_size);
2930     // Push this on "fl".
2931     fl->return_chunk_at_head(ffc);
2932   }
2933   // First chunk
2934   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2935   // The blocks above should show their new sizes before the first block below
2936   fc->set_size(word_sz);
2937   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2938   fc->link_next(NULL);
2939   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2940   _bt.verify_single_block((HeapWord*)fc, fc->size());
2941   fl->return_chunk_at_head(fc);
2942 
2943   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2944   {
2945     // Update the stats for this block size.
2946     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2947                     Mutex::_no_safepoint_check_flag);
2948     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2949     _indexedFreeList[word_sz].set_split_births(births);
2950     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2951     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2952   }
2953 
2954   // TRAP
2955   assert(fl->tail()->next() == NULL, "List invariant.");
2956 }
2957 
2958 // Set up the space's par_seq_tasks structure for work claiming
2959 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2960 // XXX Need to suitably abstract and generalize this and the next
2961 // method into one.
2962 void
2963 CompactibleFreeListSpace::
2964 initialize_sequential_subtasks_for_rescan(int n_threads) {
2965   // The "size" of each task is fixed according to rescan_task_size.
2966   assert(n_threads > 0, "Unexpected n_threads argument");
2967   const size_t task_size = rescan_task_size();
2968   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2969   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2970   assert(n_tasks == 0 ||
2971          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2972           (used_region().start() + n_tasks*task_size >= used_region().end())),
2973          "n_tasks calculation incorrect");
2974   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2975   assert(!pst->valid(), "Clobbering existing data?");
2976   // Sets the condition for completion of the subtask (how many threads
2977   // need to finish in order to be done).
2978   pst->set_n_threads(n_threads);
2979   pst->set_n_tasks((int)n_tasks);
2980 }
2981 
2982 // Set up the space's par_seq_tasks structure for work claiming
2983 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2984 void
2985 CompactibleFreeListSpace::
2986 initialize_sequential_subtasks_for_marking(int n_threads,
2987                                            HeapWord* low) {
2988   // The "size" of each task is fixed according to rescan_task_size.
2989   assert(n_threads > 0, "Unexpected n_threads argument");
2990   const size_t task_size = marking_task_size();
2991   assert(task_size > CardTableModRefBS::card_size_in_words &&
2992          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2993          "Otherwise arithmetic below would be incorrect");
2994   MemRegion span = _gen->reserved();
2995   if (low != NULL) {
2996     if (span.contains(low)) {
2997       // Align low down to a card boundary so that
2998       // we can use block_offset_careful() on span boundaries.
2999       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3000                                  CardTableModRefBS::card_size);
3001       // Clip span prefix at aligned_low
3002       span = span.intersection(MemRegion(aligned_low, span.end()));
3003     } else if (low > span.end()) {
3004       span = MemRegion(low, low);  // Null region
3005     } // else use entire span
3006   }
3007   assert(span.is_empty() ||
3008          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3009         "span should start at a card boundary");
3010   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3011   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3012   assert(n_tasks == 0 ||
3013          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3014           (span.start() + n_tasks*task_size >= span.end())),
3015          "n_tasks calculation incorrect");
3016   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3017   assert(!pst->valid(), "Clobbering existing data?");
3018   // Sets the condition for completion of the subtask (how many threads
3019   // need to finish in order to be done).
3020   pst->set_n_threads(n_threads);
3021   pst->set_n_tasks((int)n_tasks);
3022 }