1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/space.inline.hpp"
  37 #include "memory/universe.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 #include "runtime/vmThread.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 /////////////////////////////////////////////////////////////////////////
  48 //// CompactibleFreeListSpace
  49 /////////////////////////////////////////////////////////////////////////
  50 
  51 // highest ranked  free list lock rank
  52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  53 
  54 // Defaults are 0 so things will break badly if incorrectly initialized.
  55 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  57 
  58 size_t MinChunkSize = 0;
  59 
  60 void CompactibleFreeListSpace::set_cms_values() {
  61   // Set CMS global values
  62   assert(MinChunkSize == 0, "already set");
  63 
  64   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  65   // for chunks to contain a FreeChunk.
  66   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  67   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  68 
  69   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  70   IndexSetStart  = MinChunkSize;
  71   IndexSetStride = MinObjAlignment;
  72 }
  73 
  74 // Constructor
  75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  76   MemRegion mr, bool use_adaptive_freelists,
  77   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  78   _dictionaryChoice(dictionaryChoice),
  79   _adaptive_freelists(use_adaptive_freelists),
  80   _bt(bs, mr),
  81   // free list locks are in the range of values taken by _lockRank
  82   // This range currently is [_leaf+2, _leaf+3]
  83   // Note: this requires that CFLspace c'tors
  84   // are called serially in the order in which the locks are
  85   // are acquired in the program text. This is true today.
  86   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  87                 Monitor::_safepoint_check_sometimes),
  88   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  89                           "CompactibleFreeListSpace._dict_par_lock", true,
  90                           Monitor::_safepoint_check_never),
  91   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  92                     CMSRescanMultiple),
  93   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  94                     CMSConcMarkMultiple),
  95   _collector(NULL),
  96   _preconsumptionDirtyCardClosure(NULL)
  97 {
  98   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  99          "FreeChunk is larger than expected");
 100   _bt.set_space(this);
 101   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 102   // We have all of "mr", all of which we place in the dictionary
 103   // as one big chunk. We'll need to decide here which of several
 104   // possible alternative dictionary implementations to use. For
 105   // now the choice is easy, since we have only one working
 106   // implementation, namely, the simple binary tree (splaying
 107   // temporarily disabled).
 108   switch (dictionaryChoice) {
 109     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 110       _dictionary = new AFLBinaryTreeDictionary(mr);
 111       break;
 112     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 113     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 114     default:
 115       warning("dictionaryChoice: selected option not understood; using"
 116               " default BinaryTreeDictionary implementation instead.");
 117   }
 118   assert(_dictionary != NULL, "CMS dictionary initialization");
 119   // The indexed free lists are initially all empty and are lazily
 120   // filled in on demand. Initialize the array elements to NULL.
 121   initializeIndexedFreeListArray();
 122 
 123   // Not using adaptive free lists assumes that allocation is first
 124   // from the linAB's.  Also a cms perm gen which can be compacted
 125   // has to have the klass's klassKlass allocated at a lower
 126   // address in the heap than the klass so that the klassKlass is
 127   // moved to its new location before the klass is moved.
 128   // Set the _refillSize for the linear allocation blocks
 129   if (!use_adaptive_freelists) {
 130     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 131                                            FreeBlockDictionary<FreeChunk>::atLeast);
 132     // The small linAB initially has all the space and will allocate
 133     // a chunk of any size.
 134     HeapWord* addr = (HeapWord*) fc;
 135     _smallLinearAllocBlock.set(addr, fc->size() ,
 136       1024*SmallForLinearAlloc, fc->size());
 137     // Note that _unallocated_block is not updated here.
 138     // Allocations from the linear allocation block should
 139     // update it.
 140   } else {
 141     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 142                                SmallForLinearAlloc);
 143   }
 144   // CMSIndexedFreeListReplenish should be at least 1
 145   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 146   _promoInfo.setSpace(this);
 147   if (UseCMSBestFit) {
 148     _fitStrategy = FreeBlockBestFitFirst;
 149   } else {
 150     _fitStrategy = FreeBlockStrategyNone;
 151   }
 152   check_free_list_consistency();
 153 
 154   // Initialize locks for parallel case.
 155   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 156     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 157                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 158     DEBUG_ONLY(
 159       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 160     )
 161   }
 162   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 163 }
 164 
 165 // Like CompactibleSpace forward() but always calls cross_threshold() to
 166 // update the block offset table.  Removed initialize_threshold call because
 167 // CFLS does not use a block offset array for contiguous spaces.
 168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 169                                     CompactPoint* cp, HeapWord* compact_top) {
 170   // q is alive
 171   // First check if we should switch compaction space
 172   assert(this == cp->space, "'this' should be current compaction space.");
 173   size_t compaction_max_size = pointer_delta(end(), compact_top);
 174   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 175     "virtual adjustObjectSize_v() method is not correct");
 176   size_t adjusted_size = adjustObjectSize(size);
 177   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 178          "no small fragments allowed");
 179   assert(minimum_free_block_size() == MinChunkSize,
 180          "for de-virtualized reference below");
 181   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
 182   if (adjusted_size + MinChunkSize > compaction_max_size &&
 183       adjusted_size != compaction_max_size) {
 184     do {
 185       // switch to next compaction space
 186       cp->space->set_compaction_top(compact_top);
 187       cp->space = cp->space->next_compaction_space();
 188       if (cp->space == NULL) {
 189         cp->gen = GenCollectedHeap::heap()->young_gen();
 190         assert(cp->gen != NULL, "compaction must succeed");
 191         cp->space = cp->gen->first_compaction_space();
 192         assert(cp->space != NULL, "generation must have a first compaction space");
 193       }
 194       compact_top = cp->space->bottom();
 195       cp->space->set_compaction_top(compact_top);
 196       // The correct adjusted_size may not be the same as that for this method
 197       // (i.e., cp->space may no longer be "this" so adjust the size again.
 198       // Use the virtual method which is not used above to save the virtual
 199       // dispatch.
 200       adjusted_size = cp->space->adjust_object_size_v(size);
 201       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 202       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 203     } while (adjusted_size > compaction_max_size);
 204   }
 205 
 206   // store the forwarding pointer into the mark word
 207   if ((HeapWord*)q != compact_top) {
 208     q->forward_to(oop(compact_top));
 209     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 210   } else {
 211     // if the object isn't moving we can just set the mark to the default
 212     // mark and handle it specially later on.
 213     q->init_mark();
 214     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 215   }
 216 
 217   compact_top += adjusted_size;
 218 
 219   // we need to update the offset table so that the beginnings of objects can be
 220   // found during scavenge.  Note that we are updating the offset table based on
 221   // where the object will be once the compaction phase finishes.
 222 
 223   // Always call cross_threshold().  A contiguous space can only call it when
 224   // the compaction_top exceeds the current threshold but not for an
 225   // non-contiguous space.
 226   cp->threshold =
 227     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 228   return compact_top;
 229 }
 230 
 231 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 232 // and use of single_block instead of alloc_block.  The name here is not really
 233 // appropriate - maybe a more general name could be invented for both the
 234 // contiguous and noncontiguous spaces.
 235 
 236 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 237   _bt.single_block(start, the_end);
 238   return end();
 239 }
 240 
 241 // Initialize them to NULL.
 242 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 243   for (size_t i = 0; i < IndexSetSize; i++) {
 244     // Note that on platforms where objects are double word aligned,
 245     // the odd array elements are not used.  It is convenient, however,
 246     // to map directly from the object size to the array element.
 247     _indexedFreeList[i].reset(IndexSetSize);
 248     _indexedFreeList[i].set_size(i);
 249     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 250     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 251     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 253   }
 254 }
 255 
 256 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 257   for (size_t i = 1; i < IndexSetSize; i++) {
 258     assert(_indexedFreeList[i].size() == (size_t) i,
 259       "Indexed free list sizes are incorrect");
 260     _indexedFreeList[i].reset(IndexSetSize);
 261     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 262     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 263     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 265   }
 266 }
 267 
 268 void CompactibleFreeListSpace::reset(MemRegion mr) {
 269   resetIndexedFreeListArray();
 270   dictionary()->reset();
 271   if (BlockOffsetArrayUseUnallocatedBlock) {
 272     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 273     // Everything's allocated until proven otherwise.
 274     _bt.set_unallocated_block(end());
 275   }
 276   if (!mr.is_empty()) {
 277     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 278     _bt.single_block(mr.start(), mr.word_size());
 279     FreeChunk* fc = (FreeChunk*) mr.start();
 280     fc->set_size(mr.word_size());
 281     if (mr.word_size() >= IndexSetSize ) {
 282       returnChunkToDictionary(fc);
 283     } else {
 284       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 285       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 286     }
 287     coalBirth(mr.word_size());
 288   }
 289   _promoInfo.reset();
 290   _smallLinearAllocBlock._ptr = NULL;
 291   _smallLinearAllocBlock._word_size = 0;
 292 }
 293 
 294 void CompactibleFreeListSpace::reset_after_compaction() {
 295   // Reset the space to the new reality - one free chunk.
 296   MemRegion mr(compaction_top(), end());
 297   reset(mr);
 298   // Now refill the linear allocation block(s) if possible.
 299   if (_adaptive_freelists) {
 300     refillLinearAllocBlocksIfNeeded();
 301   } else {
 302     // Place as much of mr in the linAB as we can get,
 303     // provided it was big enough to go into the dictionary.
 304     FreeChunk* fc = dictionary()->find_largest_dict();
 305     if (fc != NULL) {
 306       assert(fc->size() == mr.word_size(),
 307              "Why was the chunk broken up?");
 308       removeChunkFromDictionary(fc);
 309       HeapWord* addr = (HeapWord*) fc;
 310       _smallLinearAllocBlock.set(addr, fc->size() ,
 311         1024*SmallForLinearAlloc, fc->size());
 312       // Note that _unallocated_block is not updated here.
 313     }
 314   }
 315 }
 316 
 317 // Walks the entire dictionary, returning a coterminal
 318 // chunk, if it exists. Use with caution since it involves
 319 // a potentially complete walk of a potentially large tree.
 320 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 321 
 322   assert_lock_strong(&_freelistLock);
 323 
 324   return dictionary()->find_chunk_ends_at(end());
 325 }
 326 
 327 
 328 #ifndef PRODUCT
 329 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 330   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 331     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 332   }
 333 }
 334 
 335 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 336   size_t sum = 0;
 337   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 338     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 339   }
 340   return sum;
 341 }
 342 
 343 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 344   size_t count = 0;
 345   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 346     debug_only(
 347       ssize_t total_list_count = 0;
 348       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 349          fc = fc->next()) {
 350         total_list_count++;
 351       }
 352       assert(total_list_count ==  _indexedFreeList[i].count(),
 353         "Count in list is incorrect");
 354     )
 355     count += _indexedFreeList[i].count();
 356   }
 357   return count;
 358 }
 359 
 360 size_t CompactibleFreeListSpace::totalCount() {
 361   size_t num = totalCountInIndexedFreeLists();
 362   num +=  dictionary()->total_count();
 363   if (_smallLinearAllocBlock._word_size != 0) {
 364     num++;
 365   }
 366   return num;
 367 }
 368 #endif
 369 
 370 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 371   FreeChunk* fc = (FreeChunk*) p;
 372   return fc->is_free();
 373 }
 374 
 375 size_t CompactibleFreeListSpace::used() const {
 376   return capacity() - free();
 377 }
 378 
 379 size_t CompactibleFreeListSpace::free() const {
 380   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 381   // if you do this while the structures are in flux you
 382   // may get an approximate answer only; for instance
 383   // because there is concurrent allocation either
 384   // directly by mutators or for promotion during a GC.
 385   // It's "MT-safe", however, in the sense that you are guaranteed
 386   // not to crash and burn, for instance, because of walking
 387   // pointers that could disappear as you were walking them.
 388   // The approximation is because the various components
 389   // that are read below are not read atomically (and
 390   // further the computation of totalSizeInIndexedFreeLists()
 391   // is itself a non-atomic computation. The normal use of
 392   // this is during a resize operation at the end of GC
 393   // and at that time you are guaranteed to get the
 394   // correct actual value. However, for instance, this is
 395   // also read completely asynchronously by the "perf-sampler"
 396   // that supports jvmstat, and you are apt to see the values
 397   // flicker in such cases.
 398   assert(_dictionary != NULL, "No _dictionary?");
 399   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 400           totalSizeInIndexedFreeLists() +
 401           _smallLinearAllocBlock._word_size) * HeapWordSize;
 402 }
 403 
 404 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 405   assert(_dictionary != NULL, "No _dictionary?");
 406   assert_locked();
 407   size_t res = _dictionary->max_chunk_size();
 408   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 409                        (size_t) SmallForLinearAlloc - 1));
 410   // XXX the following could potentially be pretty slow;
 411   // should one, pessimistically for the rare cases when res
 412   // calculated above is less than IndexSetSize,
 413   // just return res calculated above? My reasoning was that
 414   // those cases will be so rare that the extra time spent doesn't
 415   // really matter....
 416   // Note: do not change the loop test i >= res + IndexSetStride
 417   // to i > res below, because i is unsigned and res may be zero.
 418   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 419        i -= IndexSetStride) {
 420     if (_indexedFreeList[i].head() != NULL) {
 421       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 422       return i;
 423     }
 424   }
 425   return res;
 426 }
 427 
 428 void LinearAllocBlock::print_on(outputStream* st) const {
 429   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 430             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 431             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 432 }
 433 
 434 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 435   st->print_cr("COMPACTIBLE FREELIST SPACE");
 436   st->print_cr(" Space:");
 437   Space::print_on(st);
 438 
 439   st->print_cr("promoInfo:");
 440   _promoInfo.print_on(st);
 441 
 442   st->print_cr("_smallLinearAllocBlock");
 443   _smallLinearAllocBlock.print_on(st);
 444 
 445   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 446 
 447   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 448                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 449 }
 450 
 451 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 452 const {
 453   reportIndexedFreeListStatistics();
 454   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 455   gclog_or_tty->print_cr("---------------------------");
 456   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 457   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 458     _indexedFreeList[i].print_on(gclog_or_tty);
 459     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 460          fc = fc->next()) {
 461       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 462                           p2i(fc), p2i((HeapWord*)fc + i),
 463                           fc->cantCoalesce() ? "\t CC" : "");
 464     }
 465   }
 466 }
 467 
 468 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 469 const {
 470   _promoInfo.print_on(st);
 471 }
 472 
 473 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 474 const {
 475   _dictionary->report_statistics();
 476   st->print_cr("Layout of Freelists in Tree");
 477   st->print_cr("---------------------------");
 478   _dictionary->print_free_lists(st);
 479 }
 480 
 481 class BlkPrintingClosure: public BlkClosure {
 482   const CMSCollector*             _collector;
 483   const CompactibleFreeListSpace* _sp;
 484   const CMSBitMap*                _live_bit_map;
 485   const bool                      _post_remark;
 486   outputStream*                   _st;
 487 public:
 488   BlkPrintingClosure(const CMSCollector* collector,
 489                      const CompactibleFreeListSpace* sp,
 490                      const CMSBitMap* live_bit_map,
 491                      outputStream* st):
 492     _collector(collector),
 493     _sp(sp),
 494     _live_bit_map(live_bit_map),
 495     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 496     _st(st) { }
 497   size_t do_blk(HeapWord* addr);
 498 };
 499 
 500 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 501   size_t sz = _sp->block_size_no_stall(addr, _collector);
 502   assert(sz != 0, "Should always be able to compute a size");
 503   if (_sp->block_is_obj(addr)) {
 504     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 505     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 506       p2i(addr),
 507       dead ? "dead" : "live",
 508       sz,
 509       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 510     if (CMSPrintObjectsInDump && !dead) {
 511       oop(addr)->print_on(_st);
 512       _st->print_cr("--------------------------------------");
 513     }
 514   } else { // free block
 515     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 516       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 517     if (CMSPrintChunksInDump) {
 518       ((FreeChunk*)addr)->print_on(_st);
 519       _st->print_cr("--------------------------------------");
 520     }
 521   }
 522   return sz;
 523 }
 524 
 525 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 526   outputStream* st) {
 527   st->print_cr("\n=========================");
 528   st->print_cr("Block layout in CMS Heap:");
 529   st->print_cr("=========================");
 530   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 531   blk_iterate(&bpcl);
 532 
 533   st->print_cr("\n=======================================");
 534   st->print_cr("Order & Layout of Promotion Info Blocks");
 535   st->print_cr("=======================================");
 536   print_promo_info_blocks(st);
 537 
 538   st->print_cr("\n===========================");
 539   st->print_cr("Order of Indexed Free Lists");
 540   st->print_cr("=========================");
 541   print_indexed_free_lists(st);
 542 
 543   st->print_cr("\n=================================");
 544   st->print_cr("Order of Free Lists in Dictionary");
 545   st->print_cr("=================================");
 546   print_dictionary_free_lists(st);
 547 }
 548 
 549 
 550 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 551   assert_lock_strong(&_freelistLock);
 552   assert(PrintFLSStatistics != 0, "Reporting error");
 553   _dictionary->report_statistics();
 554   if (PrintFLSStatistics > 1) {
 555     reportIndexedFreeListStatistics();
 556     size_t total_size = totalSizeInIndexedFreeLists() +
 557                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 558     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 559   }
 560 }
 561 
 562 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 563   assert_lock_strong(&_freelistLock);
 564   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 565                       "--------------------------------\n");
 566   size_t total_size = totalSizeInIndexedFreeLists();
 567   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 568   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 569   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 570   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 571   if (free_blocks != 0) {
 572     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 573   }
 574 }
 575 
 576 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 577   size_t res = 0;
 578   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 579     debug_only(
 580       ssize_t recount = 0;
 581       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 582          fc = fc->next()) {
 583         recount += 1;
 584       }
 585       assert(recount == _indexedFreeList[i].count(),
 586         "Incorrect count in list");
 587     )
 588     res += _indexedFreeList[i].count();
 589   }
 590   return res;
 591 }
 592 
 593 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 594   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 595     if (_indexedFreeList[i].head() != NULL) {
 596       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 597       return (size_t)i;
 598     }
 599   }
 600   return 0;
 601 }
 602 
 603 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 604   HeapWord* prevEnd = end();
 605   assert(prevEnd != value, "unnecessary set_end call");
 606   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 607         "New end is below unallocated block");
 608   _end = value;
 609   if (prevEnd != NULL) {
 610     // Resize the underlying block offset table.
 611     _bt.resize(pointer_delta(value, bottom()));
 612     if (value <= prevEnd) {
 613       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 614              "New end is below unallocated block");
 615     } else {
 616       // Now, take this new chunk and add it to the free blocks.
 617       // Note that the BOT has not yet been updated for this block.
 618       size_t newFcSize = pointer_delta(value, prevEnd);
 619       // XXX This is REALLY UGLY and should be fixed up. XXX
 620       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 621         // Mark the boundary of the new block in BOT
 622         _bt.mark_block(prevEnd, value);
 623         // put it all in the linAB
 624         MutexLockerEx x(parDictionaryAllocLock(),
 625                         Mutex::_no_safepoint_check_flag);
 626         _smallLinearAllocBlock._ptr = prevEnd;
 627         _smallLinearAllocBlock._word_size = newFcSize;
 628         repairLinearAllocBlock(&_smallLinearAllocBlock);
 629         // Births of chunks put into a LinAB are not recorded.  Births
 630         // of chunks as they are allocated out of a LinAB are.
 631       } else {
 632         // Add the block to the free lists, if possible coalescing it
 633         // with the last free block, and update the BOT and census data.
 634         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 635       }
 636     }
 637   }
 638 }
 639 
 640 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 641   CompactibleFreeListSpace* _cfls;
 642   CMSCollector* _collector;
 643 protected:
 644   // Override.
 645 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 646   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 647                                        HeapWord* bottom, HeapWord* top, \
 648                                        ClosureType* cl);                \
 649       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 650                                        HeapWord* bottom, HeapWord* top, \
 651                                        ClosureType* cl);                \
 652     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 653                                        HeapWord* bottom, HeapWord* top, \
 654                                        ClosureType* cl)
 655   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 656   walk_mem_region_with_cl_DECL(FilteringClosure);
 657 
 658 public:
 659   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 660                       CMSCollector* collector,
 661                       ExtendedOopClosure* cl,
 662                       CardTableModRefBS::PrecisionStyle precision,
 663                       HeapWord* boundary) :
 664     Filtering_DCTOC(sp, cl, precision, boundary),
 665     _cfls(sp), _collector(collector) {}
 666 };
 667 
 668 // We de-virtualize the block-related calls below, since we know that our
 669 // space is a CompactibleFreeListSpace.
 670 
 671 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 672 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 673                                                  HeapWord* bottom,              \
 674                                                  HeapWord* top,                 \
 675                                                  ClosureType* cl) {             \
 676    bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;                 \
 677    if (is_par) {                                                                \
 678      assert(GenCollectedHeap::heap()->n_par_threads() ==                        \
 679             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
 680      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 681    } else {                                                                     \
 682      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 683    }                                                                            \
 684 }                                                                               \
 685 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 686                                                       HeapWord* bottom,         \
 687                                                       HeapWord* top,            \
 688                                                       ClosureType* cl) {        \
 689   /* Skip parts that are before "mr", in case "block_start" sent us             \
 690      back too far. */                                                           \
 691   HeapWord* mr_start = mr.start();                                              \
 692   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 693   HeapWord* next = bottom + bot_size;                                           \
 694   while (next < mr_start) {                                                     \
 695     bottom = next;                                                              \
 696     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 697     next = bottom + bot_size;                                                   \
 698   }                                                                             \
 699                                                                                 \
 700   while (bottom < top) {                                                        \
 701     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 702         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 703                     oop(bottom)) &&                                             \
 704         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 705       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 706       bottom += _cfls->adjustObjectSize(word_sz);                               \
 707     } else {                                                                    \
 708       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 709     }                                                                           \
 710   }                                                                             \
 711 }                                                                               \
 712 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 713                                                         HeapWord* bottom,       \
 714                                                         HeapWord* top,          \
 715                                                         ClosureType* cl) {      \
 716   /* Skip parts that are before "mr", in case "block_start" sent us             \
 717      back too far. */                                                           \
 718   HeapWord* mr_start = mr.start();                                              \
 719   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 720   HeapWord* next = bottom + bot_size;                                           \
 721   while (next < mr_start) {                                                     \
 722     bottom = next;                                                              \
 723     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 724     next = bottom + bot_size;                                                   \
 725   }                                                                             \
 726                                                                                 \
 727   while (bottom < top) {                                                        \
 728     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 729         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 730                     oop(bottom)) &&                                             \
 731         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 732       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 733       bottom += _cfls->adjustObjectSize(word_sz);                               \
 734     } else {                                                                    \
 735       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 736     }                                                                           \
 737   }                                                                             \
 738 }
 739 
 740 // (There are only two of these, rather than N, because the split is due
 741 // only to the introduction of the FilteringClosure, a local part of the
 742 // impl of this abstraction.)
 743 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 745 
 746 DirtyCardToOopClosure*
 747 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 748                                       CardTableModRefBS::PrecisionStyle precision,
 749                                       HeapWord* boundary) {
 750   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 751 }
 752 
 753 
 754 // Note on locking for the space iteration functions:
 755 // since the collector's iteration activities are concurrent with
 756 // allocation activities by mutators, absent a suitable mutual exclusion
 757 // mechanism the iterators may go awry. For instance a block being iterated
 758 // may suddenly be allocated or divided up and part of it allocated and
 759 // so on.
 760 
 761 // Apply the given closure to each block in the space.
 762 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 763   assert_lock_strong(freelistLock());
 764   HeapWord *cur, *limit;
 765   for (cur = bottom(), limit = end(); cur < limit;
 766        cur += cl->do_blk_careful(cur));
 767 }
 768 
 769 // Apply the given closure to each block in the space.
 770 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 771   assert_lock_strong(freelistLock());
 772   HeapWord *cur, *limit;
 773   for (cur = bottom(), limit = end(); cur < limit;
 774        cur += cl->do_blk(cur));
 775 }
 776 
 777 // Apply the given closure to each oop in the space.
 778 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 779   assert_lock_strong(freelistLock());
 780   HeapWord *cur, *limit;
 781   size_t curSize;
 782   for (cur = bottom(), limit = end(); cur < limit;
 783        cur += curSize) {
 784     curSize = block_size(cur);
 785     if (block_is_obj(cur)) {
 786       oop(cur)->oop_iterate(cl);
 787     }
 788   }
 789 }
 790 
 791 // NOTE: In the following methods, in order to safely be able to
 792 // apply the closure to an object, we need to be sure that the
 793 // object has been initialized. We are guaranteed that an object
 794 // is initialized if we are holding the Heap_lock with the
 795 // world stopped.
 796 void CompactibleFreeListSpace::verify_objects_initialized() const {
 797   if (is_init_completed()) {
 798     assert_locked_or_safepoint(Heap_lock);
 799     if (Universe::is_fully_initialized()) {
 800       guarantee(SafepointSynchronize::is_at_safepoint(),
 801                 "Required for objects to be initialized");
 802     }
 803   } // else make a concession at vm start-up
 804 }
 805 
 806 // Apply the given closure to each object in the space
 807 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 808   assert_lock_strong(freelistLock());
 809   NOT_PRODUCT(verify_objects_initialized());
 810   HeapWord *cur, *limit;
 811   size_t curSize;
 812   for (cur = bottom(), limit = end(); cur < limit;
 813        cur += curSize) {
 814     curSize = block_size(cur);
 815     if (block_is_obj(cur)) {
 816       blk->do_object(oop(cur));
 817     }
 818   }
 819 }
 820 
 821 // Apply the given closure to each live object in the space
 822 //   The usage of CompactibleFreeListSpace
 823 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
 824 // objects in the space with references to objects that are no longer
 825 // valid.  For example, an object may reference another object
 826 // that has already been sweep up (collected).  This method uses
 827 // obj_is_alive() to determine whether it is safe to apply the closure to
 828 // an object.  See obj_is_alive() for details on how liveness of an
 829 // object is decided.
 830 
 831 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 832   assert_lock_strong(freelistLock());
 833   NOT_PRODUCT(verify_objects_initialized());
 834   HeapWord *cur, *limit;
 835   size_t curSize;
 836   for (cur = bottom(), limit = end(); cur < limit;
 837        cur += curSize) {
 838     curSize = block_size(cur);
 839     if (block_is_obj(cur) && obj_is_alive(cur)) {
 840       blk->do_object(oop(cur));
 841     }
 842   }
 843 }
 844 
 845 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 846                                                   UpwardsObjectClosure* cl) {
 847   assert_locked(freelistLock());
 848   NOT_PRODUCT(verify_objects_initialized());
 849   assert(!mr.is_empty(), "Should be non-empty");
 850   // We use MemRegion(bottom(), end()) rather than used_region() below
 851   // because the two are not necessarily equal for some kinds of
 852   // spaces, in particular, certain kinds of free list spaces.
 853   // We could use the more complicated but more precise:
 854   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 855   // but the slight imprecision seems acceptable in the assertion check.
 856   assert(MemRegion(bottom(), end()).contains(mr),
 857          "Should be within used space");
 858   HeapWord* prev = cl->previous();   // max address from last time
 859   if (prev >= mr.end()) { // nothing to do
 860     return;
 861   }
 862   // This assert will not work when we go from cms space to perm
 863   // space, and use same closure. Easy fix deferred for later. XXX YSR
 864   // assert(prev == NULL || contains(prev), "Should be within space");
 865 
 866   bool last_was_obj_array = false;
 867   HeapWord *blk_start_addr, *region_start_addr;
 868   if (prev > mr.start()) {
 869     region_start_addr = prev;
 870     blk_start_addr    = prev;
 871     // The previous invocation may have pushed "prev" beyond the
 872     // last allocated block yet there may be still be blocks
 873     // in this region due to a particular coalescing policy.
 874     // Relax the assertion so that the case where the unallocated
 875     // block is maintained and "prev" is beyond the unallocated
 876     // block does not cause the assertion to fire.
 877     assert((BlockOffsetArrayUseUnallocatedBlock &&
 878             (!is_in(prev))) ||
 879            (blk_start_addr == block_start(region_start_addr)), "invariant");
 880   } else {
 881     region_start_addr = mr.start();
 882     blk_start_addr    = block_start(region_start_addr);
 883   }
 884   HeapWord* region_end_addr = mr.end();
 885   MemRegion derived_mr(region_start_addr, region_end_addr);
 886   while (blk_start_addr < region_end_addr) {
 887     const size_t size = block_size(blk_start_addr);
 888     if (block_is_obj(blk_start_addr)) {
 889       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 890     } else {
 891       last_was_obj_array = false;
 892     }
 893     blk_start_addr += size;
 894   }
 895   if (!last_was_obj_array) {
 896     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 897            "Should be within (closed) used space");
 898     assert(blk_start_addr > prev, "Invariant");
 899     cl->set_previous(blk_start_addr); // min address for next time
 900   }
 901 }
 902 
 903 // Callers of this iterator beware: The closure application should
 904 // be robust in the face of uninitialized objects and should (always)
 905 // return a correct size so that the next addr + size below gives us a
 906 // valid block boundary. [See for instance,
 907 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 908 // in ConcurrentMarkSweepGeneration.cpp.]
 909 HeapWord*
 910 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 911   ObjectClosureCareful* cl) {
 912   assert_lock_strong(freelistLock());
 913   // Can't use used_region() below because it may not necessarily
 914   // be the same as [bottom(),end()); although we could
 915   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 916   // that appears too cumbersome, so we just do the simpler check
 917   // in the assertion below.
 918   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 919          "mr should be non-empty and within used space");
 920   HeapWord *addr, *end;
 921   size_t size;
 922   for (addr = block_start_careful(mr.start()), end  = mr.end();
 923        addr < end; addr += size) {
 924     FreeChunk* fc = (FreeChunk*)addr;
 925     if (fc->is_free()) {
 926       // Since we hold the free list lock, which protects direct
 927       // allocation in this generation by mutators, a free object
 928       // will remain free throughout this iteration code.
 929       size = fc->size();
 930     } else {
 931       // Note that the object need not necessarily be initialized,
 932       // because (for instance) the free list lock does NOT protect
 933       // object initialization. The closure application below must
 934       // therefore be correct in the face of uninitialized objects.
 935       size = cl->do_object_careful_m(oop(addr), mr);
 936       if (size == 0) {
 937         // An unparsable object found. Signal early termination.
 938         return addr;
 939       }
 940     }
 941   }
 942   return NULL;
 943 }
 944 
 945 
 946 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 947   NOT_PRODUCT(verify_objects_initialized());
 948   return _bt.block_start(p);
 949 }
 950 
 951 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 952   return _bt.block_start_careful(p);
 953 }
 954 
 955 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 956   NOT_PRODUCT(verify_objects_initialized());
 957   // This must be volatile, or else there is a danger that the compiler
 958   // will compile the code below into a sometimes-infinite loop, by keeping
 959   // the value read the first time in a register.
 960   while (true) {
 961     // We must do this until we get a consistent view of the object.
 962     if (FreeChunk::indicatesFreeChunk(p)) {
 963       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 964       size_t res = fc->size();
 965 
 966       // Bugfix for systems with weak memory model (PPC64/IA64). The
 967       // block's free bit was set and we have read the size of the
 968       // block. Acquire and check the free bit again. If the block is
 969       // still free, the read size is correct.
 970       OrderAccess::acquire();
 971 
 972       // If the object is still a free chunk, return the size, else it
 973       // has been allocated so try again.
 974       if (FreeChunk::indicatesFreeChunk(p)) {
 975         assert(res != 0, "Block size should not be 0");
 976         return res;
 977       }
 978     } else {
 979       // must read from what 'p' points to in each loop.
 980       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 981       if (k != NULL) {
 982         assert(k->is_klass(), "Should really be klass oop.");
 983         oop o = (oop)p;
 984         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 985 
 986         // Bugfix for systems with weak memory model (PPC64/IA64).
 987         // The object o may be an array. Acquire to make sure that the array
 988         // size (third word) is consistent.
 989         OrderAccess::acquire();
 990 
 991         size_t res = o->size_given_klass(k);
 992         res = adjustObjectSize(res);
 993         assert(res != 0, "Block size should not be 0");
 994         return res;
 995       }
 996     }
 997   }
 998 }
 999 
1000 // TODO: Now that is_parsable is gone, we should combine these two functions.
1001 // A variant of the above that uses the Printezis bits for
1002 // unparsable but allocated objects. This avoids any possible
1003 // stalls waiting for mutators to initialize objects, and is
1004 // thus potentially faster than the variant above. However,
1005 // this variant may return a zero size for a block that is
1006 // under mutation and for which a consistent size cannot be
1007 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1008 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1009                                                      const CMSCollector* c)
1010 const {
1011   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1012   // This must be volatile, or else there is a danger that the compiler
1013   // will compile the code below into a sometimes-infinite loop, by keeping
1014   // the value read the first time in a register.
1015   DEBUG_ONLY(uint loops = 0;)
1016   while (true) {
1017     // We must do this until we get a consistent view of the object.
1018     if (FreeChunk::indicatesFreeChunk(p)) {
1019       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1020       size_t res = fc->size();
1021 
1022       // Bugfix for systems with weak memory model (PPC64/IA64). The
1023       // free bit of the block was set and we have read the size of
1024       // the block. Acquire and check the free bit again. If the
1025       // block is still free, the read size is correct.
1026       OrderAccess::acquire();
1027 
1028       if (FreeChunk::indicatesFreeChunk(p)) {
1029         assert(res != 0, "Block size should not be 0");
1030         assert(loops == 0, "Should be 0");
1031         return res;
1032       }
1033     } else {
1034       // must read from what 'p' points to in each loop.
1035       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1036       // We trust the size of any object that has a non-NULL
1037       // klass and (for those in the perm gen) is parsable
1038       // -- irrespective of its conc_safe-ty.
1039       if (k != NULL) {
1040         assert(k->is_klass(), "Should really be klass oop.");
1041         oop o = (oop)p;
1042         assert(o->is_oop(), "Should be an oop");
1043 
1044         // Bugfix for systems with weak memory model (PPC64/IA64).
1045         // The object o may be an array. Acquire to make sure that the array
1046         // size (third word) is consistent.
1047         OrderAccess::acquire();
1048 
1049         size_t res = o->size_given_klass(k);
1050         res = adjustObjectSize(res);
1051         assert(res != 0, "Block size should not be 0");
1052         return res;
1053       } else {
1054         // May return 0 if P-bits not present.
1055         return c->block_size_if_printezis_bits(p);
1056       }
1057     }
1058     assert(loops == 0, "Can loop at most once");
1059     DEBUG_ONLY(loops++;)
1060   }
1061 }
1062 
1063 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1064   NOT_PRODUCT(verify_objects_initialized());
1065   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1066   FreeChunk* fc = (FreeChunk*)p;
1067   if (fc->is_free()) {
1068     return fc->size();
1069   } else {
1070     // Ignore mark word because this may be a recently promoted
1071     // object whose mark word is used to chain together grey
1072     // objects (the last one would have a null value).
1073     assert(oop(p)->is_oop(true), "Should be an oop");
1074     return adjustObjectSize(oop(p)->size());
1075   }
1076 }
1077 
1078 // This implementation assumes that the property of "being an object" is
1079 // stable.  But being a free chunk may not be (because of parallel
1080 // promotion.)
1081 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1082   FreeChunk* fc = (FreeChunk*)p;
1083   assert(is_in_reserved(p), "Should be in space");
1084   // When doing a mark-sweep-compact of the CMS generation, this
1085   // assertion may fail because prepare_for_compaction() uses
1086   // space that is garbage to maintain information on ranges of
1087   // live objects so that these live ranges can be moved as a whole.
1088   // Comment out this assertion until that problem can be solved
1089   // (i.e., that the block start calculation may look at objects
1090   // at address below "p" in finding the object that contains "p"
1091   // and those objects (if garbage) may have been modified to hold
1092   // live range information.
1093   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1094   //        "Should be a block boundary");
1095   if (FreeChunk::indicatesFreeChunk(p)) return false;
1096   Klass* k = oop(p)->klass_or_null();
1097   if (k != NULL) {
1098     // Ignore mark word because it may have been used to
1099     // chain together promoted objects (the last one
1100     // would have a null value).
1101     assert(oop(p)->is_oop(true), "Should be an oop");
1102     return true;
1103   } else {
1104     return false;  // Was not an object at the start of collection.
1105   }
1106 }
1107 
1108 // Check if the object is alive. This fact is checked either by consulting
1109 // the main marking bitmap in the sweeping phase or, if it's a permanent
1110 // generation and we're not in the sweeping phase, by checking the
1111 // perm_gen_verify_bit_map where we store the "deadness" information if
1112 // we did not sweep the perm gen in the most recent previous GC cycle.
1113 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1114   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1115          "Else races are possible");
1116   assert(block_is_obj(p), "The address should point to an object");
1117 
1118   // If we're sweeping, we use object liveness information from the main bit map
1119   // for both perm gen and old gen.
1120   // We don't need to lock the bitmap (live_map or dead_map below), because
1121   // EITHER we are in the middle of the sweeping phase, and the
1122   // main marking bit map (live_map below) is locked,
1123   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1124   // is stable, because it's mutated only in the sweeping phase.
1125   // NOTE: This method is also used by jmap where, if class unloading is
1126   // off, the results can return "false" for legitimate perm objects,
1127   // when we are not in the midst of a sweeping phase, which can result
1128   // in jmap not reporting certain perm gen objects. This will be moot
1129   // if/when the perm gen goes away in the future.
1130   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1131     CMSBitMap* live_map = _collector->markBitMap();
1132     return live_map->par_isMarked((HeapWord*) p);
1133   }
1134   return true;
1135 }
1136 
1137 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1138   FreeChunk* fc = (FreeChunk*)p;
1139   assert(is_in_reserved(p), "Should be in space");
1140   assert(_bt.block_start(p) == p, "Should be a block boundary");
1141   if (!fc->is_free()) {
1142     // Ignore mark word because it may have been used to
1143     // chain together promoted objects (the last one
1144     // would have a null value).
1145     assert(oop(p)->is_oop(true), "Should be an oop");
1146     return true;
1147   }
1148   return false;
1149 }
1150 
1151 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1152 // approximate answer if you don't hold the freelistlock when you call this.
1153 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1154   size_t size = 0;
1155   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1156     debug_only(
1157       // We may be calling here without the lock in which case we
1158       // won't do this modest sanity check.
1159       if (freelistLock()->owned_by_self()) {
1160         size_t total_list_size = 0;
1161         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1162           fc = fc->next()) {
1163           total_list_size += i;
1164         }
1165         assert(total_list_size == i * _indexedFreeList[i].count(),
1166                "Count in list is incorrect");
1167       }
1168     )
1169     size += i * _indexedFreeList[i].count();
1170   }
1171   return size;
1172 }
1173 
1174 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1175   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1176   return allocate(size);
1177 }
1178 
1179 HeapWord*
1180 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1181   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1182 }
1183 
1184 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1185   assert_lock_strong(freelistLock());
1186   HeapWord* res = NULL;
1187   assert(size == adjustObjectSize(size),
1188          "use adjustObjectSize() before calling into allocate()");
1189 
1190   if (_adaptive_freelists) {
1191     res = allocate_adaptive_freelists(size);
1192   } else {  // non-adaptive free lists
1193     res = allocate_non_adaptive_freelists(size);
1194   }
1195 
1196   if (res != NULL) {
1197     // check that res does lie in this space!
1198     assert(is_in_reserved(res), "Not in this space!");
1199     assert(is_aligned((void*)res), "alignment check");
1200 
1201     FreeChunk* fc = (FreeChunk*)res;
1202     fc->markNotFree();
1203     assert(!fc->is_free(), "shouldn't be marked free");
1204     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1205     // Verify that the block offset table shows this to
1206     // be a single block, but not one which is unallocated.
1207     _bt.verify_single_block(res, size);
1208     _bt.verify_not_unallocated(res, size);
1209     // mangle a just allocated object with a distinct pattern.
1210     debug_only(fc->mangleAllocated(size));
1211   }
1212 
1213   return res;
1214 }
1215 
1216 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1217   HeapWord* res = NULL;
1218   // try and use linear allocation for smaller blocks
1219   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1220     // if successful, the following also adjusts block offset table
1221     res = getChunkFromSmallLinearAllocBlock(size);
1222   }
1223   // Else triage to the indexed lists for smaller sizes, or to the dictionary.
1224   if (res == NULL) {
1225     if (size < SmallForDictionary) {
1226       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1227     } else {
1228       // else get it from the big dictionary; if even this doesn't
1229       // work we are out of luck.
1230       res = (HeapWord*)getChunkFromDictionaryExact(size);
1231     }
1232   }
1233 
1234   return res;
1235 }
1236 
1237 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1238   assert_lock_strong(freelistLock());
1239   HeapWord* res = NULL;
1240   assert(size == adjustObjectSize(size),
1241          "use adjustObjectSize() before calling into allocate()");
1242 
1243   // Strategy
1244   //   if small
1245   //     try the exact size from the small-object indexed lists
1246   //     then the small linear allocation block (linAB)
1247   //     then raid the lists of greater-sized chunks
1248   //   else
1249   //     try the dictionary
1250   //     then the small linAB remainder if it has the space
1251   // Try allocating exact size from indexTable first
1252   if (size < IndexSetSize) {
1253     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1254     if (res != NULL) {
1255       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1256         "Not removed from free list");
1257       // no block offset table adjustment is necessary on blocks in
1258       // the indexed lists.
1259 
1260     // Try allocating from the small LinAB
1261     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1262         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1263         // if successful, the above also adjusts block offset table
1264         // Note that this call will refill the LinAB to
1265         // satisfy the request.  This is different from
1266         // evm.
1267         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1268     } else {
1269       // Raid the exact free lists larger than size, even if they are not
1270       // overpopulated.
1271       res = (HeapWord*) getChunkFromGreater(size);
1272     }
1273   } else {
1274     // Big objects get allocated directly from the dictionary.
1275     res = (HeapWord*) getChunkFromDictionaryExact(size);
1276     if (res == NULL) {
1277       // Try hard not to fail since an allocation failure will likely
1278       // trigger a synchronous GC.  Try to get the space from the
1279       // allocation blocks.
1280       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1281     }
1282   }
1283 
1284   return res;
1285 }
1286 
1287 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1288 // when promoting obj.
1289 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1290   // Depending on the object size, expansion may require refilling either a
1291   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2 * MinChunkSize
1292   // is added because the dictionary may over-allocate to avoid fragmentation.
1293   size_t space = obj_size;
1294   if (!_adaptive_freelists) {
1295     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1296   }
1297   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1298   return space;
1299 }
1300 
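     // Satisfy a request for numWords by raiding a larger free chunk: first
     // scan the indexed lists for sizes >= numWords + MinChunkSize, then fall
     // back to the dictionary; either way the surplus is split off and
     // returned to the free lists.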
1301 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1302   FreeChunk* ret;
1303 
1304   assert(numWords >= MinChunkSize, "Size is less than minimum");
1305   assert(linearAllocationWouldFail() || bestFitFirst(),
1306     "Should not be here");
1307 
1308   size_t i;
1309   size_t currSize = numWords + MinChunkSize;
1310   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1311   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1312     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1313     if (fl->head()) {
1314       ret = getFromListGreater(fl, numWords);
1315       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1316       return ret;
1317     }
1318   }
1319 
1320   currSize = MAX2((size_t)SmallForDictionary,
1321                   (size_t)(numWords + MinChunkSize));
1322 
1323   /* Try to get a chunk that satisfies request, while avoiding
1324      fragmentation that can't be handled. */
1325   {
1326     ret =  dictionary()->get_chunk(currSize);
1327     if (ret != NULL) {
1328       assert(ret->size() - numWords >= MinChunkSize,
1329              "Chunk is too small");
1330       _bt.allocated((HeapWord*)ret, ret->size());
1331       /* Carve returned chunk. */
1332       (void) splitChunkAndReturnRemainder(ret, numWords);
1333       /* Label this as no longer a free chunk. */
1334       assert(ret->is_free(), "This chunk should be free");
1335       ret->link_prev(NULL);
1336     }
1337     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1338     return ret;
1339   }
1340   ShouldNotReachHere();
1341 }
1342 
1343 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1344   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1345   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1346 }
1347 
1348 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1349   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1350          (_smallLinearAllocBlock._word_size == fc->size()),
1351          "Linear allocation block shows incorrect size");
1352   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1353           (_smallLinearAllocBlock._word_size == fc->size()));
1354 }
1355 
1356 // Check if the purported free chunk is present either as a linear
1357 // allocation block, the size-indexed table of (smaller) free blocks,
1358 // or the larger free blocks kept in the binary tree dictionary.
1359 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1360   if (verify_chunk_is_linear_alloc_block(fc)) {
1361     return true;
1362   } else if (fc->size() < IndexSetSize) {
1363     return verifyChunkInIndexedFreeLists(fc);
1364   } else {
1365     return dictionary()->verify_chunk_in_free_list(fc);
1366   }
1367 }
1368 
1369 #ifndef PRODUCT
1370 void CompactibleFreeListSpace::assert_locked() const {
1371   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1372 }
1373 
1374 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1375   CMSLockVerifier::assert_locked(lock);
1376 }
1377 #endif
1378 
1379 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1380   // In the parallel case, the main thread holds the free list lock
1381   // on behalf of the parallel threads.
1382   FreeChunk* fc;
1383   {
1384     // If GC is parallel, this might be called by several threads.
1385     // This should be rare enough that the locking overhead won't affect
1386     // the sequential code.
1387     MutexLockerEx x(parDictionaryAllocLock(),
1388                     Mutex::_no_safepoint_check_flag);
1389     fc = getChunkFromDictionary(size);
1390   }
1391   if (fc != NULL) {
1392     fc->dontCoalesce();
1393     assert(fc->is_free(), "Should be free, but not coalescable");
1394     // Verify that the block offset table shows this to
1395     // be a single block, but not one which is unallocated.
1396     _bt.verify_single_block((HeapWord*)fc, fc->size());
1397     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1398   }
1399   return fc;
1400 }
1401 
1402 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1403   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1404   assert_locked();
1405 
1406   // if we are tracking promotions, then first ensure space for
1407   // promotion (including spooling space for saving header if necessary).
1408   // then allocate and copy, then track promoted info if needed.
1409   // When tracking (see PromotionInfo::track()), the mark word may
1410   // be displaced and in this case restoration of the mark word
1411   // occurs in the (oop_since_save_marks_)iterate phase.
1412   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1413     return NULL;
1414   }
1415   // Call the allocate(size_t, bool) form directly to avoid the
1416   // additional call through the allocate(size_t) form.  Having
1417   // the compiler inline the call is problematic because allocate(size_t)
1418   // is a virtual method.
1419   HeapWord* res = allocate(adjustObjectSize(obj_size));
1420   if (res != NULL) {
1421     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1422     // if we should be tracking promotions, do so.
1423     if (_promoInfo.tracking()) {
1424         _promoInfo.track((PromotedObject*)res);
1425     }
1426   }
1427   return oop(res);
1428 }
1429 
1430 HeapWord*
1431 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1432   assert_locked();
1433   assert(size >= MinChunkSize, "minimum chunk size");
1434   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1435     "maximum from smallLinearAllocBlock");
1436   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1437 }
1438 
1439 HeapWord*
1440 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1441                                                        size_t size) {
1442   assert_locked();
1443   assert(size >= MinChunkSize, "too small");
1444   HeapWord* res = NULL;
1445   // Try to do linear allocation from blk, making sure that it is not empty.
1446   if (blk->_word_size == 0) {
1447     // We have probably been unable to fill this either in the prologue or
1448     // when it was exhausted at the last linear allocation. Bail out until
1449     // next time.
1450     assert(blk->_ptr == NULL, "consistency check");
1451     return NULL;
1452   }
1453   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1454   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1455   if (res != NULL) return res;
1456 
1457   // about to exhaust this linear allocation block
1458   if (blk->_word_size == size) { // exactly satisfied
1459     res = blk->_ptr;
1460     _bt.allocated(res, blk->_word_size);
1461   } else if (size + MinChunkSize <= blk->_refillSize) {
1462     size_t sz = blk->_word_size;
1463     // Update _unallocated_block if the size is such that the chunk would be
1464     // returned to the indexed free list.  All other chunks in the indexed
1465     // free lists are allocated from the dictionary, so _unallocated_block
1466     // has already been adjusted for them.  Do it here so that the cost is
1467     // incurred for all chunks added back to the indexed free lists.
1468     if (sz < SmallForDictionary) {
1469       _bt.allocated(blk->_ptr, sz);
1470     }
1471     // Return the chunk that isn't big enough, and then refill below.
1472     addChunkToFreeLists(blk->_ptr, sz);
1473     split_birth(sz);
1474     // Don't keep statistics on adding back chunk from a LinAB.
1475   } else {
1476     // A refilled block would not satisfy the request.
1477     return NULL;
1478   }
1479 
1480   blk->_ptr = NULL; blk->_word_size = 0;
1481   refillLinearAllocBlock(blk);
1482   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1483          "block was replenished");
1484   if (res != NULL) {
1485     split_birth(size);
1486     repairLinearAllocBlock(blk);
1487   } else if (blk->_ptr != NULL) {
1488     res = blk->_ptr;
1489     size_t blk_size = blk->_word_size;
1490     blk->_word_size -= size;
1491     blk->_ptr  += size;
1492     split_birth(size);
1493     repairLinearAllocBlock(blk);
1494     // Update BOT last so that other (parallel) GC threads see a consistent
1495     // view of the BOT and free blocks.
1496     // Above must occur before BOT is updated below.
1497     OrderAccess::storestore();
1498     _bt.split_block(res, blk_size, size);  // adjust block offset table
1499   }
1500   return res;
1501 }
1502 
1503 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1504                                         LinearAllocBlock* blk,
1505                                         size_t size) {
1506   assert_locked();
1507   assert(size >= MinChunkSize, "too small");
1508 
1509   HeapWord* res = NULL;
1510   // This is the common case.  Keep it simple.
1511   if (blk->_word_size >= size + MinChunkSize) {
1512     assert(blk->_ptr != NULL, "consistency check");
1513     res = blk->_ptr;
1514     // Note that the BOT is up-to-date for the linAB before allocation.  It
1515     // indicates the start of the linAB.  The split_block() updates the
1516     // BOT for the linAB after the allocation (indicates the start of the
1517     // next chunk to be allocated).
1518     size_t blk_size = blk->_word_size;
1519     blk->_word_size -= size;
1520     blk->_ptr  += size;
1521     split_birth(size);
1522     repairLinearAllocBlock(blk);
1523     // Update BOT last so that other (parallel) GC threads see a consistent
1524     // view of the BOT and free blocks.
1525     // Above must occur before BOT is updated below.
1526     OrderAccess::storestore();
1527     _bt.split_block(res, blk_size, size);  // adjust block offset table
1528     _bt.allocated(res, size);
1529   }
1530   return res;
1531 }
1532 
1533 FreeChunk*
1534 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1535   assert_locked();
1536   assert(size < SmallForDictionary, "just checking");
1537   FreeChunk* res;
1538   res = _indexedFreeList[size].get_chunk_at_head();
1539   if (res == NULL) {
1540     res = getChunkFromIndexedFreeListHelper(size);
1541   }
1542   _bt.verify_not_unallocated((HeapWord*) res, size);
1543   assert(res == NULL || res->size() == size, "Incorrect block size");
1544   return res;
1545 }
1546 
1547 FreeChunk*
1548 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1549   bool replenish) {
1550   assert_locked();
1551   FreeChunk* fc = NULL;
1552   if (size < SmallForDictionary) {
1553     assert(_indexedFreeList[size].head() == NULL ||
1554       _indexedFreeList[size].surplus() <= 0,
1555       "List for this size should be empty or under populated");
1556     // Try best fit in exact lists before replenishing the list
1557     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1558       // Replenish list.
1559       //
1560       // Things tried that failed.
1561       //   Tried allocating out of the two LinAB's first before
1562       // replenishing lists.
1563       //   Tried small linAB of size 256 (size in indexed list)
1564       // and replenishing indexed lists from the small linAB.
1565       //
1566       FreeChunk* newFc = NULL;
1567       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1568       if (replenish_size < SmallForDictionary) {
1569         // Do not replenish from an underpopulated size.
1570         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1571             _indexedFreeList[replenish_size].head() != NULL) {
1572           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1573         } else if (bestFitFirst()) {
1574           newFc = bestFitSmall(replenish_size);
1575         }
1576       }
1577       if (newFc == NULL && replenish_size > size) {
1578         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1579         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1580       }
1581       // Note: The split-death stats update for the block obtained above
1582       // is recorded below, precisely when we know we are actually
1583       // going to split it into more than one piece.
1584       if (newFc != NULL) {
1585         if  (replenish || CMSReplenishIntermediate) {
1586           // Replenish this list and return one block to caller.
1587           size_t i;
1588           FreeChunk *curFc, *nextFc;
1589           size_t num_blk = newFc->size() / size;
1590           assert(num_blk >= 1, "Smaller than requested?");
1591           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1592           if (num_blk > 1) {
1593             // we are sure we will be splitting the block just obtained
1594             // into multiple pieces; record the split-death of the original
1595             splitDeath(replenish_size);
1596           }
1597           // carve up and link blocks 0, ..., num_blk - 2
1598           // The last chunk is not added to the lists but is returned as the
1599           // free chunk.
1600           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1601                i = 0;
1602                i < (num_blk - 1);
1603                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1604                i++) {
1605             curFc->set_size(size);
1606             // Don't record this as a return in order to try and
1607             // determine the "returns" from a GC.
1608             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1609             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1610             _bt.mark_block((HeapWord*)curFc, size);
1611             split_birth(size);
1612             // Don't record the initial population of the indexed list
1613             // as a split birth.
1614           }
1615 
1616           // check that the arithmetic was OK above
1617           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1618             "inconsistency in carving newFc");
1619           curFc->set_size(size);
1620           _bt.mark_block((HeapWord*)curFc, size);
1621           split_birth(size);
1622           fc = curFc;
1623         } else {
1624           // Return entire block to caller
1625           fc = newFc;
1626         }
1627       }
1628     }
1629   } else {
1630     // Get a free chunk from the free chunk dictionary to be returned to
1631     // replenish the indexed free list.
1632     fc = getChunkFromDictionaryExact(size);
1633   }
1634   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1635   return fc;
1636 }
1637 
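     // Fetch a chunk of at least "size" words from the dictionary, splitting
     // off any excess of MinChunkSize or more so that the chunk returned
     // satisfies size <= fc->size() < size + MinChunkSize.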
1638 FreeChunk*
1639 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1640   assert_locked();
1641   FreeChunk* fc = _dictionary->get_chunk(size,
1642                                          FreeBlockDictionary<FreeChunk>::atLeast);
1643   if (fc == NULL) {
1644     return NULL;
1645   }
1646   _bt.allocated((HeapWord*)fc, fc->size());
1647   if (fc->size() >= size + MinChunkSize) {
1648     fc = splitChunkAndReturnRemainder(fc, size);
1649   }
1650   assert(fc->size() >= size, "chunk too small");
1651   assert(fc->size() < size + MinChunkSize, "chunk too big");
1652   _bt.verify_single_block((HeapWord*)fc, fc->size());
1653   return fc;
1654 }
1655 
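     // Fetch a chunk of exactly "size" words. If the dictionary hands back a
     // chunk whose remainder after trimming would be smaller than MinChunkSize,
     // it is returned to the dictionary and a chunk of at least
     // size + MinChunkSize is requested so the split can proceed.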
1656 FreeChunk*
1657 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1658   assert_locked();
1659   FreeChunk* fc = _dictionary->get_chunk(size,
1660                                          FreeBlockDictionary<FreeChunk>::atLeast);
1661   if (fc == NULL) {
1662     return fc;
1663   }
1664   _bt.allocated((HeapWord*)fc, fc->size());
1665   if (fc->size() == size) {
1666     _bt.verify_single_block((HeapWord*)fc, size);
1667     return fc;
1668   }
1669   assert(fc->size() > size, "get_chunk() guarantee");
1670   if (fc->size() < size + MinChunkSize) {
1671     // Return the chunk to the dictionary and go get a bigger one.
1672     returnChunkToDictionary(fc);
1673     fc = _dictionary->get_chunk(size + MinChunkSize,
1674                                 FreeBlockDictionary<FreeChunk>::atLeast);
1675     if (fc == NULL) {
1676       return NULL;
1677     }
1678     _bt.allocated((HeapWord*)fc, fc->size());
1679   }
1680   assert(fc->size() >= size + MinChunkSize, "tautology");
1681   fc = splitChunkAndReturnRemainder(fc, size);
1682   assert(fc->size() == size, "chunk is wrong size");
1683   _bt.verify_single_block((HeapWord*)fc, size);
1684   return fc;
1685 }
1686 
1687 void
1688 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1689   assert_locked();
1690 
1691   size_t size = chunk->size();
1692   _bt.verify_single_block((HeapWord*)chunk, size);
1693   // adjust _unallocated_block downward, as necessary
1694   _bt.freed((HeapWord*)chunk, size);
1695   _dictionary->return_chunk(chunk);
1696 #ifndef PRODUCT
1697   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1698     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1699     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1700     tl->verify_stats();
1701   }
1702 #endif // PRODUCT
1703 }
1704 
1705 void
1706 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1707   assert_locked();
1708   size_t size = fc->size();
1709   _bt.verify_single_block((HeapWord*) fc, size);
1710   _bt.verify_not_unallocated((HeapWord*) fc, size);
1711   if (_adaptive_freelists) {
1712     _indexedFreeList[size].return_chunk_at_tail(fc);
1713   } else {
1714     _indexedFreeList[size].return_chunk_at_head(fc);
1715   }
1716 #ifndef PRODUCT
1717   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1718      _indexedFreeList[size].verify_stats();
1719   }
1720 #endif // PRODUCT
1721 }
1722 
1723 // Add chunk to end of last block -- if it's the largest
1724 // block -- and update BOT and census data. We would
1725 // of course have preferred to coalesce it with the
1726 // last block, but it's currently less expensive to find the
1727 // largest block than it is to find the last.
1728 void
1729 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1730   HeapWord* chunk, size_t     size) {
1731   // check that the chunk does lie in this space!
1732   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1733   // One of the parallel gc task threads may be here
1734   // whilst others are allocating.
1735   Mutex* lock = &_parDictionaryAllocLock;
1736   FreeChunk* ec;
1737   {
1738     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1739     ec = dictionary()->find_largest_dict();  // get largest block
1740     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1741       // It's a coterminal block - we can coalesce.
1742       size_t old_size = ec->size();
1743       coalDeath(old_size);
1744       removeChunkFromDictionary(ec);
1745       size += old_size;
1746     } else {
1747       ec = (FreeChunk*)chunk;
1748     }
1749   }
1750   ec->set_size(size);
1751   debug_only(ec->mangleFreed(size));
1752   if (size < SmallForDictionary) {
1753     lock = _indexedFreeListParLocks[size];
1754   }
1755   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1756   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1757   // record the birth under the lock since the recording involves
1758   // manipulation of the list on which the chunk lives and
1759   // if the chunk is allocated and is the last on the list,
1760   // the list can go away.
1761   coalBirth(size);
1762 }
1763 
1764 void
1765 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1766                                               size_t     size) {
1767   // check that the chunk does lie in this space!
1768   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1769   assert_locked();
1770   _bt.verify_single_block(chunk, size);
1771 
1772   FreeChunk* fc = (FreeChunk*) chunk;
1773   fc->set_size(size);
1774   debug_only(fc->mangleFreed(size));
1775   if (size < SmallForDictionary) {
1776     returnChunkToFreeList(fc);
1777   } else {
1778     returnChunkToDictionary(fc);
1779   }
1780 }
1781 
1782 void
1783 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1784   size_t size, bool coalesced) {
1785   assert_locked();
1786   assert(chunk != NULL, "null chunk");
1787   if (coalesced) {
1788     // repair BOT
1789     _bt.single_block(chunk, size);
1790   }
1791   addChunkToFreeLists(chunk, size);
1792 }
1793 
1794 // We _must_ find the purported chunk on our free lists;
1795 // we assert if we don't.
1796 void
1797 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1798   size_t size = fc->size();
1799   assert_locked();
1800   debug_only(verifyFreeLists());
1801   if (size < SmallForDictionary) {
1802     removeChunkFromIndexedFreeList(fc);
1803   } else {
1804     removeChunkFromDictionary(fc);
1805   }
1806   _bt.verify_single_block((HeapWord*)fc, size);
1807   debug_only(verifyFreeLists());
1808 }
1809 
1810 void
1811 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1812   size_t size = fc->size();
1813   assert_locked();
1814   assert(fc != NULL, "null chunk");
1815   _bt.verify_single_block((HeapWord*)fc, size);
1816   _dictionary->remove_chunk(fc);
1817   // adjust _unallocated_block upward, as necessary
1818   _bt.allocated((HeapWord*)fc, size);
1819 }
1820 
1821 void
1822 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1823   assert_locked();
1824   size_t size = fc->size();
1825   _bt.verify_single_block((HeapWord*)fc, size);
1826   NOT_PRODUCT(
1827     if (FLSVerifyIndexTable) {
1828       verifyIndexedFreeList(size);
1829     }
1830   )
1831   _indexedFreeList[size].remove_chunk(fc);
1832   NOT_PRODUCT(
1833     if (FLSVerifyIndexTable) {
1834       verifyIndexedFreeList(size);
1835     }
1836   )
1837 }
1838 
1839 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1840   /* A hint is the next larger size that has a surplus.
1841      Start search at a size large enough to guarantee that
1842      the excess is >= MinChunkSize. */
1843   size_t start = align_object_size(numWords + MinChunkSize);
1844   if (start < IndexSetSize) {
1845     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1846     size_t    hint = _indexedFreeList[start].hint();
1847     while (hint < IndexSetSize) {
1848       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1849       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1850       if (fl->surplus() > 0 && fl->head() != NULL) {
1851         // Found a list with surplus, reset original hint
1852         // and split out a free chunk which is returned.
1853         _indexedFreeList[start].set_hint(hint);
1854         FreeChunk* res = getFromListGreater(fl, numWords);
1855         assert(res == NULL || res->is_free(),
1856           "Should be returning a free chunk");
1857         return res;
1858       }
1859       hint = fl->hint(); /* keep looking */
1860     }
1861     /* None found. */
1862     it[start].set_hint(IndexSetSize);
1863   }
1864   return NULL;
1865 }
1866 
1867 /* Requires fl->size >= numWords + MinChunkSize */
1868 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1869   size_t numWords) {
1870   FreeChunk *curr = fl->head();
1871   assert(curr != NULL, "List is empty");
1872   assert(numWords >= MinChunkSize, "Word size is too small");
1873   size_t oldNumWords = curr->size();
1874   assert(oldNumWords >= numWords + MinChunkSize,
1875         "Size of chunks in the list is too small");
1876 
1877   fl->remove_chunk(curr);
1878   // recorded indirectly by splitChunkAndReturnRemainder -
1879   // smallSplit(oldNumWords, numWords);
1880   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1881   // Does anything have to be done for the remainder in terms of
1882   // fixing the card table?
1883   assert(new_chunk == NULL || new_chunk->is_free(),
1884     "Should be returning a free chunk");
1885   return new_chunk;
1886 }
1887 
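     // Split "chunk" into a leading piece of new_size words, which is returned,
     // and a trailing remainder which is marked free, handed back to the
     // indexed lists or the dictionary, and recorded in the split census.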
1888 FreeChunk*
1889 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1890   size_t new_size) {
1891   assert_locked();
1892   size_t size = chunk->size();
1893   assert(size > new_size, "Split from a smaller block?");
1894   assert(is_aligned(chunk), "alignment problem");
1895   assert(size == adjustObjectSize(size), "alignment problem");
1896   size_t rem_sz = size - new_size;
1897   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1898   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1899   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1900   assert(is_aligned(ffc), "alignment problem");
1901   ffc->set_size(rem_sz);
1902   ffc->link_next(NULL);
1903   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1904   // Above must occur before BOT is updated below.
1905   // adjust block offset table
1906   OrderAccess::storestore();
1907   assert(chunk->is_free() && ffc->is_free(), "Error");
1908   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1909   if (rem_sz < SmallForDictionary) {
1910     bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
1911     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1912     assert(!is_par ||
1913            (GenCollectedHeap::heap()->n_par_threads() ==
1914             GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
1915     returnChunkToFreeList(ffc);
1916     split(size, rem_sz);
1917     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1918   } else {
1919     returnChunkToDictionary(ffc);
1920     split(size, rem_sz);
1921   }
1922   chunk->set_size(new_size);
1923   return chunk;
1924 }
1925 
1926 void
1927 CompactibleFreeListSpace::sweep_completed() {
1928   // Now that space is probably plentiful, refill linear
1929   // allocation blocks as needed.
1930   refillLinearAllocBlocksIfNeeded();
1931 }
1932 
1933 void
1934 CompactibleFreeListSpace::gc_prologue() {
1935   assert_locked();
1936   if (PrintFLSStatistics != 0) {
1937     gclog_or_tty->print("Before GC:\n");
1938     reportFreeListStatistics();
1939   }
1940   refillLinearAllocBlocksIfNeeded();
1941 }
1942 
1943 void
1944 CompactibleFreeListSpace::gc_epilogue() {
1945   assert_locked();
1946   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1947     if (_smallLinearAllocBlock._word_size == 0)
1948       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1949   }
1950   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1951   _promoInfo.stopTrackingPromotions();
1952   repairLinearAllocationBlocks();
1953   // Print Space's stats
1954   if (PrintFLSStatistics != 0) {
1955     gclog_or_tty->print("After GC:\n");
1956     reportFreeListStatistics();
1957   }
1958 }
1959 
1960 // Iteration support, mostly delegated from a CMS generation
1961 
1962 void CompactibleFreeListSpace::save_marks() {
1963   assert(Thread::current()->is_VM_thread(),
1964          "Global variable should only be set when single-threaded");
1965   // Mark the "end" of the used space at the time of this call;
1966   // note, however, that promoted objects from this point
1967   // on are tracked in the _promoInfo below.
1968   set_saved_mark_word(unallocated_block());
1969 #ifdef ASSERT
1970   // Check the sanity of save_marks() etc.
1971   MemRegion ur    = used_region();
1972   MemRegion urasm = used_region_at_save_marks();
1973   assert(ur.contains(urasm),
1974          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1975                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1976                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1977 #endif
1978   // inform allocator that promotions should be tracked.
1979   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1980   _promoInfo.startTrackingPromotions();
1981 }
1982 
1983 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1984   assert(_promoInfo.tracking(), "No preceding save_marks?");
1985   assert(GenCollectedHeap::heap()->n_par_threads() == 0,
1986          "Shouldn't be called if using parallel gc.");
1987   return _promoInfo.noPromotions();
1988 }
1989 
1990 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1991                                                                             \
1992 void CompactibleFreeListSpace::                                             \
1993 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1994   assert(GenCollectedHeap::heap()->n_par_threads() == 0,                    \
1995          "Shouldn't be called (yet) during parallel part of gc.");          \
1996   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1997   /*                                                                        \
1998    * This also restores any displaced headers and removes the elements from \
1999    * the iteration set as they are processed, so that we have a clean slate \
2000    * at the end of the iteration. Note, thus, that if new objects are       \
2001    * promoted as a result of the iteration they are iterated over as well.  \
2002    */                                                                       \
2003   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2004 }
2005 
2006 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2007 
2008 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2009   return _smallLinearAllocBlock._word_size == 0;
2010 }
2011 
2012 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2013   // Fix up linear allocation blocks to look like free blocks
2014   repairLinearAllocBlock(&_smallLinearAllocBlock);
2015 }
2016 
2017 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2018   assert_locked();
2019   if (blk->_ptr != NULL) {
2020     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2021            "Minimum block size requirement");
2022     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2023     fc->set_size(blk->_word_size);
2024     fc->link_prev(NULL);   // mark as free
2025     fc->dontCoalesce();
2026     assert(fc->is_free(), "just marked it free");
2027     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2028   }
2029 }
2030 
2031 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2032   assert_locked();
2033   if (_smallLinearAllocBlock._ptr == NULL) {
2034     assert(_smallLinearAllocBlock._word_size == 0,
2035       "Size of linAB should be zero if the ptr is NULL");
2036     // Reset the linAB refill and allocation size limit.
2037     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2038   }
2039   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2040 }
2041 
2042 void
2043 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2044   assert_locked();
2045   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2046          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2047          "blk invariant");
2048   if (blk->_ptr == NULL) {
2049     refillLinearAllocBlock(blk);
2050   }
2051   if (PrintMiscellaneous && Verbose) {
2052     if (blk->_word_size == 0) {
2053       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2054     }
2055   }
2056 }
2057 
2058 void
2059 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2060   assert_locked();
2061   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2062          "linear allocation block should be empty");
2063   FreeChunk* fc;
2064   if (blk->_refillSize < SmallForDictionary &&
2065       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2066     // A linAB's strategy might be to use small sizes to reduce
2067     // fragmentation but still get the benefits of allocation from a
2068     // linAB.
2069   } else {
2070     fc = getChunkFromDictionary(blk->_refillSize);
2071   }
2072   if (fc != NULL) {
2073     blk->_ptr  = (HeapWord*)fc;
2074     blk->_word_size = fc->size();
2075     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2076   }
2077 }
2078 
2079 // Support for concurrent collection policy decisions.
2080 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2081   // In the future we might want to add in fragmentation stats --
2082   // including erosion of the "mountain" into this decision as well.
2083   return !adaptive_freelists() && linearAllocationWouldFail();
2084 }
2085 
2086 // Support for compaction
2087 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2088   scan_and_forward(this, cp);
2089   // prepare_for_compaction() uses the space between live objects
2090   // so that the later phases can skip dead space quickly.  As a result,
2091   // verification of the free lists does not work after this point.
2092 }
2093 
2094 void CompactibleFreeListSpace::adjust_pointers() {
2095   // In other versions of adjust_pointers(), a bail out
2096   // based on the amount of live data in the generation
2097   // (i.e., if 0, bail out) may be used.
2098   // Cannot test used() == 0 here because the free lists have already
2099   // been mangled by the compaction.
2100 
2101   scan_and_adjust_pointers(this);
2102   // See note about verification in prepare_for_compaction().
2103 }
2104 
2105 void CompactibleFreeListSpace::compact() {
2106   scan_and_compact(this);
2107 }
2108 
2109 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2110 // where fbs is free block sizes
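     // For example, a single free block of any size gives a metric of 0,
     // while N equally sized free blocks give 1 - 1/N, which approaches 1
     // as the free space is split into ever more pieces.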
2111 double CompactibleFreeListSpace::flsFrag() const {
2112   size_t itabFree = totalSizeInIndexedFreeLists();
2113   double frag = 0.0;
2114   size_t i;
2115 
2116   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2117     double sz  = i;
2118     frag      += _indexedFreeList[i].count() * (sz * sz);
2119   }
2120 
2121   double totFree = itabFree +
2122                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2123   if (totFree > 0) {
2124     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2125             (totFree * totFree));
2126     frag = (double)1.0  - frag;
2127   } else {
2128     assert(frag == 0.0, "Follows from totFree == 0");
2129   }
2130   return frag;
2131 }
2132 
2133 void CompactibleFreeListSpace::beginSweepFLCensus(
2134   float inter_sweep_current,
2135   float inter_sweep_estimate,
2136   float intra_sweep_estimate) {
2137   assert_locked();
2138   size_t i;
2139   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2140     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2141     if (PrintFLSStatistics > 1) {
2142       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2143     }
2144     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2145     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2146     fl->set_before_sweep(fl->count());
2147     fl->set_bfr_surp(fl->surplus());
2148   }
2149   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2150                                     inter_sweep_current,
2151                                     inter_sweep_estimate,
2152                                     intra_sweep_estimate);
2153 }
2154 
2155 void CompactibleFreeListSpace::setFLSurplus() {
2156   assert_locked();
2157   size_t i;
2158   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2159     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2160     fl->set_surplus(fl->count() -
2161                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2162   }
2163 }
2164 
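     // Walk the indexed lists from the largest size downward, setting each
     // list's hint to the nearest larger size that currently has a surplus
     // (or to IndexSetSize if no such size exists).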
2165 void CompactibleFreeListSpace::setFLHints() {
2166   assert_locked();
2167   size_t i;
2168   size_t h = IndexSetSize;
2169   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2170     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2171     fl->set_hint(h);
2172     if (fl->surplus() > 0) {
2173       h = i;
2174     }
2175   }
2176 }
2177 
2178 void CompactibleFreeListSpace::clearFLCensus() {
2179   assert_locked();
2180   size_t i;
2181   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2182     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2183     fl->set_prev_sweep(fl->count());
2184     fl->set_coal_births(0);
2185     fl->set_coal_deaths(0);
2186     fl->set_split_births(0);
2187     fl->set_split_deaths(0);
2188   }
2189 }
2190 
2191 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2192   if (PrintFLSStatistics > 0) {
2193     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2194     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2195                            p2i(largestAddr));
2196   }
2197   setFLSurplus();
2198   setFLHints();
2199   if (PrintGC && PrintFLSCensus > 0) {
2200     printFLCensus(sweep_count);
2201   }
2202   clearFLCensus();
2203   assert_locked();
2204   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2205 }
2206 
2207 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2208   if (size < SmallForDictionary) {
2209     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2210     return (fl->coal_desired() < 0) ||
2211            ((int)fl->count() > fl->coal_desired());
2212   } else {
2213     return dictionary()->coal_dict_over_populated(size);
2214   }
2215 }
2216 
2217 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2218   assert(size < SmallForDictionary, "Size too large for indexed list");
2219   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2220   fl->increment_coal_births();
2221   fl->increment_surplus();
2222 }
2223 
2224 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2225   assert(size < SmallForDictionary, "Size too large for indexed list");
2226   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2227   fl->increment_coal_deaths();
2228   fl->decrement_surplus();
2229 }
2230 
2231 void CompactibleFreeListSpace::coalBirth(size_t size) {
2232   if (size  < SmallForDictionary) {
2233     smallCoalBirth(size);
2234   } else {
2235     dictionary()->dict_census_update(size,
2236                                    false /* split */,
2237                                    true /* birth */);
2238   }
2239 }
2240 
2241 void CompactibleFreeListSpace::coalDeath(size_t size) {
2242   if (size  < SmallForDictionary) {
2243     smallCoalDeath(size);
2244   } else {
2245     dictionary()->dict_census_update(size,
2246                                    false /* split */,
2247                                    false /* birth */);
2248   }
2249 }
2250 
2251 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2252   assert(size < SmallForDictionary, "Size too large for indexed list");
2253   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2254   fl->increment_split_births();
2255   fl->increment_surplus();
2256 }
2257 
2258 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2259   assert(size < SmallForDictionary, "Size too large for indexed list");
2260   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2261   fl->increment_split_deaths();
2262   fl->decrement_surplus();
2263 }
2264 
2265 void CompactibleFreeListSpace::split_birth(size_t size) {
2266   if (size  < SmallForDictionary) {
2267     smallSplitBirth(size);
2268   } else {
2269     dictionary()->dict_census_update(size,
2270                                    true /* split */,
2271                                    true /* birth */);
2272   }
2273 }
2274 
2275 void CompactibleFreeListSpace::splitDeath(size_t size) {
2276   if (size  < SmallForDictionary) {
2277     smallSplitDeath(size);
2278   } else {
2279     dictionary()->dict_census_update(size,
2280                                    true /* split */,
2281                                    false /* birth */);
2282   }
2283 }
2284 
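     // Census bookkeeping for splitting a chunk of "from" words into pieces
     // of "to1" and "from - to1" words: one split death and two split births.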
2285 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2286   size_t to2 = from - to1;
2287   splitDeath(from);
2288   split_birth(to1);
2289   split_birth(to2);
2290 }
2291 
2292 void CompactibleFreeListSpace::print() const {
2293   print_on(tty);
2294 }
2295 
2296 void CompactibleFreeListSpace::prepare_for_verify() {
2297   assert_locked();
2298   repairLinearAllocationBlocks();
2299   // Verify that the SpoolBlocks look like free blocks of
2300   // appropriate sizes... To be done ...
2301 }
2302 
2303 class VerifyAllBlksClosure: public BlkClosure {
2304  private:
2305   const CompactibleFreeListSpace* _sp;
2306   const MemRegion                 _span;
2307   HeapWord*                       _last_addr;
2308   size_t                          _last_size;
2309   bool                            _last_was_obj;
2310   bool                            _last_was_live;
2311 
2312  public:
2313   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2314     MemRegion span) :  _sp(sp), _span(span),
2315                        _last_addr(NULL), _last_size(0),
2316                        _last_was_obj(false), _last_was_live(false) { }
2317 
2318   virtual size_t do_blk(HeapWord* addr) {
2319     size_t res;
2320     bool   was_obj  = false;
2321     bool   was_live = false;
2322     if (_sp->block_is_obj(addr)) {
2323       was_obj = true;
2324       oop p = oop(addr);
2325       guarantee(p->is_oop(), "Should be an oop");
2326       res = _sp->adjustObjectSize(p->size());
2327       if (_sp->obj_is_alive(addr)) {
2328         was_live = true;
2329         p->verify();
2330       }
2331     } else {
2332       FreeChunk* fc = (FreeChunk*)addr;
2333       res = fc->size();
2334       if (FLSVerifyLists && !fc->cantCoalesce()) {
2335         guarantee(_sp->verify_chunk_in_free_list(fc),
2336                   "Chunk should be on a free list");
2337       }
2338     }
2339     if (res == 0) {
2340       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2341       gclog_or_tty->print_cr(
2342         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2343         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2344         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2345         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2346       _sp->print_on(gclog_or_tty);
2347       guarantee(false, "Seppuku!");
2348     }
2349     _last_addr = addr;
2350     _last_size = res;
2351     _last_was_obj  = was_obj;
2352     _last_was_live = was_live;
2353     return res;
2354   }
2355 };
2356 
2357 class VerifyAllOopsClosure: public OopClosure {
2358  private:
2359   const CMSCollector*             _collector;
2360   const CompactibleFreeListSpace* _sp;
2361   const MemRegion                 _span;
2362   const bool                      _past_remark;
2363   const CMSBitMap*                _bit_map;
2364 
2365  protected:
2366   void do_oop(void* p, oop obj) {
2367     if (_span.contains(obj)) { // the interior oop points into CMS heap
2368       if (!_span.contains(p)) { // reference from outside CMS heap
2369         // Should be a valid object; the first disjunct below allows
2370         // us to sidestep an assertion in block_is_obj() that insists
2371         // that p be in _sp. Note that several generations (and spaces)
2372         // are spanned by _span (CMS heap) above.
2373         guarantee(!_sp->is_in_reserved(obj) ||
2374                   _sp->block_is_obj((HeapWord*)obj),
2375                   "Should be an object");
2376         guarantee(obj->is_oop(), "Should be an oop");
2377         obj->verify();
2378         if (_past_remark) {
2379           // Remark has been completed, the object should be marked
2380           _bit_map->isMarked((HeapWord*)obj);
2381         }
2382       } else { // reference within CMS heap
2383         if (_past_remark) {
2384           // Remark has been completed -- so the referent should have
2385           // been marked, if referring object is.
2386           if (_bit_map->isMarked(_collector->block_start(p))) {
2387             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2388           }
2389         }
2390       }
2391     } else if (_sp->is_in_reserved(p)) {
2392       // the reference is from FLS, and points out of FLS
2393       guarantee(obj->is_oop(), "Should be an oop");
2394       obj->verify();
2395     }
2396   }
2397 
2398   template <class T> void do_oop_work(T* p) {
2399     T heap_oop = oopDesc::load_heap_oop(p);
2400     if (!oopDesc::is_null(heap_oop)) {
2401       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2402       do_oop(p, obj);
2403     }
2404   }
2405 
2406  public:
2407   VerifyAllOopsClosure(const CMSCollector* collector,
2408     const CompactibleFreeListSpace* sp, MemRegion span,
2409     bool past_remark, CMSBitMap* bit_map) :
2410     _collector(collector), _sp(sp), _span(span),
2411     _past_remark(past_remark), _bit_map(bit_map) { }
2412 
2413   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2414   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2415 };
2416 
2417 void CompactibleFreeListSpace::verify() const {
2418   assert_lock_strong(&_freelistLock);
2419   verify_objects_initialized();
2420   MemRegion span = _collector->_span;
2421   bool past_remark = (_collector->abstract_state() ==
2422                       CMSCollector::Sweeping);
2423 
2424   ResourceMark rm;
2425   HandleMark  hm;
2426 
2427   // Check integrity of CFL data structures
2428   _promoInfo.verify();
2429   _dictionary->verify();
2430   if (FLSVerifyIndexTable) {
2431     verifyIndexedFreeLists();
2432   }
2433   // Check integrity of all objects and free blocks in space
2434   {
2435     VerifyAllBlksClosure cl(this, span);
2436     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2437   }
2438   // Check that all references in the heap to FLS
2439   // are to valid objects in FLS or that references in
2440   // FLS are to valid objects elsewhere in the heap
2441   if (FLSVerifyAllHeapReferences)
2442   {
2443     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2444       _collector->markBitMap());
2445     CollectedHeap* ch = Universe::heap();
2446 
2447     // Iterate over all oops in the heap. Uses the _no_header version
2448     // since we are not interested in following the klass pointers.
2449     ch->oop_iterate_no_header(&cl);
2450   }
2451 
2452   if (VerifyObjectStartArray) {
2453     // Verify the block offset table
2454     _bt.verify();
2455   }
2456 }
2457 
2458 #ifndef PRODUCT
2459 void CompactibleFreeListSpace::verifyFreeLists() const {
2460   if (FLSVerifyLists) {
2461     _dictionary->verify();
2462     verifyIndexedFreeLists();
2463   } else {
2464     if (FLSVerifyDictionary) {
2465       _dictionary->verify();
2466     }
2467     if (FLSVerifyIndexTable) {
2468       verifyIndexedFreeLists();
2469     }
2470   }
2471 }
2472 #endif
2473 
2474 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2475   size_t i = 0;
2476   for (; i < IndexSetStart; i++) {
2477     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2478   }
2479   for (; i < IndexSetSize; i++) {
2480     verifyIndexedFreeList(i);
2481   }
2482 }
2483 
2484 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2485   FreeChunk* fc   =  _indexedFreeList[size].head();
2486   FreeChunk* tail =  _indexedFreeList[size].tail();
2487   size_t    num = _indexedFreeList[size].count();
2488   size_t      n = 0;
2489   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2490             "Slot should have been empty");
2491   for (; fc != NULL; fc = fc->next(), n++) {
2492     guarantee(fc->size() == size, "Size inconsistency");
2493     guarantee(fc->is_free(), "!free?");
2494     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2495     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2496   }
2497   guarantee(n == num, "Incorrect count");
2498 }
2499 
2500 #ifndef PRODUCT
2501 void CompactibleFreeListSpace::check_free_list_consistency() const {
2502   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2503     "Some sizes can't be allocated without recourse to"
2504     " linear allocation buffers");
2505   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2506     "else MIN_TREE_CHUNK_SIZE is wrong");
2507   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2508   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2509 }
2510 #endif
2511 
2512 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2513   assert_lock_strong(&_freelistLock);
2514   AdaptiveFreeList<FreeChunk> total;
2515   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2516   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2517   size_t total_free = 0;
2518   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2519     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2520     total_free += fl->count() * fl->size();
2521     if (i % (40*IndexSetStride) == 0) {
2522       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2523     }
2524     fl->print_on(gclog_or_tty);
2525     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2526     total.set_surplus(    total.surplus()     + fl->surplus()    );
2527     total.set_desired(    total.desired()     + fl->desired()    );
2528     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2529     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2530     total.set_count(      total.count()       + fl->count()      );
2531     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2532     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2533     total.set_split_births(total.split_births() + fl->split_births());
2534     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2535   }
2536   total.print_on(gclog_or_tty, "TOTAL");
2537   gclog_or_tty->print_cr("Total free in indexed lists "
2538                          SIZE_FORMAT " words", total_free);
2539   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2540     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2541             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2542     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2543   _dictionary->print_dict_census();
2544 }
2545 
2546 ///////////////////////////////////////////////////////////////////////////
2547 // CFLS_LAB
2548 ///////////////////////////////////////////////////////////////////////////
2549 
2550 #define VECTOR_257(x)                                                                                  \
2551   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2552   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2553      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2554      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2555      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2556      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2557      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2558      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2559      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2560      x }
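     // Note: the eight rows of 32 entries plus the trailing entry expand to a
     // 257-element initializer, matching IndexSetSize (asserted in the
     // CFLS_LAB constructor below).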
2561 
2562 // Initialize with the default setting for CMS, _not_ the generic
2563 // OldPLABSize default, which is different; if OldPLABSize is overridden
2564 // on the command line, these averages are reinitialized via a call to
2565 // modify_initialization() below.
2566 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2567   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
2568 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2569 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2570 
2571 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2572   _cfls(cfls)
2573 {
2574   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2575   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2576        i < CompactibleFreeListSpace::IndexSetSize;
2577        i += CompactibleFreeListSpace::IndexSetStride) {
2578     _indexedFreeList[i].set_size(i);
2579     _num_blocks[i] = 0;
2580   }
2581 }
2582 
2583 static bool _CFLS_LAB_modified = false;
2584 
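     // Reseeds every per-size weighted average with user-supplied values
     // (e.g. when OldPLABSize is overridden on the command line); see the
     // initialization comment above.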
2585 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2586   assert(!_CFLS_LAB_modified, "Call only once");
2587   _CFLS_LAB_modified = true;
2588   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2589        i < CompactibleFreeListSpace::IndexSetSize;
2590        i += CompactibleFreeListSpace::IndexSetStride) {
2591     _blocks_to_claim[i].modify(n, wt, true /* force */);
2592   }
2593 }
2594 
2595 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2596   FreeChunk* res;
2597   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
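       // Two cases: requests of IndexSetSize words or more are satisfied from
       // the shared dictionary under parDictionaryAllocLock(); smaller requests
       // are served from this thread's private indexed free list, which is
       // refilled in bulk from the shared pool when it runs dry.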
2598   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2599     // This locking manages sync with other large object allocations.
2600     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2601                     Mutex::_no_safepoint_check_flag);
2602     res = _cfls->getChunkFromDictionaryExact(word_sz);
2603     if (res == NULL) return NULL;
2604   } else {
2605     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2606     if (fl->count() == 0) {
2607       // Attempt to refill this local free list.
2608       get_from_global_pool(word_sz, fl);
2609       // If it didn't work, give up.
2610       if (fl->count() == 0) return NULL;
2611     }
2612     res = fl->get_chunk_at_head();
2613     assert(res != NULL, "Why was count non-zero?");
2614   }
2615   res->markNotFree();
2616   assert(!res->is_free(), "shouldn't be marked free");
2617   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2618   // Mangle the just-allocated block with a distinct pattern (debug builds only).
2619   debug_only(res->mangleAllocated(word_sz));
2620   return (HeapWord*)res;
2621 }
2622 
2623 // Refill "fl" with blocks of size "word_sz" claimed from the shared
2624 // pool, and update the related book-keeping stats.
2625 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2626   // Get the #blocks we want to claim
2627   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2628   assert(n_blks > 0, "Error");
2629   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2630   // In some cases, when the application has a phase change,
2631   // there may be a sudden and sharp shift in the object survival
2632   // profile, and updating the counts at the end of a scavenge
2633   // may not be quick enough, giving rise to large scavenge pauses
2634   // during these phase changes. It is beneficial to detect such
2635   // changes on-the-fly during a scavenge and avoid such a phase-change
2636   // pothole. The following code is a heuristic attempt to do that.
2637   // It is protected by a product flag until we have gained
2638   // enough experience with this heuristic and fine-tuned its behavior.
2639   // WARNING: This might increase fragmentation if we overreact to
2640   // small spikes, so some kind of historical smoothing based on
2641   // previous experience with the greater reactivity might be useful.
2642   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2643   // default.
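       // Illustrative arithmetic (flag values assumed for exposition): with
       // n_blks == 50 claimed on average, _num_blocks[word_sz] == 2000 already
       // handed out this scavenge, CMSOldPLABToleranceFactor == 4,
       // CMSOldPLABNumRefills == 4 and CMSOldPLABReactivityFactor == 2, the
       // code below computes multiple == 2000/(4*4*50) == 2 and boosts
       // n_blks to 50 + 2*2*50 == 250, subject to the CMSOldPLABMax cap.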
2644   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2645     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2646     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2647     n_blks = MIN2(n_blks, CMSOldPLABMax);
2648   }
2649   assert(n_blks > 0, "Error");
2650   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2651   // Update stats table entry for this block size
2652   _num_blocks[word_sz] += fl->count();
2653 }
2654 
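     // For each block size, the desired PLAB refill size is recomputed as
     // (blocks consumed this cycle) / (workers * CMSOldPLABNumRefills), clamped
     // to [CMSOldPLABMin, CMSOldPLABMax] and folded into an exponentially
     // weighted average.  For instance (illustrative values), 8 workers
     // consuming 6400 blocks of a given size with CMSOldPLABNumRefills == 4
     // would sample 6400/(8*4) == 200 blocks per refill.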
2655 void CFLS_LAB::compute_desired_plab_size() {
2656   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2657        i < CompactibleFreeListSpace::IndexSetSize;
2658        i += CompactibleFreeListSpace::IndexSetStride) {
2659     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2660            "Counter inconsistency");
2661     if (_global_num_workers[i] > 0) {
2662       // Need to smooth wrt historical average
2663       if (ResizeOldPLAB) {
2664         _blocks_to_claim[i].sample(
2665           MAX2(CMSOldPLABMin,
2666           MIN2(CMSOldPLABMax,
2667                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2668       }
2669       // Reset counters for next round
2670       _global_num_workers[i] = 0;
2671       _global_num_blocks[i] = 0;
2672       if (PrintOldPLAB) {
2673         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2674                                i, (size_t)_blocks_to_claim[i].average());
2675       }
2676     }
2677   }
2678 }
2679 
2680 // If this is changed in the future to allow parallel
2681 // access, one would need to take the FL locks and,
2682 // depending on how it is used, stagger access from
2683 // parallel threads to reduce contention.
2684 void CFLS_LAB::retire(int tid) {
2685   // We run this single threaded with the world stopped;
2686   // so no need for locks and such.
2687   NOT_PRODUCT(Thread* t = Thread::current();)
2688   assert(Thread::current()->is_VM_thread(), "Error");
2689   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2690        i < CompactibleFreeListSpace::IndexSetSize;
2691        i += CompactibleFreeListSpace::IndexSetStride) {
2692     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2693            "Can't retire more than what we obtained");
2694     if (_num_blocks[i] > 0) {
2695       size_t num_retire =  _indexedFreeList[i].count();
2696       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2697       {
2698         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2699         //                Mutex::_no_safepoint_check_flag);
2700 
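             // _num_blocks[i] - num_retire is the number of blocks this thread
             // actually consumed; it is accumulated into the global counters
             // consulted by compute_desired_plab_size() above.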
2701         // Update globals stats for num_blocks used
2702         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2703         _global_num_workers[i]++;
2704         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2705         if (num_retire > 0) {
2706           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2707           // Reset this list.
2708           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2709           _indexedFreeList[i].set_size(i);
2710         }
2711       }
2712       if (PrintOldPLAB) {
2713         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2714                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2715       }
2716       // Reset stats for next round
2717       _num_blocks[i]         = 0;
2718     }
2719   }
2720 }
2721 
2722 // Used by par_get_chunk_of_blocks() for the chunks from the
2723 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2724 // of "word_sz" and, if one is found, splits it into chunks of size
2725 // "word_sz", adding them to the free list "fl".  "n" is the maximum
2726 // number of chunks to be added to "fl".
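     // For instance (illustrative values), with word_sz == 8 and n == 100:
     // cur_sz scans 8, 16, 24, ... (bounded by IndexSetSize); if the 8- and
     // 16-word lists are empty but the 24-word list is not, then k == 3,
     // MAX2(n/k, 1) == 33 chunks are taken from it, and each is split into
     // three 8-word blocks, yielding up to 99 blocks on "fl".  Note that the
     // function may return true having supplied fewer than "n" blocks.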
2727 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2728 
2729   // We'll try all multiples of word_sz in the indexed set: word_sz itself
2730   // and, if CMSSplitIndexedFreeListBlocks, larger multiples as well.  If
2731   // nothing is found here, the caller falls back to the dictionary.
2732   {
2733     bool found;
2734     int  k;
2735     size_t cur_sz;
2736     for (k = 1, cur_sz = k * word_sz, found = false;
2737          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2738          (CMSSplitIndexedFreeListBlocks || k <= 1);
2739          k++, cur_sz = k * word_sz) {
2740       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2741       fl_for_cur_sz.set_size(cur_sz);
2742       {
2743         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2744                         Mutex::_no_safepoint_check_flag);
2745         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2746         if (gfl->count() != 0) {
2747           // nn is the number of chunks of size cur_sz that
2748           // we'd need to split k-ways each, in order to create
2749           // "n" chunks of size word_sz each.
2750           const size_t nn = MAX2(n/k, (size_t)1);
2751           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2752           found = true;
2753           if (k > 1) {
2754             // Update split death stats for the cur_sz-size blocks list:
2755             // we increment the split death count by the number of blocks
2756             // we just took from the cur_sz-size blocks list and which
2757             // we will be splitting below.
2758             ssize_t deaths = gfl->split_deaths() +
2759                              fl_for_cur_sz.count();
2760             gfl->set_split_deaths(deaths);
2761           }
2762         }
2763       }
2764       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2765       if (found) {
2766         if (k == 1) {
2767           fl->prepend(&fl_for_cur_sz);
2768         } else {
2769           // Divide each block on fl_for_cur_sz up k ways.
2770           FreeChunk* fc;
2771           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2772             // Must do this in reverse order, so that anybody attempting to
2773             // access the main chunk sees it as a single free block until we
2774             // change it.
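                 // For example, with k == 3 the sub-block at offset 2*word_sz
                 // is carved and its BOT entry marked first, then the one at
                 // offset word_sz, and finally fc itself (i == 0) is resized;
                 // a concurrent reader thus sees either the original cur_sz
                 // block or fully initialized word_sz blocks.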
2775             size_t fc_size = fc->size();
2776             assert(fc->is_free(), "Error");
2777             for (int i = k-1; i >= 0; i--) {
2778               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2779               assert((i != 0) ||
2780                         ((fc == ffc) && ffc->is_free() &&
2781                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2782                         "Counting error");
2783               ffc->set_size(word_sz);
2784               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2785               ffc->link_next(NULL);
2786               // Above must occur before BOT is updated below.
2787               OrderAccess::storestore();
2788               // splitting from the right, fc_size == i * word_sz
2789               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2790               fc_size -= word_sz;
2791               assert(fc_size == i*word_sz, "Error");
2792               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2793               _bt.verify_single_block((HeapWord*)fc, fc_size);
2794               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2795               // Push this on "fl".
2796               fl->return_chunk_at_head(ffc);
2797             }
2798             // TRAP
2799             assert(fl->tail()->next() == NULL, "List invariant.");
2800           }
2801         }
2802         // Update birth stats for this block size.
2803         size_t num = fl->count();
2804         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2805                         Mutex::_no_safepoint_check_flag);
2806         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2807         _indexedFreeList[word_sz].set_split_births(births);
2808         return true;
2809       }
2810     }
2811     return found;
2812   }
2813 }
2814 
2815 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2816 
2817   FreeChunk* fc = NULL;
2818   FreeChunk* rem_fc = NULL;
2819   size_t rem;
2820   {
2821     MutexLockerEx x(parDictionaryAllocLock(),
2822                     Mutex::_no_safepoint_check_flag);
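         // Ask the dictionary for a chunk large enough for n blocks of
         // word_sz; if none is available, shrink the request one block at a
         // time and retry.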
2823     while (n > 0) {
2824       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2825                                   FreeBlockDictionary<FreeChunk>::atLeast);
2826       if (fc != NULL) {
2827         break;
2828       } else {
2829         n--;
2830       }
2831     }
2832     if (fc == NULL) return NULL;
2833     // Otherwise, split up that block.
2834     assert((ssize_t)n >= 1, "Control point invariant");
2835     assert(fc->is_free(), "Error: should be a free block");
2836     _bt.verify_single_block((HeapWord*)fc, fc->size());
2837     const size_t nn = fc->size() / word_sz;
2838     n = MIN2(nn, n);
2839     assert((ssize_t)n >= 1, "Control point invariant");
2840     rem = fc->size() - n * word_sz;
2841     // If there is a remainder, and it's too small, allocate one fewer.
2842     if (rem > 0 && rem < MinChunkSize) {
2843       n--; rem += word_sz;
2844     }
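         // Illustrative (values assumed): a request for n == 10 blocks of
         // word_sz == 8 satisfied by an 83-word chunk gives rem == 3; if
         // 3 < MinChunkSize we drop to n == 9 and rem == 11, keeping the
         // remainder large enough to stand as a free chunk.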
2845     // Note that at this point we may have n == 0.
2846     assert((ssize_t)n >= 0, "Control point invariant");
2847 
2848     // If n is 0, the chunk fc that was found is not large
2849     // enough to leave a viable remainder.  We are unable to
2850     // allocate even one block.  Return fc to the
2851     // dictionary and return, leaving "fl" empty.
2852     if (n == 0) {
2853       returnChunkToDictionary(fc);
2854       return NULL;
2855     }
2856 
2857     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2858     dictionary()->dict_census_update(fc->size(),
2859                                      true /*split*/,
2860                                      false /*birth*/);
2861 
2862     // First return the remainder, if any.
2863     // Note that we hold the lock until we decide if we're going to give
2864     // back the remainder to the dictionary, since a concurrent allocation
2865     // may otherwise see the heap as empty.  (We're willing to take that
2866     // hit if the block is a small block.)
2867     if (rem > 0) {
2868       size_t prefix_size = n * word_sz;
2869       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2870       rem_fc->set_size(rem);
2871       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2872       rem_fc->link_next(NULL);
2873       // Above must occur before BOT is updated below.
2874       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2875       OrderAccess::storestore();
2876       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2877       assert(fc->is_free(), "Error");
2878       fc->set_size(prefix_size);
2879       if (rem >= IndexSetSize) {
2880         returnChunkToDictionary(rem_fc);
2881         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2882         rem_fc = NULL;
2883       }
2884       // Otherwise, return it to the small list below.
2885     }
2886   }
2887   if (rem_fc != NULL) {
2888     MutexLockerEx x(_indexedFreeListParLocks[rem],
2889                     Mutex::_no_safepoint_check_flag);
2890     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2891     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2892     smallSplitBirth(rem);
2893   }
2894   assert(n * word_sz == fc->size(),
2895     err_msg("Chunk size " SIZE_FORMAT " is not exactly divisible into "
2896     SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2897     fc->size(), n, word_sz));
2898   return fc;
2899 }
2900 
2901 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2902 
2903   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2904 
2905   if (fc == NULL) {
2906     return;
2907   }
2908 
2909   size_t n = fc->size() / word_sz;
2910 
2911   assert((ssize_t)n > 0, "Consistency");
2912   // Now do the splitting up.
2913   // Must do this in reverse order, so that anybody attempting to
2914   // access the main chunk sees it as a single free block until we
2915   // change it.
2916   size_t fc_size = n * word_sz;
2917   // All but first chunk in this loop
2918   for (ssize_t i = n-1; i > 0; i--) {
2919     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2920     ffc->set_size(word_sz);
2921     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2922     ffc->link_next(NULL);
2923     // Above must occur before BOT is updated below.
2924     OrderAccess::storestore();
2925     // splitting from the right: fc_size == (i + 1) * word_sz here, i * word_sz after the decrement below
2926     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2927     fc_size -= word_sz;
2928     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2929     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2930     _bt.verify_single_block((HeapWord*)fc, fc_size);
2931     // Push this on "fl".
2932     fl->return_chunk_at_head(ffc);
2933   }
2934   // First chunk
2935   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2936   // The blocks carved off above must show their new sizes before fc itself is resized below
2937   fc->set_size(word_sz);
2938   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2939   fc->link_next(NULL);
2940   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2941   _bt.verify_single_block((HeapWord*)fc, fc->size());
2942   fl->return_chunk_at_head(fc);
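       // "fl" now holds the carved blocks in ascending address order, with fc
       // at its head (each return_chunk_at_head above pushed in front of the
       // previously pushed, higher-addressed block).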
2943 
2944   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2945   {
2946     // Update the stats for this block size.
2947     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2948                     Mutex::_no_safepoint_check_flag);
2949     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2950     _indexedFreeList[word_sz].set_split_births(births);
2951     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2952     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2953   }
2954 
2955   // TRAP
2956   assert(fl->tail()->next() == NULL, "List invariant.");
2957 }
2958 
2959 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2960   assert(fl->count() == 0, "Precondition.");
2961   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2962          "Precondition");
2963 
2964   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2965     // Got it
2966     return;
2967   }
2968 
2969   // Otherwise, we'll split a block from the dictionary.
2970   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2971 }
2972 
2973 // Set up the space's par_seq_tasks structure for work claiming
2974 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2975 // XXX Need to suitably abstract and generalize this and the next
2976 // method into one.
2977 void
2978 CompactibleFreeListSpace::
2979 initialize_sequential_subtasks_for_rescan(int n_threads) {
2980   // The "size" of each task is fixed according to rescan_task_size.
2981   assert(n_threads > 0, "Unexpected n_threads argument");
2982   const size_t task_size = rescan_task_size();
2983   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
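       // n_tasks is the ceiling of used words over task_size; for instance
       // (illustrative values), 10000 used words with a 4096-word task size
       // give (10000 + 4095)/4096 == 3 tasks, the last covering a partial
       // stride.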
2984   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2985   assert(n_tasks == 0 ||
2986          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2987           (used_region().start() + n_tasks*task_size >= used_region().end())),
2988          "n_tasks calculation incorrect");
2989   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2990   assert(!pst->valid(), "Clobbering existing data?");
2991   // Sets the condition for completion of the subtask (how many threads
2992   // need to finish in order to be done).
2993   pst->set_n_threads(n_threads);
2994   pst->set_n_tasks((int)n_tasks);
2995 }
2996 
2997 // Set up the space's par_seq_tasks structure for work claiming
2998 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2999 void
3000 CompactibleFreeListSpace::
3001 initialize_sequential_subtasks_for_marking(int n_threads,
3002                                            HeapWord* low) {
3003   // The "size" of each task is fixed according to rescan_task_size.
3004   assert(n_threads > 0, "Unexpected n_threads argument");
3005   const size_t task_size = marking_task_size();
3006   assert(task_size > CardTableModRefBS::card_size_in_words &&
3007          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3008          "Otherwise arithmetic below would be incorrect");
3009   MemRegion span = _gen->reserved();
3010   if (low != NULL) {
3011     if (span.contains(low)) {
3012       // Align low down to a card boundary so that
3013       // we can use block_offset_careful() on span boundaries.
3014       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3015                                  CardTableModRefBS::card_size);
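           // For example (assuming a 512-byte card and 8-byte words), low is
           // rounded down to the nearest 64-word (card) boundary, so the
           // clipped span starts on a card boundary, as asserted below.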
3016       // Clip span prefix at aligned_low
3017       span = span.intersection(MemRegion(aligned_low, span.end()));
3018     } else if (low > span.end()) {
3019       span = MemRegion(low, low);  // Null region
3020     } // else use entire span
3021   }
3022   assert(span.is_empty() ||
3023          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3024         "span should start at a card boundary");
3025   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3026   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3027   assert(n_tasks == 0 ||
3028          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3029           (span.start() + n_tasks*task_size >= span.end())),
3030          "n_tasks calculation incorrect");
3031   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3032   assert(!pst->valid(), "Clobbering existing data?");
3033   // Sets the condition for completion of the subtask (how many threads
3034   // need to finish in order to be done).
3035   pst->set_n_threads(n_threads);
3036   pst->set_n_tasks((int)n_tasks);
3037 }