1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/space.inline.hpp"
  37 #include "memory/universe.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 #include "runtime/vmThread.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 /////////////////////////////////////////////////////////////////////////
  48 //// CompactibleFreeListSpace
  49 /////////////////////////////////////////////////////////////////////////
  50 
// The highest lock rank used by the free list locks.
  52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  53 
  54 // Defaults are 0 so things will break badly if incorrectly initialized.
  55 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  57 
  58 size_t MinChunkSize = 0;
  59 
  60 void CompactibleFreeListSpace::set_cms_values() {
  61   // Set CMS global values
  62   assert(MinChunkSize == 0, "already set");
  63 
  64   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  65   // for chunks to contain a FreeChunk.
  66   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  67   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  68 
  69   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  70   IndexSetStart  = MinChunkSize;
  71   IndexSetStride = MinObjAlignment;
  72 }
  73 
  74 // Constructor
  75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  76   MemRegion mr, bool use_adaptive_freelists,
  77   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  78   _dictionaryChoice(dictionaryChoice),
  79   _adaptive_freelists(use_adaptive_freelists),
  80   _bt(bs, mr),
  // Free list locks are in the range of values taken by _lockRank.
  // This range is currently [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace constructors
  // are called serially in the order in which the locks
  // are acquired in the program text. This is true today.
  86   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  87                 Monitor::_safepoint_check_sometimes),
  88   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  89                           "CompactibleFreeListSpace._dict_par_lock", true,
  90                           Monitor::_safepoint_check_never),
  91   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  92                     CMSRescanMultiple),
  93   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  94                     CMSConcMarkMultiple),
  95   _collector(NULL),
  96   _preconsumptionDirtyCardClosure(NULL)
  97 {
  98   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  99          "FreeChunk is larger than expected");
 100   _bt.set_space(this);
 101   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 102   // We have all of "mr", all of which we place in the dictionary
 103   // as one big chunk. We'll need to decide here which of several
 104   // possible alternative dictionary implementations to use. For
 105   // now the choice is easy, since we have only one working
 106   // implementation, namely, the simple binary tree (splaying
 107   // temporarily disabled).
 108   switch (dictionaryChoice) {
 109     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 110       _dictionary = new AFLBinaryTreeDictionary(mr);
 111       break;
 112     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 113     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 114     default:
 115       warning("dictionaryChoice: selected option not understood; using"
 116               " default BinaryTreeDictionary implementation instead.");
 117   }
 118   assert(_dictionary != NULL, "CMS dictionary initialization");
 119   // The indexed free lists are initially all empty and are lazily
 120   // filled in on demand. Initialize the array elements to NULL.
 121   initializeIndexedFreeListArray();
 122 
 123   // Not using adaptive free lists assumes that allocation is first
 124   // from the linAB's.  Also a cms perm gen which can be compacted
 125   // has to have the klass's klassKlass allocated at a lower
 126   // address in the heap than the klass so that the klassKlass is
 127   // moved to its new location before the klass is moved.
 128   // Set the _refillSize for the linear allocation blocks
 129   if (!use_adaptive_freelists) {
 130     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 131                                            FreeBlockDictionary<FreeChunk>::atLeast);
 132     // The small linAB initially has all the space and will allocate
 133     // a chunk of any size.
 134     HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
 136       1024*SmallForLinearAlloc, fc->size());
 137     // Note that _unallocated_block is not updated here.
 138     // Allocations from the linear allocation block should
 139     // update it.
 140   } else {
 141     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 142                                SmallForLinearAlloc);
 143   }
 144   // CMSIndexedFreeListReplenish should be at least 1
 145   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 146   _promoInfo.setSpace(this);
 147   if (UseCMSBestFit) {
 148     _fitStrategy = FreeBlockBestFitFirst;
 149   } else {
 150     _fitStrategy = FreeBlockStrategyNone;
 151   }
 152   check_free_list_consistency();
 153 
 154   // Initialize locks for parallel case.
 155   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 157                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 158     DEBUG_ONLY(
 159       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 160     )
 161   }
 162   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 163 }
 164 
// Like CompactibleSpace::forward() but always calls cross_threshold() to
// update the block offset table.  The initialize_threshold() call was removed
// because CFLS does not use a block offset array for contiguous spaces.
 168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 169                                     CompactPoint* cp, HeapWord* compact_top) {
 170   // q is alive
 171   // First check if we should switch compaction space
 172   assert(this == cp->space, "'this' should be current compaction space.");
 173   size_t compaction_max_size = pointer_delta(end(), compact_top);
 174   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 175     "virtual adjustObjectSize_v() method is not correct");
 176   size_t adjusted_size = adjustObjectSize(size);
 177   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 178          "no small fragments allowed");
 179   assert(minimum_free_block_size() == MinChunkSize,
 180          "for de-virtualized reference below");
  // We can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 182   if (adjusted_size + MinChunkSize > compaction_max_size &&
 183       adjusted_size != compaction_max_size) {
 184     do {
 185       // switch to next compaction space
 186       cp->space->set_compaction_top(compact_top);
 187       cp->space = cp->space->next_compaction_space();
 188       if (cp->space == NULL) {
 189         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 190         assert(cp->gen != NULL, "compaction must succeed");
 191         cp->space = cp->gen->first_compaction_space();
 192         assert(cp->space != NULL, "generation must have a first compaction space");
 193       }
 194       compact_top = cp->space->bottom();
 195       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; above, the non-virtual version was used
      // to save the virtual dispatch.
 200       adjusted_size = cp->space->adjust_object_size_v(size);
 201       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 202       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 203     } while (adjusted_size > compaction_max_size);
 204   }
 205 
 206   // store the forwarding pointer into the mark word
 207   if ((HeapWord*)q != compact_top) {
 208     q->forward_to(oop(compact_top));
 209     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 210   } else {
 211     // if the object isn't moving we can just set the mark to the default
 212     // mark and handle it specially later on.
 213     q->init_mark();
 214     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 215   }
 216 
 217   compact_top += adjusted_size;
 218 
 219   // we need to update the offset table so that the beginnings of objects can be
 220   // found during scavenge.  Note that we are updating the offset table based on
 221   // where the object will be once the compaction phase finishes.
 222 
  // Always call cross_threshold().  A contiguous space can only call it when
  // the compaction_top exceeds the current threshold, but that does not hold
  // for a non-contiguous space.
 226   cp->threshold =
 227     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 228   return compact_top;
 229 }
 230 
 231 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 232 // and use of single_block instead of alloc_block.  The name here is not really
 233 // appropriate - maybe a more general name could be invented for both the
 234 // contiguous and noncontiguous spaces.
 235 
 236 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 237   _bt.single_block(start, the_end);
 238   return end();
 239 }
 240 
// Initialize the indexed free lists; they all start out empty.
 242 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 243   for (size_t i = 0; i < IndexSetSize; i++) {
 244     // Note that on platforms where objects are double word aligned,
 245     // the odd array elements are not used.  It is convenient, however,
 246     // to map directly from the object size to the array element.
 247     _indexedFreeList[i].reset(IndexSetSize);
 248     _indexedFreeList[i].set_size(i);
 249     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 250     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 251     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 253   }
 254 }
 255 
 256 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 257   for (size_t i = 1; i < IndexSetSize; i++) {
 258     assert(_indexedFreeList[i].size() == (size_t) i,
 259       "Indexed free list sizes are incorrect");
 260     _indexedFreeList[i].reset(IndexSetSize);
 261     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 262     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 263     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 265   }
 266 }
 267 
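// Reset the space so that the region mr (if non-empty) becomes its single
// free chunk: the indexed free lists and the dictionary are cleared and mr
// is returned either to the dictionary or to the appropriate indexed list.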
 268 void CompactibleFreeListSpace::reset(MemRegion mr) {
 269   resetIndexedFreeListArray();
 270   dictionary()->reset();
 271   if (BlockOffsetArrayUseUnallocatedBlock) {
 272     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 273     // Everything's allocated until proven otherwise.
 274     _bt.set_unallocated_block(end());
 275   }
 276   if (!mr.is_empty()) {
 277     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 278     _bt.single_block(mr.start(), mr.word_size());
 279     FreeChunk* fc = (FreeChunk*) mr.start();
 280     fc->set_size(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
 282       returnChunkToDictionary(fc);
 283     } else {
 284       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 285       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 286     }
 287     coalBirth(mr.word_size());
 288   }
 289   _promoInfo.reset();
 290   _smallLinearAllocBlock._ptr = NULL;
 291   _smallLinearAllocBlock._word_size = 0;
 292 }
 293 
 294 void CompactibleFreeListSpace::reset_after_compaction() {
 295   // Reset the space to the new reality - one free chunk.
 296   MemRegion mr(compaction_top(), end());
 297   reset(mr);
 298   // Now refill the linear allocation block(s) if possible.
 299   if (_adaptive_freelists) {
 300     refillLinearAllocBlocksIfNeeded();
 301   } else {
 302     // Place as much of mr in the linAB as we can get,
 303     // provided it was big enough to go into the dictionary.
 304     FreeChunk* fc = dictionary()->find_largest_dict();
 305     if (fc != NULL) {
 306       assert(fc->size() == mr.word_size(),
 307              "Why was the chunk broken up?");
 308       removeChunkFromDictionary(fc);
 309       HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
 311         1024*SmallForLinearAlloc, fc->size());
 312       // Note that _unallocated_block is not updated here.
 313     }
 314   }
 315 }
 316 
 317 // Walks the entire dictionary, returning a coterminal
 318 // chunk, if it exists. Use with caution since it involves
 319 // a potentially complete walk of a potentially large tree.
 320 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 321 
 322   assert_lock_strong(&_freelistLock);
 323 
 324   return dictionary()->find_chunk_ends_at(end());
 325 }
 326 
 327 
 328 #ifndef PRODUCT
 329 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 330   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 331     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 332   }
 333 }
 334 
 335 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 336   size_t sum = 0;
 337   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 338     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 339   }
 340   return sum;
 341 }
 342 
 343 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 344   size_t count = 0;
 345   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 346     debug_only(
 347       ssize_t total_list_count = 0;
 348       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 349          fc = fc->next()) {
 350         total_list_count++;
 351       }
 352       assert(total_list_count ==  _indexedFreeList[i].count(),
 353         "Count in list is incorrect");
 354     )
 355     count += _indexedFreeList[i].count();
 356   }
 357   return count;
 358 }
 359 
 360 size_t CompactibleFreeListSpace::totalCount() {
 361   size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->total_count();
 363   if (_smallLinearAllocBlock._word_size != 0) {
 364     num++;
 365   }
 366   return num;
 367 }
 368 #endif
 369 
 370 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 371   FreeChunk* fc = (FreeChunk*) p;
 372   return fc->is_free();
 373 }
 374 
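// Note that, like free() below, this is only approximately correct while
// the free-space data structures are in flux.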
 375 size_t CompactibleFreeListSpace::used() const {
 376   return capacity() - free();
 377 }
 378 
 379 size_t CompactibleFreeListSpace::free() const {
 380   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 381   // if you do this while the structures are in flux you
 382   // may get an approximate answer only; for instance
 383   // because there is concurrent allocation either
 384   // directly by mutators or for promotion during a GC.
 385   // It's "MT-safe", however, in the sense that you are guaranteed
 386   // not to crash and burn, for instance, because of walking
 387   // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and,
  // further, the computation of totalSizeInIndexedFreeLists()
  // is itself non-atomic). The normal use of
 392   // this is during a resize operation at the end of GC
 393   // and at that time you are guaranteed to get the
 394   // correct actual value. However, for instance, this is
 395   // also read completely asynchronously by the "perf-sampler"
 396   // that supports jvmstat, and you are apt to see the values
 397   // flicker in such cases.
 398   assert(_dictionary != NULL, "No _dictionary?");
 399   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 400           totalSizeInIndexedFreeLists() +
 401           _smallLinearAllocBlock._word_size) * HeapWordSize;
 402 }
 403 
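// An estimate of the largest allocation currently possible, in words,
// derived from the dictionary's largest chunk, the small linAB remainder,
// and the largest non-empty indexed free list.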
 404 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 405   assert(_dictionary != NULL, "No _dictionary?");
 406   assert_locked();
 407   size_t res = _dictionary->max_chunk_size();
 408   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 409                        (size_t) SmallForLinearAlloc - 1));
 410   // XXX the following could potentially be pretty slow;
 411   // should one, pessimistically for the rare cases when res
 412   // calculated above is less than IndexSetSize,
 413   // just return res calculated above? My reasoning was that
 414   // those cases will be so rare that the extra time spent doesn't
 415   // really matter....
 416   // Note: do not change the loop test i >= res + IndexSetStride
 417   // to i > res below, because i is unsigned and res may be zero.
 418   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 419        i -= IndexSetStride) {
 420     if (_indexedFreeList[i].head() != NULL) {
 421       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 422       return i;
 423     }
 424   }
 425   return res;
 426 }
 427 
 428 void LinearAllocBlock::print_on(outputStream* st) const {
 429   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 430             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 431             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 432 }
 433 
 434 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 435   st->print_cr("COMPACTIBLE FREELIST SPACE");
 436   st->print_cr(" Space:");
 437   Space::print_on(st);
 438 
 439   st->print_cr("promoInfo:");
 440   _promoInfo.print_on(st);
 441 
 442   st->print_cr("_smallLinearAllocBlock");
 443   _smallLinearAllocBlock.print_on(st);
 444 
 445   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 446 
 447   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
               _fitStrategy ? "true" : "false", _adaptive_freelists ? "true" : "false");
 449 }
 450 
 451 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 452 const {
 453   reportIndexedFreeListStatistics();
 454   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 455   gclog_or_tty->print_cr("---------------------------");
 456   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 457   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 458     _indexedFreeList[i].print_on(gclog_or_tty);
 459     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 460          fc = fc->next()) {
 461       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 462                           p2i(fc), p2i((HeapWord*)fc + i),
 463                           fc->cantCoalesce() ? "\t CC" : "");
 464     }
 465   }
 466 }
 467 
 468 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 469 const {
 470   _promoInfo.print_on(st);
 471 }
 472 
 473 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 474 const {
 475   _dictionary->report_statistics();
 476   st->print_cr("Layout of Freelists in Tree");
 477   st->print_cr("---------------------------");
 478   _dictionary->print_free_lists(st);
 479 }
 480 
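// A closure that prints each block in the space, identifying it as a live
// object, a dead object, or a free chunk; used by
// dump_at_safepoint_with_locks() below.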
 481 class BlkPrintingClosure: public BlkClosure {
 482   const CMSCollector*             _collector;
 483   const CompactibleFreeListSpace* _sp;
 484   const CMSBitMap*                _live_bit_map;
 485   const bool                      _post_remark;
 486   outputStream*                   _st;
 487 public:
 488   BlkPrintingClosure(const CMSCollector* collector,
 489                      const CompactibleFreeListSpace* sp,
 490                      const CMSBitMap* live_bit_map,
 491                      outputStream* st):
 492     _collector(collector),
 493     _sp(sp),
 494     _live_bit_map(live_bit_map),
 495     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 496     _st(st) { }
 497   size_t do_blk(HeapWord* addr);
 498 };
 499 
 500 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 501   size_t sz = _sp->block_size_no_stall(addr, _collector);
 502   assert(sz != 0, "Should always be able to compute a size");
 503   if (_sp->block_is_obj(addr)) {
 504     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 505     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 506       p2i(addr),
 507       dead ? "dead" : "live",
 508       sz,
 509       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 510     if (CMSPrintObjectsInDump && !dead) {
 511       oop(addr)->print_on(_st);
 512       _st->print_cr("--------------------------------------");
 513     }
 514   } else { // free block
 515     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 516       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 517     if (CMSPrintChunksInDump) {
 518       ((FreeChunk*)addr)->print_on(_st);
 519       _st->print_cr("--------------------------------------");
 520     }
 521   }
 522   return sz;
 523 }
 524 
 525 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 526   outputStream* st) {
 527   st->print_cr("\n=========================");
 528   st->print_cr("Block layout in CMS Heap:");
 529   st->print_cr("=========================");
 530   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 531   blk_iterate(&bpcl);
 532 
 533   st->print_cr("\n=======================================");
 534   st->print_cr("Order & Layout of Promotion Info Blocks");
 535   st->print_cr("=======================================");
 536   print_promo_info_blocks(st);
 537 
 538   st->print_cr("\n===========================");
 539   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 541   print_indexed_free_lists(st);
 542 
 543   st->print_cr("\n=================================");
 544   st->print_cr("Order of Free Lists in Dictionary");
 545   st->print_cr("=================================");
 546   print_dictionary_free_lists(st);
 547 }
 548 
 549 
 550 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 551   assert_lock_strong(&_freelistLock);
 552   assert(PrintFLSStatistics != 0, "Reporting error");
 553   _dictionary->report_statistics();
 554   if (PrintFLSStatistics > 1) {
 555     reportIndexedFreeListStatistics();
 556     size_t total_size = totalSizeInIndexedFreeLists() +
 557                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 558     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 559   }
 560 }
 561 
 562 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 563   assert_lock_strong(&_freelistLock);
 564   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 565                       "--------------------------------\n");
 566   size_t total_size = totalSizeInIndexedFreeLists();
 567   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 568   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 569   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 570   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 571   if (free_blocks != 0) {
 572     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 573   }
 574 }
 575 
 576 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 577   size_t res = 0;
 578   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 579     debug_only(
 580       ssize_t recount = 0;
 581       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 582          fc = fc->next()) {
 583         recount += 1;
 584       }
 585       assert(recount == _indexedFreeList[i].count(),
 586         "Incorrect count in list");
 587     )
 588     res += _indexedFreeList[i].count();
 589   }
 590   return res;
 591 }
 592 
 593 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 594   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 595     if (_indexedFreeList[i].head() != NULL) {
 596       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 597       return (size_t)i;
 598     }
 599   }
 600   return 0;
 601 }
 602 
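// Adjust the end of the space. When growing, the block offset table is
// resized and the newly exposed tail is added to the free data structures
// (or to the linear allocation block in the non-adaptive case).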
 603 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 604   HeapWord* prevEnd = end();
 605   assert(prevEnd != value, "unnecessary set_end call");
 606   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 607         "New end is below unallocated block");
 608   _end = value;
 609   if (prevEnd != NULL) {
 610     // Resize the underlying block offset table.
 611     _bt.resize(pointer_delta(value, bottom()));
 612     if (value <= prevEnd) {
 613       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 614              "New end is below unallocated block");
 615     } else {
 616       // Now, take this new chunk and add it to the free blocks.
 617       // Note that the BOT has not yet been updated for this block.
 618       size_t newFcSize = pointer_delta(value, prevEnd);
 619       // XXX This is REALLY UGLY and should be fixed up. XXX
 620       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 621         // Mark the boundary of the new block in BOT
 622         _bt.mark_block(prevEnd, value);
 623         // put it all in the linAB
 624         MutexLockerEx x(parDictionaryAllocLock(),
 625                         Mutex::_no_safepoint_check_flag);
 626         _smallLinearAllocBlock._ptr = prevEnd;
 627         _smallLinearAllocBlock._word_size = newFcSize;
 628         repairLinearAllocBlock(&_smallLinearAllocBlock);
 629         // Births of chunks put into a LinAB are not recorded.  Births
 630         // of chunks as they are allocated out of a LinAB are.
 631       } else {
 632         // Add the block to the free lists, if possible coalescing it
 633         // with the last free block, and update the BOT and census data.
 634         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 635       }
 636     }
 637   }
 638 }
 639 
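// A dirty-card-to-oop closure specialized for CompactibleFreeListSpace.
// It dispatches to a parallel or serial block walk depending on whether
// GC worker threads are active.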
 640 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 641   CompactibleFreeListSpace* _cfls;
 642   CMSCollector* _collector;
 643 protected:
 644   // Override.
 645 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 646   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 647                                        HeapWord* bottom, HeapWord* top, \
 648                                        ClosureType* cl);                \
 649       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 650                                        HeapWord* bottom, HeapWord* top, \
 651                                        ClosureType* cl);                \
 652     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 653                                        HeapWord* bottom, HeapWord* top, \
 654                                        ClosureType* cl)
 655   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 656   walk_mem_region_with_cl_DECL(FilteringClosure);
 657 
 658 public:
 659   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 660                       CMSCollector* collector,
 661                       ExtendedOopClosure* cl,
 662                       CardTableModRefBS::PrecisionStyle precision,
 663                       HeapWord* boundary) :
 664     Filtering_DCTOC(sp, cl, precision, boundary),
 665     _cfls(sp), _collector(collector) {}
 666 };
 667 
 668 // We de-virtualize the block-related calls below, since we know that our
 669 // space is a CompactibleFreeListSpace.
 670 
 671 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 672 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 673                                                  HeapWord* bottom,              \
 674                                                  HeapWord* top,                 \
 675                                                  ClosureType* cl) {             \
 676    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 677    if (is_par) {                                                                \
 678      assert(SharedHeap::heap()->n_par_threads() ==                              \
 679             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 680      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 681    } else {                                                                     \
 682      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 683    }                                                                            \
 684 }                                                                               \
 685 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 686                                                       HeapWord* bottom,         \
 687                                                       HeapWord* top,            \
 688                                                       ClosureType* cl) {        \
 689   /* Skip parts that are before "mr", in case "block_start" sent us             \
 690      back too far. */                                                           \
 691   HeapWord* mr_start = mr.start();                                              \
 692   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 693   HeapWord* next = bottom + bot_size;                                           \
 694   while (next < mr_start) {                                                     \
 695     bottom = next;                                                              \
 696     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 697     next = bottom + bot_size;                                                   \
 698   }                                                                             \
 699                                                                                 \
 700   while (bottom < top) {                                                        \
 701     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 702         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 703                     oop(bottom)) &&                                             \
 704         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 705       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 706       bottom += _cfls->adjustObjectSize(word_sz);                               \
 707     } else {                                                                    \
 708       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 709     }                                                                           \
 710   }                                                                             \
 711 }                                                                               \
 712 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 713                                                         HeapWord* bottom,       \
 714                                                         HeapWord* top,          \
 715                                                         ClosureType* cl) {      \
 716   /* Skip parts that are before "mr", in case "block_start" sent us             \
 717      back too far. */                                                           \
 718   HeapWord* mr_start = mr.start();                                              \
 719   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 720   HeapWord* next = bottom + bot_size;                                           \
 721   while (next < mr_start) {                                                     \
 722     bottom = next;                                                              \
 723     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 724     next = bottom + bot_size;                                                   \
 725   }                                                                             \
 726                                                                                 \
 727   while (bottom < top) {                                                        \
 728     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 729         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 730                     oop(bottom)) &&                                             \
 731         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 732       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 733       bottom += _cfls->adjustObjectSize(word_sz);                               \
 734     } else {                                                                    \
 735       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 736     }                                                                           \
 737   }                                                                             \
 738 }
 739 
 740 // (There are only two of these, rather than N, because the split is due
 741 // only to the introduction of the FilteringClosure, a local part of the
 742 // impl of this abstraction.)
 743 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 744 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 745 
 746 DirtyCardToOopClosure*
 747 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 748                                       CardTableModRefBS::PrecisionStyle precision,
 749                                       HeapWord* boundary) {
 750   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 751 }
 752 
 753 
 754 // Note on locking for the space iteration functions:
 755 // since the collector's iteration activities are concurrent with
 756 // allocation activities by mutators, absent a suitable mutual exclusion
 757 // mechanism the iterators may go awry. For instance a block being iterated
 758 // may suddenly be allocated or divided up and part of it allocated and
 759 // so on.
 760 
 761 // Apply the given closure to each block in the space.
 762 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 763   assert_lock_strong(freelistLock());
 764   HeapWord *cur, *limit;
 765   for (cur = bottom(), limit = end(); cur < limit;
 766        cur += cl->do_blk_careful(cur));
 767 }
 768 
 769 // Apply the given closure to each block in the space.
 770 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 771   assert_lock_strong(freelistLock());
 772   HeapWord *cur, *limit;
 773   for (cur = bottom(), limit = end(); cur < limit;
 774        cur += cl->do_blk(cur));
 775 }
 776 
 777 // Apply the given closure to each oop in the space.
 778 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 779   assert_lock_strong(freelistLock());
 780   HeapWord *cur, *limit;
 781   size_t curSize;
 782   for (cur = bottom(), limit = end(); cur < limit;
 783        cur += curSize) {
 784     curSize = block_size(cur);
 785     if (block_is_obj(cur)) {
 786       oop(cur)->oop_iterate(cl);
 787     }
 788   }
 789 }
 790 
 791 // NOTE: In the following methods, in order to safely be able to
 792 // apply the closure to an object, we need to be sure that the
 793 // object has been initialized. We are guaranteed that an object
 794 // is initialized if we are holding the Heap_lock with the
 795 // world stopped.
 796 void CompactibleFreeListSpace::verify_objects_initialized() const {
 797   if (is_init_completed()) {
 798     assert_locked_or_safepoint(Heap_lock);
 799     if (Universe::is_fully_initialized()) {
 800       guarantee(SafepointSynchronize::is_at_safepoint(),
 801                 "Required for objects to be initialized");
 802     }
 803   } // else make a concession at vm start-up
 804 }
 805 
 806 // Apply the given closure to each object in the space
 807 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 808   assert_lock_strong(freelistLock());
 809   NOT_PRODUCT(verify_objects_initialized());
 810   HeapWord *cur, *limit;
 811   size_t curSize;
 812   for (cur = bottom(), limit = end(); cur < limit;
 813        cur += curSize) {
 814     curSize = block_size(cur);
 815     if (block_is_obj(cur)) {
 816       blk->do_object(oop(cur));
 817     }
 818   }
 819 }
 820 
 821 // Apply the given closure to each live object in the space
//   The use of CompactibleFreeListSpace by the
// ConcurrentMarkSweepGeneration for concurrent GCs allows objects in
// the space to hold references to objects that are no longer valid.
// For example, an object may reference another object that has
// already been swept up (collected).  This method uses obj_is_alive()
// to determine whether it is safe to apply the closure to an object.
// See obj_is_alive() for details on how the liveness of an object is
// decided.
 830 
 831 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 832   assert_lock_strong(freelistLock());
 833   NOT_PRODUCT(verify_objects_initialized());
 834   HeapWord *cur, *limit;
 835   size_t curSize;
 836   for (cur = bottom(), limit = end(); cur < limit;
 837        cur += curSize) {
 838     curSize = block_size(cur);
 839     if (block_is_obj(cur) && obj_is_alive(cur)) {
 840       blk->do_object(oop(cur));
 841     }
 842   }
 843 }
 844 
 845 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 846                                                   UpwardsObjectClosure* cl) {
 847   assert_locked(freelistLock());
 848   NOT_PRODUCT(verify_objects_initialized());
 849   assert(!mr.is_empty(), "Should be non-empty");
 850   // We use MemRegion(bottom(), end()) rather than used_region() below
 851   // because the two are not necessarily equal for some kinds of
 852   // spaces, in particular, certain kinds of free list spaces.
 853   // We could use the more complicated but more precise:
 854   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 855   // but the slight imprecision seems acceptable in the assertion check.
 856   assert(MemRegion(bottom(), end()).contains(mr),
 857          "Should be within used space");
 858   HeapWord* prev = cl->previous();   // max address from last time
 859   if (prev >= mr.end()) { // nothing to do
 860     return;
 861   }
 862   // This assert will not work when we go from cms space to perm
 863   // space, and use same closure. Easy fix deferred for later. XXX YSR
 864   // assert(prev == NULL || contains(prev), "Should be within space");
 865 
 866   bool last_was_obj_array = false;
 867   HeapWord *blk_start_addr, *region_start_addr;
 868   if (prev > mr.start()) {
 869     region_start_addr = prev;
 870     blk_start_addr    = prev;
 871     // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
 873     // in this region due to a particular coalescing policy.
 874     // Relax the assertion so that the case where the unallocated
 875     // block is maintained and "prev" is beyond the unallocated
 876     // block does not cause the assertion to fire.
 877     assert((BlockOffsetArrayUseUnallocatedBlock &&
 878             (!is_in(prev))) ||
 879            (blk_start_addr == block_start(region_start_addr)), "invariant");
 880   } else {
 881     region_start_addr = mr.start();
 882     blk_start_addr    = block_start(region_start_addr);
 883   }
 884   HeapWord* region_end_addr = mr.end();
 885   MemRegion derived_mr(region_start_addr, region_end_addr);
 886   while (blk_start_addr < region_end_addr) {
 887     const size_t size = block_size(blk_start_addr);
 888     if (block_is_obj(blk_start_addr)) {
 889       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 890     } else {
 891       last_was_obj_array = false;
 892     }
 893     blk_start_addr += size;
 894   }
 895   if (!last_was_obj_array) {
 896     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 897            "Should be within (closed) used space");
 898     assert(blk_start_addr > prev, "Invariant");
 899     cl->set_previous(blk_start_addr); // min address for next time
 900   }
 901 }
 902 
 903 
 904 // Callers of this iterator beware: The closure application should
 905 // be robust in the face of uninitialized objects and should (always)
 906 // return a correct size so that the next addr + size below gives us a
 907 // valid block boundary. [See for instance,
 908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 909 // in ConcurrentMarkSweepGeneration.cpp.]
 910 HeapWord*
 911 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 912   ObjectClosureCareful* cl) {
 913   assert_lock_strong(freelistLock());
 914   // Can't use used_region() below because it may not necessarily
 915   // be the same as [bottom(),end()); although we could
 916   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 917   // that appears too cumbersome, so we just do the simpler check
 918   // in the assertion below.
 919   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 920          "mr should be non-empty and within used space");
 921   HeapWord *addr, *end;
 922   size_t size;
 923   for (addr = block_start_careful(mr.start()), end  = mr.end();
 924        addr < end; addr += size) {
 925     FreeChunk* fc = (FreeChunk*)addr;
 926     if (fc->is_free()) {
 927       // Since we hold the free list lock, which protects direct
 928       // allocation in this generation by mutators, a free object
 929       // will remain free throughout this iteration code.
 930       size = fc->size();
 931     } else {
 932       // Note that the object need not necessarily be initialized,
 933       // because (for instance) the free list lock does NOT protect
 934       // object initialization. The closure application below must
 935       // therefore be correct in the face of uninitialized objects.
 936       size = cl->do_object_careful_m(oop(addr), mr);
 937       if (size == 0) {
 938         // An unparsable object found. Signal early termination.
 939         return addr;
 940       }
 941     }
 942   }
 943   return NULL;
 944 }
 945 
 946 
 947 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 948   NOT_PRODUCT(verify_objects_initialized());
 949   return _bt.block_start(p);
 950 }
 951 
 952 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 953   return _bt.block_start_careful(p);
 954 }
 955 
 956 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 957   NOT_PRODUCT(verify_objects_initialized());
 958   // This must be volatile, or else there is a danger that the compiler
 959   // will compile the code below into a sometimes-infinite loop, by keeping
 960   // the value read the first time in a register.
 961   while (true) {
 962     // We must do this until we get a consistent view of the object.
 963     if (FreeChunk::indicatesFreeChunk(p)) {
 964       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 965       size_t res = fc->size();
 966 
 967       // Bugfix for systems with weak memory model (PPC64/IA64). The
 968       // block's free bit was set and we have read the size of the
 969       // block. Acquire and check the free bit again. If the block is
 970       // still free, the read size is correct.
 971       OrderAccess::acquire();
 972 
 973       // If the object is still a free chunk, return the size, else it
 974       // has been allocated so try again.
 975       if (FreeChunk::indicatesFreeChunk(p)) {
 976         assert(res != 0, "Block size should not be 0");
 977         return res;
 978       }
 979     } else {
 980       // must read from what 'p' points to in each loop.
 981       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 982       if (k != NULL) {
 983         assert(k->is_klass(), "Should really be klass oop.");
 984         oop o = (oop)p;
 985         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 986 
 987         // Bugfix for systems with weak memory model (PPC64/IA64).
 988         // The object o may be an array. Acquire to make sure that the array
 989         // size (third word) is consistent.
 990         OrderAccess::acquire();
 991 
 992         size_t res = o->size_given_klass(k);
 993         res = adjustObjectSize(res);
 994         assert(res != 0, "Block size should not be 0");
 995         return res;
 996       }
 997     }
 998   }
 999 }
1000 
1001 // TODO: Now that is_parsable is gone, we should combine these two functions.
1002 // A variant of the above that uses the Printezis bits for
1003 // unparsable but allocated objects. This avoids any possible
1004 // stalls waiting for mutators to initialize objects, and is
1005 // thus potentially faster than the variant above. However,
1006 // this variant may return a zero size for a block that is
1007 // under mutation and for which a consistent size cannot be
1008 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1009 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1010                                                      const CMSCollector* c)
1011 const {
1012   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1013   // This must be volatile, or else there is a danger that the compiler
1014   // will compile the code below into a sometimes-infinite loop, by keeping
1015   // the value read the first time in a register.
1016   DEBUG_ONLY(uint loops = 0;)
1017   while (true) {
1018     // We must do this until we get a consistent view of the object.
1019     if (FreeChunk::indicatesFreeChunk(p)) {
1020       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1021       size_t res = fc->size();
1022 
1023       // Bugfix for systems with weak memory model (PPC64/IA64). The
1024       // free bit of the block was set and we have read the size of
1025       // the block. Acquire and check the free bit again. If the
1026       // block is still free, the read size is correct.
1027       OrderAccess::acquire();
1028 
1029       if (FreeChunk::indicatesFreeChunk(p)) {
1030         assert(res != 0, "Block size should not be 0");
1031         assert(loops == 0, "Should be 0");
1032         return res;
1033       }
1034     } else {
1035       // must read from what 'p' points to in each loop.
1036       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1037       // We trust the size of any object that has a non-NULL
1038       // klass and (for those in the perm gen) is parsable
1039       // -- irrespective of its conc_safe-ty.
1040       if (k != NULL) {
1041         assert(k->is_klass(), "Should really be klass oop.");
1042         oop o = (oop)p;
1043         assert(o->is_oop(), "Should be an oop");
1044 
1045         // Bugfix for systems with weak memory model (PPC64/IA64).
1046         // The object o may be an array. Acquire to make sure that the array
1047         // size (third word) is consistent.
1048         OrderAccess::acquire();
1049 
1050         size_t res = o->size_given_klass(k);
1051         res = adjustObjectSize(res);
1052         assert(res != 0, "Block size should not be 0");
1053         return res;
1054       } else {
1055         // May return 0 if P-bits not present.
1056         return c->block_size_if_printezis_bits(p);
1057       }
1058     }
1059     assert(loops == 0, "Can loop at most once");
1060     DEBUG_ONLY(loops++;)
1061   }
1062 }
1063 
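// Variant of block_size() used when there is no concurrent (parallel)
// mutation of the block's free/allocated state, so no retry loop is needed.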
1064 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1065   NOT_PRODUCT(verify_objects_initialized());
1066   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1067   FreeChunk* fc = (FreeChunk*)p;
1068   if (fc->is_free()) {
1069     return fc->size();
1070   } else {
1071     // Ignore mark word because this may be a recently promoted
1072     // object whose mark word is used to chain together grey
1073     // objects (the last one would have a null value).
1074     assert(oop(p)->is_oop(true), "Should be an oop");
1075     return adjustObjectSize(oop(p)->size());
1076   }
1077 }
1078 
1079 // This implementation assumes that the property of "being an object" is
1080 // stable.  But being a free chunk may not be (because of parallel
// promotion).
1082 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1083   FreeChunk* fc = (FreeChunk*)p;
1084   assert(is_in_reserved(p), "Should be in space");
1085   // When doing a mark-sweep-compact of the CMS generation, this
1086   // assertion may fail because prepare_for_compaction() uses
1087   // space that is garbage to maintain information on ranges of
1088   // live objects so that these live ranges can be moved as a whole.
1089   // Comment out this assertion until that problem can be solved
  // (i.e., the block start calculation may look at objects
  // at addresses below "p" in finding the object that contains "p",
  // and those objects (if garbage) may have been modified to hold
  // live range information).
1094   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1095   //        "Should be a block boundary");
1096   if (FreeChunk::indicatesFreeChunk(p)) return false;
1097   Klass* k = oop(p)->klass_or_null();
1098   if (k != NULL) {
1099     // Ignore mark word because it may have been used to
1100     // chain together promoted objects (the last one
1101     // would have a null value).
1102     assert(oop(p)->is_oop(true), "Should be an oop");
1103     return true;
1104   } else {
1105     return false;  // Was not an object at the start of collection.
1106   }
1107 }
1108 
1109 // Check if the object is alive. This fact is checked either by consulting
1110 // the main marking bitmap in the sweeping phase or, if it's a permanent
1111 // generation and we're not in the sweeping phase, by checking the
1112 // perm_gen_verify_bit_map where we store the "deadness" information if
1113 // we did not sweep the perm gen in the most recent previous GC cycle.
1114 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1115   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1116          "Else races are possible");
1117   assert(block_is_obj(p), "The address should point to an object");
1118 
1119   // If we're sweeping, we use object liveness information from the main bit map
1120   // for both perm gen and old gen.
1121   // We don't need to lock the bitmap (live_map or dead_map below), because
1122   // EITHER we are in the middle of the sweeping phase, and the
1123   // main marking bit map (live_map below) is locked,
1124   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1125   // is stable, because it's mutated only in the sweeping phase.
1126   // NOTE: This method is also used by jmap where, if class unloading is
1127   // off, the results can return "false" for legitimate perm objects,
1128   // when we are not in the midst of a sweeping phase, which can result
1129   // in jmap not reporting certain perm gen objects. This will be moot
1130   // if/when the perm gen goes away in the future.
1131   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1132     CMSBitMap* live_map = _collector->markBitMap();
1133     return live_map->par_isMarked((HeapWord*) p);
1134   }
1135   return true;
1136 }
1137 
1138 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1139   FreeChunk* fc = (FreeChunk*)p;
1140   assert(is_in_reserved(p), "Should be in space");
1141   assert(_bt.block_start(p) == p, "Should be a block boundary");
1142   if (!fc->is_free()) {
1143     // Ignore mark word because it may have been used to
1144     // chain together promoted objects (the last one
1145     // would have a null value).
1146     assert(oop(p)->is_oop(true), "Should be an oop");
1147     return true;
1148   }
1149   return false;
1150 }
1151 
1152 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the free list lock when you call this.
1154 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1155   size_t size = 0;
1156   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1157     debug_only(
1158       // We may be calling here without the lock in which case we
1159       // won't do this modest sanity check.
1160       if (freelistLock()->owned_by_self()) {
1161         size_t total_list_size = 0;
1162         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1163           fc = fc->next()) {
1164           total_list_size += i;
1165         }
1166         assert(total_list_size == i * _indexedFreeList[i].count(),
1167                "Count in list is incorrect");
1168       }
1169     )
1170     size += i * _indexedFreeList[i].count();
1171   }
1172   return size;
1173 }
1174 
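// Thread-safe entry point for allocation: takes the free list lock and then
// delegates to allocate().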
1175 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1176   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1177   return allocate(size);
1178 }
1179 
1180 HeapWord*
1181 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1182   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1183 }
1184 
1185 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1186   assert_lock_strong(freelistLock());
1187   HeapWord* res = NULL;
1188   assert(size == adjustObjectSize(size),
1189          "use adjustObjectSize() before calling into allocate()");
1190 
1191   if (_adaptive_freelists) {
1192     res = allocate_adaptive_freelists(size);
1193   } else {  // non-adaptive free lists
1194     res = allocate_non_adaptive_freelists(size);
1195   }
1196 
1197   if (res != NULL) {
1198     // check that res does lie in this space!
1199     assert(is_in_reserved(res), "Not in this space!");
1200     assert(is_aligned((void*)res), "alignment check");
1201 
1202     FreeChunk* fc = (FreeChunk*)res;
1203     fc->markNotFree();
1204     assert(!fc->is_free(), "shouldn't be marked free");
1205     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1206     // Verify that the block offset table shows this to
1207     // be a single block, but not one which is unallocated.
1208     _bt.verify_single_block(res, size);
1209     _bt.verify_not_unallocated(res, size);
1210     // mangle a just allocated object with a distinct pattern.
1211     debug_only(fc->mangleAllocated(size));
1212   }
1213 
1214   return res;
1215 }
1216 
1217 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1218   HeapWord* res = NULL;
1219   // try and use linear allocation for smaller blocks
1220   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1221     // if successful, the following also adjusts block offset table
1222     res = getChunkFromSmallLinearAllocBlock(size);
1223   }
1224   // Else triage to indexed lists for smaller sizes
1225   if (res == NULL) {
1226     if (size < SmallForDictionary) {
1227       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1228     } else {
1229       // else get it from the big dictionary; if even this doesn't
1230       // work we are out of luck.
1231       res = (HeapWord*)getChunkFromDictionaryExact(size);
1232     }
1233   }
1234 
1235   return res;
1236 }
1237 
1238 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1239   assert_lock_strong(freelistLock());
1240   HeapWord* res = NULL;
1241   assert(size == adjustObjectSize(size),
1242          "use adjustObjectSize() before calling into allocate()");
1243 
1244   // Strategy
1245   //   if small
1246   //     exact size from small object indexed list if small
1247   //     small or large linear allocation block (linAB) as appropriate
1248   //     take from lists of greater sized chunks
1249   //   else
1250   //     dictionary
1251   //     small or large linear allocation block if it has the space
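       // As a purely illustrative walk-through (sizes below are assumptions that
       // depend on word size and flag settings): a 24-word request first tries
       // _indexedFreeList[24]; if that list is empty, it tries the small linAB;
       // failing that, it raids a larger exact list via getChunkFromGreater().
       // A request of IndexSetSize words or more goes straight to the
       // dictionary, falling back to the small linAB remainder if that fails.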
1252   // Try allocating exact size from indexTable first
1253   if (size < IndexSetSize) {
1254     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1255     if (res != NULL) {
1256       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1257         "Not removed from free list");
1258       // no block offset table adjustment is necessary on blocks in
1259       // the indexed lists.
1260 
1261     // Try allocating from the small LinAB
1262     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1263         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1264         // if successful, the above also adjusts block offset table
1265         // Note that this call will refill the LinAB to
1266         // satisfy the request.  This is different than
1267         // evm.
1268         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1269     } else {
1270       // Raid the exact free lists larger than size, even if they are not
1271       // overpopulated.
1272       res = (HeapWord*) getChunkFromGreater(size);
1273     }
1274   } else {
1275     // Big objects get allocated directly from the dictionary.
1276     res = (HeapWord*) getChunkFromDictionaryExact(size);
1277     if (res == NULL) {
1278       // Try hard not to fail since an allocation failure will likely
1279       // trigger a synchronous GC.  Try to get the space from the
1280       // allocation blocks.
1281       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1282     }
1283   }
1284 
1285   return res;
1286 }
1287 
1288 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1289 // when promoting an object.
1290 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1291   // Depending on the object size, expansion may require refilling either a
1292   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2 * MinChunkSize
1293   // is added because the dictionary may over-allocate to avoid fragmentation.
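       // For instance (illustrative figures only): with obj_size == 100 words,
       // a non-adaptive linAB refill size of 256 words and a PromotionInfo
       // refill size of 64 words, the estimate below would come to
       // MAX2(100, 256) + 64 + 2 * MinChunkSize words.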
1294   size_t space = obj_size;
1295   if (!_adaptive_freelists) {
1296     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1297   }
1298   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1299   return space;
1300 }
1301 
1302 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1303   FreeChunk* ret;
1304 
1305   assert(numWords >= MinChunkSize, "Size is less than minimum");
1306   assert(linearAllocationWouldFail() || bestFitFirst(),
1307     "Should not be here");
1308 
1309   size_t i;
1310   size_t currSize = numWords + MinChunkSize;
1311   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1312   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1313     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1314     if (fl->head()) {
1315       ret = getFromListGreater(fl, numWords);
1316       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1317       return ret;
1318     }
1319   }
1320 
1321   currSize = MAX2((size_t)SmallForDictionary,
1322                   (size_t)(numWords + MinChunkSize));
1323 
1324   /* Try to get a chunk that satisfies request, while avoiding
1325      fragmentation that can't be handled. */
1326   {
1327     ret =  dictionary()->get_chunk(currSize);
1328     if (ret != NULL) {
1329       assert(ret->size() - numWords >= MinChunkSize,
1330              "Chunk is too small");
1331       _bt.allocated((HeapWord*)ret, ret->size());
1332       /* Carve returned chunk. */
1333       (void) splitChunkAndReturnRemainder(ret, numWords);
1334       /* Label this as no longer a free chunk. */
1335       assert(ret->is_free(), "This chunk should be free");
1336       ret->link_prev(NULL);
1337     }
1338     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1339     return ret;
1340   }
1341   ShouldNotReachHere();
1342 }
1343 
1344 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1345   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1346   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1347 }
1348 
1349 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1350   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1351          (_smallLinearAllocBlock._word_size == fc->size()),
1352          "Linear allocation block shows incorrect size");
1353   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1354           (_smallLinearAllocBlock._word_size == fc->size()));
1355 }
1356 
1357 // Check if the purported free chunk is present either as a linear
1358 // allocation block, the size-indexed table of (smaller) free blocks,
1359 // or the larger free blocks kept in the binary tree dictionary.
1360 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1361   if (verify_chunk_is_linear_alloc_block(fc)) {
1362     return true;
1363   } else if (fc->size() < IndexSetSize) {
1364     return verifyChunkInIndexedFreeLists(fc);
1365   } else {
1366     return dictionary()->verify_chunk_in_free_list(fc);
1367   }
1368 }
1369 
1370 #ifndef PRODUCT
1371 void CompactibleFreeListSpace::assert_locked() const {
1372   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1373 }
1374 
1375 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1376   CMSLockVerifier::assert_locked(lock);
1377 }
1378 #endif
1379 
1380 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1381   // In the parallel case, the main thread holds the free list lock
1382   // on behalf of the parallel threads.
1383   FreeChunk* fc;
1384   {
1385     // If GC is parallel, this might be called by several threads.
1386     // This should be rare enough that the locking overhead won't affect
1387     // the sequential code.
1388     MutexLockerEx x(parDictionaryAllocLock(),
1389                     Mutex::_no_safepoint_check_flag);
1390     fc = getChunkFromDictionary(size);
1391   }
1392   if (fc != NULL) {
1393     fc->dontCoalesce();
1394     assert(fc->is_free(), "Should be free, but not coalescable");
1395     // Verify that the block offset table shows this to
1396     // be a single block, but not one which is unallocated.
1397     _bt.verify_single_block((HeapWord*)fc, fc->size());
1398     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1399   }
1400   return fc;
1401 }
1402 
1403 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1404   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1405   assert_locked();
1406 
1407   // if we are tracking promotions, then first ensure space for
1408   // promotion (including spooling space for saving header if necessary).
1409   // then allocate and copy, then track promoted info if needed.
1410   // When tracking (see PromotionInfo::track()), the mark word may
1411   // be displaced and in this case restoration of the mark word
1412   // occurs in the (oop_since_save_marks_)iterate phase.
1413   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1414     return NULL;
1415   }
1416   // Call the allocate(size_t, bool) form directly to avoid the
1417   // additional call through the allocate(size_t) form.  Having
1418   // the compiler inline the call is problematic because allocate(size_t)
1419   // is a virtual method.
1420   HeapWord* res = allocate(adjustObjectSize(obj_size));
1421   if (res != NULL) {
1422     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1423     // if we should be tracking promotions, do so.
1424     if (_promoInfo.tracking()) {
1425         _promoInfo.track((PromotedObject*)res);
1426     }
1427   }
1428   return oop(res);
1429 }
1430 
1431 HeapWord*
1432 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1433   assert_locked();
1434   assert(size >= MinChunkSize, "minimum chunk size");
1435   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1436     "maximum from smallLinearAllocBlock");
1437   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1438 }
1439 
1440 HeapWord*
1441 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1442                                                        size_t size) {
1443   assert_locked();
1444   assert(size >= MinChunkSize, "too small");
1445   HeapWord* res = NULL;
1446   // Try to do linear allocation from blk; if the block is empty, bail out.
1447   if (blk->_word_size == 0) {
1448     // We have probably been unable to fill this either in the prologue or
1449     // when it was exhausted at the last linear allocation. Bail out until
1450     // next time.
1451     assert(blk->_ptr == NULL, "consistency check");
1452     return NULL;
1453   }
1454   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1455   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1456   if (res != NULL) return res;
1457 
1458   // about to exhaust this linear allocation block
1459   if (blk->_word_size == size) { // exactly satisfied
1460     res = blk->_ptr;
1461     _bt.allocated(res, blk->_word_size);
1462   } else if (size + MinChunkSize <= blk->_refillSize) {
1463     size_t sz = blk->_word_size;
1464     // Update _unallocated_block if the size is such that the chunk would be
1465     // returned to the indexed free lists.  All other chunks in the indexed
1466     // free lists are allocated from the dictionary so that _unallocated_block
1467     // has already been adjusted for them.  Do it here so that the adjustment
1468     // is made for all chunks added back to the indexed free lists.
1469     if (sz < SmallForDictionary) {
1470       _bt.allocated(blk->_ptr, sz);
1471     }
1472     // Return the chunk that isn't big enough, and then refill below.
1473     addChunkToFreeLists(blk->_ptr, sz);
1474     split_birth(sz);
1475     // Don't keep statistics on adding back chunk from a LinAB.
1476   } else {
1477     // A refilled block would not satisfy the request.
1478     return NULL;
1479   }
1480 
1481   blk->_ptr = NULL; blk->_word_size = 0;
1482   refillLinearAllocBlock(blk);
1483   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1484          "block was replenished");
1485   if (res != NULL) {
1486     split_birth(size);
1487     repairLinearAllocBlock(blk);
1488   } else if (blk->_ptr != NULL) {
1489     res = blk->_ptr;
1490     size_t blk_size = blk->_word_size;
1491     blk->_word_size -= size;
1492     blk->_ptr  += size;
1493     split_birth(size);
1494     repairLinearAllocBlock(blk);
1495     // Update BOT last so that other (parallel) GC threads see a consistent
1496     // view of the BOT and free blocks.
1497     // Above must occur before BOT is updated below.
1498     OrderAccess::storestore();
1499     _bt.split_block(res, blk_size, size);  // adjust block offset table
1500   }
1501   return res;
1502 }
1503 
1504 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1505                                         LinearAllocBlock* blk,
1506                                         size_t size) {
1507   assert_locked();
1508   assert(size >= MinChunkSize, "too small");
1509 
1510   HeapWord* res = NULL;
1511   // This is the common case.  Keep it simple.
1512   if (blk->_word_size >= size + MinChunkSize) {
1513     assert(blk->_ptr != NULL, "consistency check");
1514     res = blk->_ptr;
1515     // Note that the BOT is up-to-date for the linAB before allocation.  It
1516     // indicates the start of the linAB.  The split_block() updates the
1517     // BOT for the linAB after the allocation (indicates the start of the
1518     // next chunk to be allocated).
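         // For example (illustrative): if blk->_ptr == p and blk->_word_size == 100,
         // a 10-word request returns p, leaves the linAB spanning [p + 10, p + 100),
         // and split_block() below records p + 10 as the start of the remaining
         // block in the BOT.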
1519     size_t blk_size = blk->_word_size;
1520     blk->_word_size -= size;
1521     blk->_ptr  += size;
1522     split_birth(size);
1523     repairLinearAllocBlock(blk);
1524     // Update BOT last so that other (parallel) GC threads see a consistent
1525     // view of the BOT and free blocks.
1526     // Above must occur before BOT is updated below.
1527     OrderAccess::storestore();
1528     _bt.split_block(res, blk_size, size);  // adjust block offset table
1529     _bt.allocated(res, size);
1530   }
1531   return res;
1532 }
1533 
1534 FreeChunk*
1535 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1536   assert_locked();
1537   assert(size < SmallForDictionary, "just checking");
1538   FreeChunk* res;
1539   res = _indexedFreeList[size].get_chunk_at_head();
1540   if (res == NULL) {
1541     res = getChunkFromIndexedFreeListHelper(size);
1542   }
1543   _bt.verify_not_unallocated((HeapWord*) res, size);
1544   assert(res == NULL || res->size() == size, "Incorrect block size");
1545   return res;
1546 }
1547 
1548 FreeChunk*
1549 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1550   bool replenish) {
1551   assert_locked();
1552   FreeChunk* fc = NULL;
1553   if (size < SmallForDictionary) {
1554     assert(_indexedFreeList[size].head() == NULL ||
1555       _indexedFreeList[size].surplus() <= 0,
1556       "List for this size should be empty or underpopulated");
1557     // Try best fit in exact lists before replenishing the list
1558     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1559       // Replenish list.
1560       //
1561       // Things tried that failed.
1562       //   Tried allocating out of the two LinAB's first before
1563       // replenishing lists.
1564       //   Tried small linAB of size 256 (size in indexed list)
1565       // and replenishing indexed lists from the small linAB.
1566       //
1567       FreeChunk* newFc = NULL;
1568       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1569       if (replenish_size < SmallForDictionary) {
1570         // Do not replenish from an underpopulated size.
1571         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1572             _indexedFreeList[replenish_size].head() != NULL) {
1573           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1574         } else if (bestFitFirst()) {
1575           newFc = bestFitSmall(replenish_size);
1576         }
1577       }
1578       if (newFc == NULL && replenish_size > size) {
1579         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1580         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1581       }
1582       // Note: The stats update for the split-death of the block obtained
1583       // above is recorded below, precisely when we know we are actually
1584       // going to split it into more than one piece.
1585       if (newFc != NULL) {
1586         if  (replenish || CMSReplenishIntermediate) {
1587           // Replenish this list and return one block to caller.
1588           size_t i;
1589           FreeChunk *curFc, *nextFc;
1590           size_t num_blk = newFc->size() / size;
1591           assert(num_blk >= 1, "Smaller than requested?");
1592           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1593           if (num_blk > 1) {
1594             // we are sure we will be splitting the block just obtained
1595             // into multiple pieces; record the split-death of the original
1596             splitDeath(replenish_size);
1597           }
1598           // carve up and link blocks 0, ..., num_blk - 2
1599           // The last chunk is not added to the lists but is returned as the
1600           // free chunk.
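               // (Illustratively: if newFc->size() == 5 * size, chunks 0 through 3
               // are linked onto _indexedFreeList[size] and chunk 4 is handed back.)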
1601           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1602                i = 0;
1603                i < (num_blk - 1);
1604                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1605                i++) {
1606             curFc->set_size(size);
1607             // Don't record this as a return in order to try and
1608             // determine the "returns" from a GC.
1609             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1610             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1611             _bt.mark_block((HeapWord*)curFc, size);
1612             split_birth(size);
1613             // Don't record the initial population of the indexed list
1614             // as a split birth.
1615           }
1616 
1617           // check that the arithmetic was OK above
1618           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1619             "inconsistency in carving newFc");
1620           curFc->set_size(size);
1621           _bt.mark_block((HeapWord*)curFc, size);
1622           split_birth(size);
1623           fc = curFc;
1624         } else {
1625           // Return entire block to caller
1626           fc = newFc;
1627         }
1628       }
1629     }
1630   } else {
1631     // Get a free chunk from the free chunk dictionary to be returned to
1632     // replenish the indexed free list.
1633     fc = getChunkFromDictionaryExact(size);
1634   }
1635   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1636   return fc;
1637 }
1638 
1639 FreeChunk*
1640 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1641   assert_locked();
1642   FreeChunk* fc = _dictionary->get_chunk(size,
1643                                          FreeBlockDictionary<FreeChunk>::atLeast);
1644   if (fc == NULL) {
1645     return NULL;
1646   }
1647   _bt.allocated((HeapWord*)fc, fc->size());
1648   if (fc->size() >= size + MinChunkSize) {
1649     fc = splitChunkAndReturnRemainder(fc, size);
1650   }
1651   assert(fc->size() >= size, "chunk too small");
1652   assert(fc->size() < size + MinChunkSize, "chunk too big");
1653   _bt.verify_single_block((HeapWord*)fc, fc->size());
1654   return fc;
1655 }
1656 
1657 FreeChunk*
1658 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1659   assert_locked();
1660   FreeChunk* fc = _dictionary->get_chunk(size,
1661                                          FreeBlockDictionary<FreeChunk>::atLeast);
1662   if (fc == NULL) {
1663     return fc;
1664   }
1665   _bt.allocated((HeapWord*)fc, fc->size());
1666   if (fc->size() == size) {
1667     _bt.verify_single_block((HeapWord*)fc, size);
1668     return fc;
1669   }
1670   assert(fc->size() > size, "get_chunk() guarantee");
1671   if (fc->size() < size + MinChunkSize) {
1672     // Return the chunk to the dictionary and go get a bigger one.
1673     returnChunkToDictionary(fc);
1674     fc = _dictionary->get_chunk(size + MinChunkSize,
1675                                 FreeBlockDictionary<FreeChunk>::atLeast);
1676     if (fc == NULL) {
1677       return NULL;
1678     }
1679     _bt.allocated((HeapWord*)fc, fc->size());
1680   }
1681   assert(fc->size() >= size + MinChunkSize, "tautology");
1682   fc = splitChunkAndReturnRemainder(fc, size);
1683   assert(fc->size() == size, "chunk is wrong size");
1684   _bt.verify_single_block((HeapWord*)fc, size);
1685   return fc;
1686 }
1687 
1688 void
1689 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1690   assert_locked();
1691 
1692   size_t size = chunk->size();
1693   _bt.verify_single_block((HeapWord*)chunk, size);
1694   // adjust _unallocated_block downward, as necessary
1695   _bt.freed((HeapWord*)chunk, size);
1696   _dictionary->return_chunk(chunk);
1697 #ifndef PRODUCT
1698   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1699     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1700     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1701     tl->verify_stats();
1702   }
1703 #endif // PRODUCT
1704 }
1705 
1706 void
1707 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1708   assert_locked();
1709   size_t size = fc->size();
1710   _bt.verify_single_block((HeapWord*) fc, size);
1711   _bt.verify_not_unallocated((HeapWord*) fc, size);
1712   if (_adaptive_freelists) {
1713     _indexedFreeList[size].return_chunk_at_tail(fc);
1714   } else {
1715     _indexedFreeList[size].return_chunk_at_head(fc);
1716   }
1717 #ifndef PRODUCT
1718   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1719      _indexedFreeList[size].verify_stats();
1720   }
1721 #endif // PRODUCT
1722 }
1723 
1724 // Add the chunk to the end of the largest block, if that block
1725 // happens to be coterminal with the chunk, and update BOT and
1726 // census data. We would of course have preferred to coalesce it
1727 // with the last block, but it's currently less expensive to find
1728 // the largest block than it is to find the last.
1729 void
1730 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1731   HeapWord* chunk, size_t     size) {
1732   // check that the chunk does lie in this space!
1733   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1734   // One of the parallel gc task threads may be here
1735   // whilst others are allocating.
1736   Mutex* lock = &_parDictionaryAllocLock;
1737   FreeChunk* ec;
1738   {
1739     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1740     ec = dictionary()->find_largest_dict();  // get largest block
1741     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1742       // It's a coterminal block - we can coalesce.
1743       size_t old_size = ec->size();
1744       coalDeath(old_size);
1745       removeChunkFromDictionary(ec);
1746       size += old_size;
1747     } else {
1748       ec = (FreeChunk*)chunk;
1749     }
1750   }
1751   ec->set_size(size);
1752   debug_only(ec->mangleFreed(size));
1753   if (size < SmallForDictionary) {
1754     lock = _indexedFreeListParLocks[size];
1755   }
1756   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1757   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1758   // record the birth under the lock since the recording involves
1759   // manipulation of the list on which the chunk lives and
1760   // if the chunk is allocated and is the last on the list,
1761   // the list can go away.
1762   coalBirth(size);
1763 }
1764 
1765 void
1766 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1767                                               size_t     size) {
1768   // check that the chunk does lie in this space!
1769   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1770   assert_locked();
1771   _bt.verify_single_block(chunk, size);
1772 
1773   FreeChunk* fc = (FreeChunk*) chunk;
1774   fc->set_size(size);
1775   debug_only(fc->mangleFreed(size));
1776   if (size < SmallForDictionary) {
1777     returnChunkToFreeList(fc);
1778   } else {
1779     returnChunkToDictionary(fc);
1780   }
1781 }
1782 
1783 void
1784 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1785   size_t size, bool coalesced) {
1786   assert_locked();
1787   assert(chunk != NULL, "null chunk");
1788   if (coalesced) {
1789     // repair BOT
1790     _bt.single_block(chunk, size);
1791   }
1792   addChunkToFreeLists(chunk, size);
1793 }
1794 
1795 // We _must_ find the purported chunk on our free lists;
1796 // we assert if we don't.
1797 void
1798 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1799   size_t size = fc->size();
1800   assert_locked();
1801   debug_only(verifyFreeLists());
1802   if (size < SmallForDictionary) {
1803     removeChunkFromIndexedFreeList(fc);
1804   } else {
1805     removeChunkFromDictionary(fc);
1806   }
1807   _bt.verify_single_block((HeapWord*)fc, size);
1808   debug_only(verifyFreeLists());
1809 }
1810 
1811 void
1812 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1813   size_t size = fc->size();
1814   assert_locked();
1815   assert(fc != NULL, "null chunk");
1816   _bt.verify_single_block((HeapWord*)fc, size);
1817   _dictionary->remove_chunk(fc);
1818   // adjust _unallocated_block upward, as necessary
1819   _bt.allocated((HeapWord*)fc, size);
1820 }
1821 
1822 void
1823 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1824   assert_locked();
1825   size_t size = fc->size();
1826   _bt.verify_single_block((HeapWord*)fc, size);
1827   NOT_PRODUCT(
1828     if (FLSVerifyIndexTable) {
1829       verifyIndexedFreeList(size);
1830     }
1831   )
1832   _indexedFreeList[size].remove_chunk(fc);
1833   NOT_PRODUCT(
1834     if (FLSVerifyIndexTable) {
1835       verifyIndexedFreeList(size);
1836     }
1837   )
1838 }
1839 
1840 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1841   /* A hint is the next larger size that has a surplus.
1842      Start search at a size large enough to guarantee that
1843      the excess is >= MinChunkSize. */
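       /* Illustrative example (made-up sizes): for numWords == 20 the search
          starts at a size of at least 20 + MinChunkSize; if the hint chain
          leads to, say, the 40-word list and that list has a surplus, a
          40-word chunk is split so that 20 words are returned and the 20-word
          remainder goes back onto a free list, with the hint of the starting
          size reset to 40. */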
1844   size_t start = align_object_size(numWords + MinChunkSize);
1845   if (start < IndexSetSize) {
1846     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1847     size_t    hint = _indexedFreeList[start].hint();
1848     while (hint < IndexSetSize) {
1849       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1850       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1851       if (fl->surplus() > 0 && fl->head() != NULL) {
1852         // Found a list with surplus, reset original hint
1853         // and split out a free chunk which is returned.
1854         _indexedFreeList[start].set_hint(hint);
1855         FreeChunk* res = getFromListGreater(fl, numWords);
1856         assert(res == NULL || res->is_free(),
1857           "Should be returning a free chunk");
1858         return res;
1859       }
1860       hint = fl->hint(); /* keep looking */
1861     }
1862     /* None found. */
1863     it[start].set_hint(IndexSetSize);
1864   }
1865   return NULL;
1866 }
1867 
1868 /* Requires fl->size >= numWords + MinChunkSize */
1869 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1870   size_t numWords) {
1871   FreeChunk *curr = fl->head();
1872   assert(curr != NULL, "List is empty");
1873   size_t oldNumWords = curr->size();
1874   assert(numWords >= MinChunkSize, "Word size is too small");
1875   assert(oldNumWords >= numWords + MinChunkSize,
1876         "Size of chunks in the list is too small");
1877 
1878   fl->remove_chunk(curr);
1879   // recorded indirectly by splitChunkAndReturnRemainder -
1880   // smallSplit(oldNumWords, numWords);
1881   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1882   // Does anything have to be done for the remainder in terms of
1883   // fixing the card table?
1884   assert(new_chunk == NULL || new_chunk->is_free(),
1885     "Should be returning a free chunk");
1886   return new_chunk;
1887 }
1888 
1889 FreeChunk*
1890 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1891   size_t new_size) {
1892   assert_locked();
1893   size_t size = chunk->size();
1894   assert(size > new_size, "Split from a smaller block?");
1895   assert(is_aligned(chunk), "alignment problem");
1896   assert(size == adjustObjectSize(size), "alignment problem");
1897   size_t rem_sz = size - new_size;
1898   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1899   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1900   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1901   assert(is_aligned(ffc), "alignment problem");
1902   ffc->set_size(rem_sz);
1903   ffc->link_next(NULL);
1904   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1905   // Above must occur before BOT is updated below.
1906   // adjust block offset table
1907   OrderAccess::storestore();
1908   assert(chunk->is_free() && ffc->is_free(), "Error");
1909   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1910   if (rem_sz < SmallForDictionary) {
1911     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1912     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1913     assert(!is_par ||
1914            (SharedHeap::heap()->n_par_threads() ==
1915             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1916     returnChunkToFreeList(ffc);
1917     split(size, rem_sz);
1918     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1919   } else {
1920     returnChunkToDictionary(ffc);
1921     split(size, rem_sz);
1922   }
1923   chunk->set_size(new_size);
1924   return chunk;
1925 }
1926 
1927 void
1928 CompactibleFreeListSpace::sweep_completed() {
1929   // Now that space is probably plentiful, refill linear
1930   // allocation blocks as needed.
1931   refillLinearAllocBlocksIfNeeded();
1932 }
1933 
1934 void
1935 CompactibleFreeListSpace::gc_prologue() {
1936   assert_locked();
1937   if (PrintFLSStatistics != 0) {
1938     gclog_or_tty->print("Before GC:\n");
1939     reportFreeListStatistics();
1940   }
1941   refillLinearAllocBlocksIfNeeded();
1942 }
1943 
1944 void
1945 CompactibleFreeListSpace::gc_epilogue() {
1946   assert_locked();
1947   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1948     if (_smallLinearAllocBlock._word_size == 0)
1949       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1950   }
1951   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1952   _promoInfo.stopTrackingPromotions();
1953   repairLinearAllocationBlocks();
1954   // Print Space's stats
1955   if (PrintFLSStatistics != 0) {
1956     gclog_or_tty->print("After GC:\n");
1957     reportFreeListStatistics();
1958   }
1959 }
1960 
1961 // Iteration support, mostly delegated from a CMS generation
1962 
1963 void CompactibleFreeListSpace::save_marks() {
1964   assert(Thread::current()->is_VM_thread(),
1965          "Global variable should only be set when single-threaded");
1966   // Mark the "end" of the used space at the time of this call;
1967   // note, however, that promoted objects from this point
1968   // on are tracked in the _promoInfo below.
1969   set_saved_mark_word(unallocated_block());
1970 #ifdef ASSERT
1971   // Check the sanity of save_marks() etc.
1972   MemRegion ur    = used_region();
1973   MemRegion urasm = used_region_at_save_marks();
1974   assert(ur.contains(urasm),
1975          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1976                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1977                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1978 #endif
1979   // inform allocator that promotions should be tracked.
1980   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1981   _promoInfo.startTrackingPromotions();
1982 }
1983 
1984 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1985   assert(_promoInfo.tracking(), "No preceding save_marks?");
1986   assert(SharedHeap::heap()->n_par_threads() == 0,
1987          "Shouldn't be called if using parallel gc.");
1988   return _promoInfo.noPromotions();
1989 }
1990 
1991 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1992                                                                             \
1993 void CompactibleFreeListSpace::                                             \
1994 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1995   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
1996          "Shouldn't be called (yet) during parallel part of gc.");          \
1997   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1998   /*                                                                        \
1999    * This also restores any displaced headers and removes the elements from \
2000    * the iteration set as they are processed, so that we have a clean slate \
2001    * at the end of the iteration. Note, thus, that if new objects are       \
2002    * promoted as a result of the iteration they are iterated over as well.  \
2003    */                                                                       \
2004   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2005 }
2006 
2007 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2008 
2009 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2010   return _smallLinearAllocBlock._word_size == 0;
2011 }
2012 
2013 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2014   // Fix up linear allocation blocks to look like free blocks
2015   repairLinearAllocBlock(&_smallLinearAllocBlock);
2016 }
2017 
2018 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2019   assert_locked();
2020   if (blk->_ptr != NULL) {
2021     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2022            "Minimum block size requirement");
2023     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2024     fc->set_size(blk->_word_size);
2025     fc->link_prev(NULL);   // mark as free
2026     fc->dontCoalesce();
2027     assert(fc->is_free(), "just marked it free");
2028     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2029   }
2030 }
2031 
2032 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2033   assert_locked();
2034   if (_smallLinearAllocBlock._ptr == NULL) {
2035     assert(_smallLinearAllocBlock._word_size == 0,
2036       "Size of linAB should be zero if the ptr is NULL");
2037     // Reset the linAB refill and allocation size limit.
2038     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2039   }
2040   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2041 }
2042 
2043 void
2044 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2045   assert_locked();
2046   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2047          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2048          "blk invariant");
2049   if (blk->_ptr == NULL) {
2050     refillLinearAllocBlock(blk);
2051   }
2052   if (PrintMiscellaneous && Verbose) {
2053     if (blk->_word_size == 0) {
2054       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2055     }
2056   }
2057 }
2058 
2059 void
2060 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2061   assert_locked();
2062   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2063          "linear allocation block should be empty");
2064   FreeChunk* fc;
2065   if (blk->_refillSize < SmallForDictionary &&
2066       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2067     // A linAB's strategy might be to use small sizes to reduce
2068     // fragmentation but still get the benefits of allocation from a
2069     // linAB.
2070   } else {
2071     fc = getChunkFromDictionary(blk->_refillSize);
2072   }
2073   if (fc != NULL) {
2074     blk->_ptr  = (HeapWord*)fc;
2075     blk->_word_size = fc->size();
2076     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2077   }
2078 }
2079 
2080 // Support for concurrent collection policy decisions.
2081 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2082   // In the future we might want to add in fragmentation stats --
2083   // including erosion of the "mountain" into this decision as well.
2084   return !adaptive_freelists() && linearAllocationWouldFail();
2085 }
2086 
2087 // Support for compaction
2088 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2089   scan_and_forward(this, cp);
2090   // prepare_for_compaction() uses the space between live objects
2091   // so that later phases can skip dead space quickly.  As a result,
2092   // verification of the free lists doesn't work after this point.
2093 }
2094 
2095 void CompactibleFreeListSpace::adjust_pointers() {
2096   // In other versions of adjust_pointers(), a bail out
2097   // based on the amount of live data in the generation
2098   // (i.e., if 0, bail out) may be used.
2099   // Cannot test used() == 0 here because the free lists have already
2100   // been mangled by the compaction.
2101 
2102   scan_and_adjust_pointers(this);
2103   // See note about verification in prepare_for_compaction().
2104 }
2105 
2106 void CompactibleFreeListSpace::compact() {
2107   scan_and_compact(this);
2108 }
2109 
2110 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2111 // where fbs is free block sizes
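     // For example (illustrative): four free blocks of 25 words each give
     // 1 - (4 * 625) / (100 * 100) = 0.75, whereas a single 100-word block
     // gives 1 - 10000 / 10000 = 0, i.e. no fragmentation.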
2112 double CompactibleFreeListSpace::flsFrag() const {
2113   size_t itabFree = totalSizeInIndexedFreeLists();
2114   double frag = 0.0;
2115   size_t i;
2116 
2117   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2118     double sz  = i;
2119     frag      += _indexedFreeList[i].count() * (sz * sz);
2120   }
2121 
2122   double totFree = itabFree +
2123                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2124   if (totFree > 0) {
2125     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2126             (totFree * totFree));
2127     frag = (double)1.0  - frag;
2128   } else {
2129     assert(frag == 0.0, "Follows from totFree == 0");
2130   }
2131   return frag;
2132 }
2133 
2134 void CompactibleFreeListSpace::beginSweepFLCensus(
2135   float inter_sweep_current,
2136   float inter_sweep_estimate,
2137   float intra_sweep_estimate) {
2138   assert_locked();
2139   size_t i;
2140   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2141     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2142     if (PrintFLSStatistics > 1) {
2143       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2144     }
2145     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2146     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2147     fl->set_before_sweep(fl->count());
2148     fl->set_bfr_surp(fl->surplus());
2149   }
2150   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2151                                     inter_sweep_current,
2152                                     inter_sweep_estimate,
2153                                     intra_sweep_estimate);
2154 }
2155 
2156 void CompactibleFreeListSpace::setFLSurplus() {
2157   assert_locked();
2158   size_t i;
2159   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2160     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2161     fl->set_surplus(fl->count() -
2162                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2163   }
2164 }
2165 
2166 void CompactibleFreeListSpace::setFLHints() {
2167   assert_locked();
2168   size_t i;
2169   size_t h = IndexSetSize;
2170   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2171     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2172     fl->set_hint(h);
2173     if (fl->surplus() > 0) {
2174       h = i;
2175     }
2176   }
2177 }
2178 
2179 void CompactibleFreeListSpace::clearFLCensus() {
2180   assert_locked();
2181   size_t i;
2182   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2183     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2184     fl->set_prev_sweep(fl->count());
2185     fl->set_coal_births(0);
2186     fl->set_coal_deaths(0);
2187     fl->set_split_births(0);
2188     fl->set_split_deaths(0);
2189   }
2190 }
2191 
2192 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2193   if (PrintFLSStatistics > 0) {
2194     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2195     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2196                            p2i(largestAddr));
2197   }
2198   setFLSurplus();
2199   setFLHints();
2200   if (PrintGC && PrintFLSCensus > 0) {
2201     printFLCensus(sweep_count);
2202   }
2203   clearFLCensus();
2204   assert_locked();
2205   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2206 }
2207 
2208 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2209   if (size < SmallForDictionary) {
2210     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2211     return (fl->coal_desired() < 0) ||
2212            ((int)fl->count() > fl->coal_desired());
2213   } else {
2214     return dictionary()->coal_dict_over_populated(size);
2215   }
2216 }
2217 
2218 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2219   assert(size < SmallForDictionary, "Size too large for indexed list");
2220   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2221   fl->increment_coal_births();
2222   fl->increment_surplus();
2223 }
2224 
2225 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2226   assert(size < SmallForDictionary, "Size too large for indexed list");
2227   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2228   fl->increment_coal_deaths();
2229   fl->decrement_surplus();
2230 }
2231 
2232 void CompactibleFreeListSpace::coalBirth(size_t size) {
2233   if (size  < SmallForDictionary) {
2234     smallCoalBirth(size);
2235   } else {
2236     dictionary()->dict_census_update(size,
2237                                    false /* split */,
2238                                    true /* birth */);
2239   }
2240 }
2241 
2242 void CompactibleFreeListSpace::coalDeath(size_t size) {
2243   if (size  < SmallForDictionary) {
2244     smallCoalDeath(size);
2245   } else {
2246     dictionary()->dict_census_update(size,
2247                                    false /* split */,
2248                                    false /* birth */);
2249   }
2250 }
2251 
2252 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2253   assert(size < SmallForDictionary, "Size too large for indexed list");
2254   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2255   fl->increment_split_births();
2256   fl->increment_surplus();
2257 }
2258 
2259 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2260   assert(size < SmallForDictionary, "Size too large for indexed list");
2261   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2262   fl->increment_split_deaths();
2263   fl->decrement_surplus();
2264 }
2265 
2266 void CompactibleFreeListSpace::split_birth(size_t size) {
2267   if (size  < SmallForDictionary) {
2268     smallSplitBirth(size);
2269   } else {
2270     dictionary()->dict_census_update(size,
2271                                    true /* split */,
2272                                    true /* birth */);
2273   }
2274 }
2275 
2276 void CompactibleFreeListSpace::splitDeath(size_t size) {
2277   if (size  < SmallForDictionary) {
2278     smallSplitDeath(size);
2279   } else {
2280     dictionary()->dict_census_update(size,
2281                                    true /* split */,
2282                                    false /* birth */);
2283   }
2284 }
2285 
2286 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2287   size_t to2 = from - to1;
2288   splitDeath(from);
2289   split_birth(to1);
2290   split_birth(to2);
2291 }
2292 
2293 void CompactibleFreeListSpace::print() const {
2294   print_on(tty);
2295 }
2296 
2297 void CompactibleFreeListSpace::prepare_for_verify() {
2298   assert_locked();
2299   repairLinearAllocationBlocks();
2300   // Verify that the SpoolBlocks look like free blocks of
2301   // appropriate sizes... To be done ...
2302 }
2303 
2304 class VerifyAllBlksClosure: public BlkClosure {
2305  private:
2306   const CompactibleFreeListSpace* _sp;
2307   const MemRegion                 _span;
2308   HeapWord*                       _last_addr;
2309   size_t                          _last_size;
2310   bool                            _last_was_obj;
2311   bool                            _last_was_live;
2312 
2313  public:
2314   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2315     MemRegion span) :  _sp(sp), _span(span),
2316                        _last_addr(NULL), _last_size(0),
2317                        _last_was_obj(false), _last_was_live(false) { }
2318 
2319   virtual size_t do_blk(HeapWord* addr) {
2320     size_t res;
2321     bool   was_obj  = false;
2322     bool   was_live = false;
2323     if (_sp->block_is_obj(addr)) {
2324       was_obj = true;
2325       oop p = oop(addr);
2326       guarantee(p->is_oop(), "Should be an oop");
2327       res = _sp->adjustObjectSize(p->size());
2328       if (_sp->obj_is_alive(addr)) {
2329         was_live = true;
2330         p->verify();
2331       }
2332     } else {
2333       FreeChunk* fc = (FreeChunk*)addr;
2334       res = fc->size();
2335       if (FLSVerifyLists && !fc->cantCoalesce()) {
2336         guarantee(_sp->verify_chunk_in_free_list(fc),
2337                   "Chunk should be on a free list");
2338       }
2339     }
2340     if (res == 0) {
2341       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2342       gclog_or_tty->print_cr(
2343         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2344         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2345         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2346         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2347       _sp->print_on(gclog_or_tty);
2348       guarantee(false, "Seppuku!");
2349     }
2350     _last_addr = addr;
2351     _last_size = res;
2352     _last_was_obj  = was_obj;
2353     _last_was_live = was_live;
2354     return res;
2355   }
2356 };
2357 
2358 class VerifyAllOopsClosure: public OopClosure {
2359  private:
2360   const CMSCollector*             _collector;
2361   const CompactibleFreeListSpace* _sp;
2362   const MemRegion                 _span;
2363   const bool                      _past_remark;
2364   const CMSBitMap*                _bit_map;
2365 
2366  protected:
2367   void do_oop(void* p, oop obj) {
2368     if (_span.contains(obj)) { // the interior oop points into CMS heap
2369       if (!_span.contains(p)) { // reference from outside CMS heap
2370         // Should be a valid object; the first disjunct below allows
2371         // us to sidestep an assertion in block_is_obj() that insists
2372         // that p be in _sp. Note that several generations (and spaces)
2373         // are spanned by _span (CMS heap) above.
2374         guarantee(!_sp->is_in_reserved(obj) ||
2375                   _sp->block_is_obj((HeapWord*)obj),
2376                   "Should be an object");
2377         guarantee(obj->is_oop(), "Should be an oop");
2378         obj->verify();
2379         if (_past_remark) {
2380           // Remark has been completed, the object should be marked
2381           _bit_map->isMarked((HeapWord*)obj);
2382         }
2383       } else { // reference within CMS heap
2384         if (_past_remark) {
2385           // Remark has been completed -- so the referent should have
2386           // been marked, if referring object is.
2387           if (_bit_map->isMarked(_collector->block_start(p))) {
2388             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2389           }
2390         }
2391       }
2392     } else if (_sp->is_in_reserved(p)) {
2393       // the reference is from FLS, and points out of FLS
2394       guarantee(obj->is_oop(), "Should be an oop");
2395       obj->verify();
2396     }
2397   }
2398 
2399   template <class T> void do_oop_work(T* p) {
2400     T heap_oop = oopDesc::load_heap_oop(p);
2401     if (!oopDesc::is_null(heap_oop)) {
2402       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2403       do_oop(p, obj);
2404     }
2405   }
2406 
2407  public:
2408   VerifyAllOopsClosure(const CMSCollector* collector,
2409     const CompactibleFreeListSpace* sp, MemRegion span,
2410     bool past_remark, CMSBitMap* bit_map) :
2411     _collector(collector), _sp(sp), _span(span),
2412     _past_remark(past_remark), _bit_map(bit_map) { }
2413 
2414   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2415   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2416 };
2417 
2418 void CompactibleFreeListSpace::verify() const {
2419   assert_lock_strong(&_freelistLock);
2420   verify_objects_initialized();
2421   MemRegion span = _collector->_span;
2422   bool past_remark = (_collector->abstract_state() ==
2423                       CMSCollector::Sweeping);
2424 
2425   ResourceMark rm;
2426   HandleMark  hm;
2427 
2428   // Check integrity of CFL data structures
2429   _promoInfo.verify();
2430   _dictionary->verify();
2431   if (FLSVerifyIndexTable) {
2432     verifyIndexedFreeLists();
2433   }
2434   // Check integrity of all objects and free blocks in space
2435   {
2436     VerifyAllBlksClosure cl(this, span);
2437     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2438   }
2439   // Check that all references in the heap to FLS
2440   // are to valid objects in FLS or that references in
2441   // FLS are to valid objects elsewhere in the heap
2442   if (FLSVerifyAllHeapReferences)
2443   {
2444     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2445       _collector->markBitMap());
2446     CollectedHeap* ch = Universe::heap();
2447 
2448     // Iterate over all oops in the heap. Uses the _no_header version
2449     // since we are not interested in following the klass pointers.
2450     ch->oop_iterate_no_header(&cl);
2451   }
2452 
2453   if (VerifyObjectStartArray) {
2454     // Verify the block offset table
2455     _bt.verify();
2456   }
2457 }
2458 
2459 #ifndef PRODUCT
2460 void CompactibleFreeListSpace::verifyFreeLists() const {
2461   if (FLSVerifyLists) {
2462     _dictionary->verify();
2463     verifyIndexedFreeLists();
2464   } else {
2465     if (FLSVerifyDictionary) {
2466       _dictionary->verify();
2467     }
2468     if (FLSVerifyIndexTable) {
2469       verifyIndexedFreeLists();
2470     }
2471   }
2472 }
2473 #endif
2474 
2475 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2476   size_t i = 0;
2477   for (; i < IndexSetStart; i++) {
2478     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2479   }
2480   for (; i < IndexSetSize; i++) {
2481     verifyIndexedFreeList(i);
2482   }
2483 }
2484 
2485 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2486   FreeChunk* fc   =  _indexedFreeList[size].head();
2487   FreeChunk* tail =  _indexedFreeList[size].tail();
2488   size_t    num = _indexedFreeList[size].count();
2489   size_t      n = 0;
2490   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2491             "Slot should have been empty");
2492   for (; fc != NULL; fc = fc->next(), n++) {
2493     guarantee(fc->size() == size, "Size inconsistency");
2494     guarantee(fc->is_free(), "!free?");
2495     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2496     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2497   }
2498   guarantee(n == num, "Incorrect count");
2499 }
2500 
2501 #ifndef PRODUCT
2502 void CompactibleFreeListSpace::check_free_list_consistency() const {
2503   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2504     "Some sizes can't be allocated without recourse to"
2505     " linear allocation buffers");
2506   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2507     "else MIN_TREE_CHUNK_SIZE is wrong");
2508   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2509   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2510 }
2511 #endif
2512 
2513 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2514   assert_lock_strong(&_freelistLock);
2515   AdaptiveFreeList<FreeChunk> total;
2516   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2517   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2518   size_t total_free = 0;
2519   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2520     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2521     total_free += fl->count() * fl->size();
2522     if (i % (40*IndexSetStride) == 0) {
2523       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2524     }
2525     fl->print_on(gclog_or_tty);
2526     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2527     total.set_surplus(    total.surplus()     + fl->surplus()    );
2528     total.set_desired(    total.desired()     + fl->desired()    );
2529     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2530     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2531     total.set_count(      total.count()       + fl->count()      );
2532     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2533     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2534     total.set_split_births(total.split_births() + fl->split_births());
2535     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2536   }
2537   total.print_on(gclog_or_tty, "TOTAL");
2538   gclog_or_tty->print_cr("Total free in indexed lists "
2539                          SIZE_FORMAT " words", total_free);
2540   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2541     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2542             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2543     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2544   _dictionary->print_dict_census();
2545 }
2546 
2547 ///////////////////////////////////////////////////////////////////////////
2548 // CFLS_LAB
2549 ///////////////////////////////////////////////////////////////////////////
2550 
2551 #define VECTOR_257(x)                                                                                  \
2552   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2553   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2554      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2555      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2556      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2557      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2558      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2559      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2560      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2561      x }
2562 
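     // These per-size tables are indexed by block size in words
     // (0 through IndexSetSize - 1), hence the 257 entries from VECTOR_257().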
2563 // Initialize with the default setting for CMS, _not_ the generic
2564 // OldPLABSize, whose static default is different; if overridden on the
2565 // command line, this will be reinitialized via a call to
2566 // modify_initialization() below.
2567 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2568   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
2569 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2570 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2571 
2572 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2573   _cfls(cfls)
2574 {
2575   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2576   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2577        i < CompactibleFreeListSpace::IndexSetSize;
2578        i += CompactibleFreeListSpace::IndexSetStride) {
2579     _indexedFreeList[i].set_size(i);
2580     _num_blocks[i] = 0;
2581   }
2582 }
2583 
2584 static bool _CFLS_LAB_modified = false;
2585 
2586 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2587   assert(!_CFLS_LAB_modified, "Call only once");
2588   _CFLS_LAB_modified = true;
2589   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2590        i < CompactibleFreeListSpace::IndexSetSize;
2591        i += CompactibleFreeListSpace::IndexSetStride) {
2592     _blocks_to_claim[i].modify(n, wt, true /* force */);
2593   }
2594 }
2595 
2596 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2597   FreeChunk* res;
2598   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
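       // Requests of IndexSetSize words or more go to the dictionary; smaller
       // ones are served from the per-thread indexed free lists, which are
       // refilled from the shared pool on demand.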
2599   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2600     // This locking manages sync with other large object allocations.
2601     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2602                     Mutex::_no_safepoint_check_flag);
2603     res = _cfls->getChunkFromDictionaryExact(word_sz);
2604     if (res == NULL) return NULL;
2605   } else {
2606     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2607     if (fl->count() == 0) {
2608       // Attempt to refill this local free list.
2609       get_from_global_pool(word_sz, fl);
2610       // If it didn't work, give up.
2611       if (fl->count() == 0) return NULL;
2612     }
2613     res = fl->get_chunk_at_head();
2614     assert(res != NULL, "Why was count non-zero?");
2615   }
2616   res->markNotFree();
2617   assert(!res->is_free(), "shouldn't be marked free");
2618   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2619   // Mangle the just-allocated block with a distinct pattern (debug builds only).
2620   debug_only(res->mangleAllocated(word_sz));
2621   return (HeapWord*)res;
2622 }
2623 
2624 // Get a chunk of blocks of the right size and update related
2625 // book-keeping stats
2626 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2627   // Get the #blocks we want to claim
2628   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2629   assert(n_blks > 0, "Error");
2630   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2631   // In some cases, when the application has a phase change,
2632   // there may be a sudden and sharp shift in the object survival
2633   // profile, and updating the counts at the end of a scavenge
2634   // may not be quick enough, giving rise to large scavenge pauses
2635   // during these phase changes. It is beneficial to detect such
2636   // changes on-the-fly during a scavenge and avoid such a phase-change
2637   // pothole. The following code is a heuristic attempt to do that.
2638   // It is protected by a product flag until we have gained
2639   // enough experience with this heuristic and fine-tuned its behavior.
2640   // WARNING: This might increase fragmentation if we overreact to
2641   // small spikes, so some kind of historical smoothing based on
2642   // previous experience with the greater reactivity might be useful.
2643   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2644   // default.
2645   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
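         // Measure how many "tolerance units" (CMSOldPLABToleranceFactor *
         // CMSOldPLABNumRefills refills of n_blks) this worker has already
         // consumed at this size, and inflate the refill proportionally,
         // scaled by CMSOldPLABReactivityFactor and capped at CMSOldPLABMax.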
2646     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2647     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2648     n_blks = MIN2(n_blks, CMSOldPLABMax);
2649   }
2650   assert(n_blks > 0, "Error");
2651   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2652   // Update stats table entry for this block size
2653   _num_blocks[word_sz] += fl->count();
2654 }
2655 
2656 void CFLS_LAB::compute_desired_plab_size() {
2657   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2658        i < CompactibleFreeListSpace::IndexSetSize;
2659        i += CompactibleFreeListSpace::IndexSetStride) {
2660     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2661            "Counter inconsistency");
2662     if (_global_num_workers[i] > 0) {
2663       // Need to smooth wrt historical average
2664       if (ResizeOldPLAB) {
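             // The new sample is the number of blocks consumed per worker per
             // refill during the last scavenge, clamped to
             // [CMSOldPLABMin, CMSOldPLABMax].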
2665         _blocks_to_claim[i].sample(
2666           MAX2((size_t)CMSOldPLABMin,
2667           MIN2((size_t)CMSOldPLABMax,
2668                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2669       }
2670       // Reset counters for next round
2671       _global_num_workers[i] = 0;
2672       _global_num_blocks[i] = 0;
2673       if (PrintOldPLAB) {
2674         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2675                                i, (size_t)_blocks_to_claim[i].average());
2676       }
2677     }
2678   }
2679 }
2680 
2681 // If this is changed in the future to allow parallel
2682 // access, one would need to take the FL locks and,
2683 // depending on how it is used, stagger access from
2684 // parallel threads to reduce contention.
2685 void CFLS_LAB::retire(int tid) {
2686   // We run this single-threaded with the world stopped,
2687   // so there is no need for locks.
2688   NOT_PRODUCT(Thread* t = Thread::current();)
2689   assert(Thread::current()->is_VM_thread(), "Error");
2690   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2691        i < CompactibleFreeListSpace::IndexSetSize;
2692        i += CompactibleFreeListSpace::IndexSetStride) {
2693     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2694            "Can't retire more than what we obtained");
2695     if (_num_blocks[i] > 0) {
2696       size_t num_retire =  _indexedFreeList[i].count();
2697       assert(_num_blocks[i] > num_retire, "Should have used at least one");
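           // The num_retire chunks still sitting on the local list were never
           // handed out; return them to the shared indexed lists and credit
           // the remainder as blocks consumed by this worker.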
2698       {
2699         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2700         //                Mutex::_no_safepoint_check_flag);
2701 
2702         // Update global stats for the number of blocks used
2703         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2704         _global_num_workers[i]++;
2705         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2706         if (num_retire > 0) {
2707           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2708           // Reset this list.
2709           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2710           _indexedFreeList[i].set_size(i);
2711         }
2712       }
2713       if (PrintOldPLAB) {
2714         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2715                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2716       }
2717       // Reset stats for next round
2718       _num_blocks[i]         = 0;
2719     }
2720   }
2721 }
2722 
2723 // Used by par_get_chunk_of_blocks() for the chunks from the
2724 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2725 // of "word_sz" and, if found, splits it into chunks of size "word_sz",
2726 // adding them to the free list "fl".  "n" is the maximum number of
2727 // chunks to be added to "fl".
2728 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2729 
2730   // We'll try all multiples of word_sz in the indexed set, starting with
2731   // word_sz itself and, if CMSSplitIndexedFreeListBlocks is set, larger
2732   // multiples; if none works, the caller will split a dictionary chunk instead.
2733   {
2734     bool found;
2735     int  k;
2736     size_t cur_sz;
2737     for (k = 1, cur_sz = k * word_sz, found = false;
2738          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2739          (CMSSplitIndexedFreeListBlocks || k <= 1);
2740          k++, cur_sz = k * word_sz) {
2741       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2742       fl_for_cur_sz.set_size(cur_sz);
2743       {
2744         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2745                         Mutex::_no_safepoint_check_flag);
2746         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2747         if (gfl->count() != 0) {
2748           // nn is the number of chunks of size cur_sz that
2749           // we'd need to split k-ways each, in order to create
2750           // "n" chunks of size word_sz each.
2751           const size_t nn = MAX2(n/k, (size_t)1);
2752           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2753           found = true;
2754           if (k > 1) {
2755             // Update split death stats for the cur_sz-size blocks list:
2756             // we increment the split death count by the number of blocks
2757             // we just took from the cur_sz-size blocks list and which
2758             // we will be splitting below.
2759             ssize_t deaths = gfl->split_deaths() +
2760                              fl_for_cur_sz.count();
2761             gfl->set_split_deaths(deaths);
2762           }
2763         }
2764       }
2765       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2766       if (found) {
2767         if (k == 1) {
2768           fl->prepend(&fl_for_cur_sz);
2769         } else {
2770           // Divide each block on fl_for_cur_sz up k ways.
2771           FreeChunk* fc;
2772           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2773             // Must do this in reverse order, so that anybody attempting to
2774             // access the main chunk sees it as a single free block until we
2775             // change it.
2776             size_t fc_size = fc->size();
2777             assert(fc->is_free(), "Error");
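                 // Carve the block into k pieces of word_sz each, working
                 // from the highest-addressed piece down; fc itself (i == 0)
                 // is rewritten last.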
2778             for (int i = k-1; i >= 0; i--) {
2779               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2780               assert((i != 0) ||
2781                         ((fc == ffc) && ffc->is_free() &&
2782                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2783                         "Counting error");
2784               ffc->set_size(word_sz);
2785               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2786               ffc->link_next(NULL);
2787               // Above must occur before BOT is updated below.
2788               OrderAccess::storestore();
2789               // Splitting from the right: after the decrement below, fc_size == i * word_sz.
2790               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2791               fc_size -= word_sz;
2792               assert(fc_size == i*word_sz, "Error");
2793               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2794               _bt.verify_single_block((HeapWord*)fc, fc_size);
2795               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2796               // Push this on "fl".
2797               fl->return_chunk_at_head(ffc);
2798             }
2799             // TRAP
2800             assert(fl->tail()->next() == NULL, "List invariant.");
2801           }
2802         }
2803         // Update birth stats for this block size.
2804         size_t num = fl->count();
2805         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2806                         Mutex::_no_safepoint_check_flag);
2807         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2808         _indexedFreeList[word_sz].set_split_births(births);
2809         return true;
2810       }
2811     }
2812     return found;
2813   }
2814 }
2815 
2816 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2817 
2818   FreeChunk* fc = NULL;
2819   FreeChunk* rem_fc = NULL;
2820   size_t rem;
2821   {
2822     MutexLockerEx x(parDictionaryAllocLock(),
2823                     Mutex::_no_safepoint_check_flag);
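         // Ask the dictionary for a block large enough to supply n chunks of
         // word_sz; if nothing that large is available, retry with smaller n.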
2824     while (n > 0) {
2825       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2826                                   FreeBlockDictionary<FreeChunk>::atLeast);
2827       if (fc != NULL) {
2828         break;
2829       } else {
2830         n--;
2831       }
2832     }
2833     if (fc == NULL) return NULL;
2834     // Otherwise, split up that block.
2835     assert((ssize_t)n >= 1, "Control point invariant");
2836     assert(fc->is_free(), "Error: should be a free block");
2837     _bt.verify_single_block((HeapWord*)fc, fc->size());
2838     const size_t nn = fc->size() / word_sz;
2839     n = MIN2(nn, n);
2840     assert((ssize_t)n >= 1, "Control point invariant");
2841     rem = fc->size() - n * word_sz;
2842     // If there is a remainder, and it's too small, allocate one fewer.
2843     if (rem > 0 && rem < MinChunkSize) {
2844       n--; rem += word_sz;
2845     }
2846     // Note that at this point we may have n == 0.
2847     assert((ssize_t)n >= 0, "Control point invariant");
2848 
2849     // If n is 0, the chunk fc that was found is not large
2850     // enough to leave a viable remainder.  We are unable to
2851     // allocate even one block.  Return fc to the
2852     // dictionary and return, leaving "fl" empty.
2853     if (n == 0) {
2854       returnChunkToDictionary(fc);
2855       return NULL;
2856     }
2857 
2858     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2859     dictionary()->dict_census_update(fc->size(),
2860                                      true /*split*/,
2861                                      false /*birth*/);
2862 
2863     // First return the remainder, if any.
2864     // Note that we hold the lock until we decide if we're going to give
2865     // back the remainder to the dictionary, since a concurrent allocation
2866     // may otherwise see the heap as empty.  (We're willing to take that
2867     // hit if the block is a small block.)
2868     if (rem > 0) {
2869       size_t prefix_size = n * word_sz;
2870       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2871       rem_fc->set_size(rem);
2872       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2873       rem_fc->link_next(NULL);
2874       // Above must occur before BOT is updated below.
2875       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2876       OrderAccess::storestore();
2877       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2878       assert(fc->is_free(), "Error");
2879       fc->set_size(prefix_size);
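           // A remainder too large for the indexed lists goes straight back
           // to the dictionary; a small one is returned to the indexed lists
           // below, once the dictionary lock has been dropped.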
2880       if (rem >= IndexSetSize) {
2881         returnChunkToDictionary(rem_fc);
2882         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2883         rem_fc = NULL;
2884       }
2885       // Otherwise, return it to the small list below.
2886     }
2887   }
2888   if (rem_fc != NULL) {
2889     MutexLockerEx x(_indexedFreeListParLocks[rem],
2890                     Mutex::_no_safepoint_check_flag);
2891     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2892     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2893     smallSplitBirth(rem);
2894   }
2895   assert(n * word_sz == fc->size(),
2896     err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable into "
2897     SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2898     fc->size(), n, word_sz));
2899   return fc;
2900 }
2901 
2902 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2903 
2904   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2905 
2906   if (fc == NULL) {
2907     return;
2908   }
2909 
2910   size_t n = fc->size() / word_sz;
2911 
2912   assert((ssize_t)n > 0, "Consistency");
2913   // Now do the splitting up.
2914   // Must do this in reverse order, so that anybody attempting to
2915   // access the main chunk sees it as a single free block until we
2916   // change it.
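       // fc_size tracks the size of the not-yet-split prefix of fc as pieces
       // are carved off from the right.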
2917   size_t fc_size = n * word_sz;
2918   // All but first chunk in this loop
2919   for (ssize_t i = n-1; i > 0; i--) {
2920     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2921     ffc->set_size(word_sz);
2922     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2923     ffc->link_next(NULL);
2924     // Above must occur before BOT is updated below.
2925     OrderAccess::storestore();
2926     // Splitting from the right: after the decrement below, fc_size == i * word_sz.
2927     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2928     fc_size -= word_sz;
2929     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2930     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2931     _bt.verify_single_block((HeapWord*)fc, fc_size);
2932     // Push this on "fl".
2933     fl->return_chunk_at_head(ffc);
2934   }
2935   // First chunk
2936   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2937   // The blocks split off above should show their new sizes before the size of the first block (fc) is changed below.
2938   fc->set_size(word_sz);
2939   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2940   fc->link_next(NULL);
2941   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2942   _bt.verify_single_block((HeapWord*)fc, fc->size());
2943   fl->return_chunk_at_head(fc);
2944 
2945   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2946   {
2947     // Update the stats for this block size.
2948     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2949                     Mutex::_no_safepoint_check_flag);
2950     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2951     _indexedFreeList[word_sz].set_split_births(births);
2952     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2953     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2954   }
2955 
2956   // TRAP
2957   assert(fl->tail()->next() == NULL, "List invariant.");
2958 }
2959 
2960 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2961   assert(fl->count() == 0, "Precondition.");
2962   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2963          "Precondition");
2964 
2965   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2966     // Got it
2967     return;
2968   }
2969 
2970   // Otherwise, we'll split a block from the dictionary.
2971   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2972 }
2973 
2974 // Set up the space's par_seq_tasks structure for work claiming
2975 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2976 // XXX Need to suitably abstract and generalize this and the next
2977 // method into one.
2978 void
2979 CompactibleFreeListSpace::
2980 initialize_sequential_subtasks_for_rescan(int n_threads) {
2981   // The "size" of each task is fixed according to rescan_task_size.
2982   assert(n_threads > 0, "Unexpected n_threads argument");
2983   const size_t task_size = rescan_task_size();
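       // Round up so that the final, possibly partial, stretch of the used
       // region still gets a task of its own.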
2984   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2985   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2986   assert(n_tasks == 0 ||
2987          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2988           (used_region().start() + n_tasks*task_size >= used_region().end())),
2989          "n_tasks calculation incorrect");
2990   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2991   assert(!pst->valid(), "Clobbering existing data?");
2992   // Sets the condition for completion of the subtask (how many threads
2993   // need to finish in order to be done).
2994   pst->set_n_threads(n_threads);
2995   pst->set_n_tasks((int)n_tasks);
2996 }
2997 
2998 // Set up the space's par_seq_tasks structure for work claiming
2999 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
3000 void
3001 CompactibleFreeListSpace::
3002 initialize_sequential_subtasks_for_marking(int n_threads,
3003                                            HeapWord* low) {
3004   // The "size" of each task is fixed according to rescan_task_size.
3005   assert(n_threads > 0, "Unexpected n_threads argument");
3006   const size_t task_size = marking_task_size();
3007   assert(task_size > CardTableModRefBS::card_size_in_words &&
3008          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3009          "Otherwise arithmetic below would be incorrect");
3010   MemRegion span = _gen->reserved();
3011   if (low != NULL) {
3012     if (span.contains(low)) {
3013       // Align low down to a card boundary so that
3014       // we can use block_offset_careful() on span boundaries.
3015       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3016                                  CardTableModRefBS::card_size);
3017       // Clip span prefix at aligned_low
3018       span = span.intersection(MemRegion(aligned_low, span.end()));
3019     } else if (low > span.end()) {
3020       span = MemRegion(low, low);  // Null region
3021     } // else use entire span
3022   }
3023   assert(span.is_empty() ||
3024          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3025         "span should start at a card boundary");
3026   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3027   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3028   assert(n_tasks == 0 ||
3029          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3030           (span.start() + n_tasks*task_size >= span.end())),
3031          "n_tasks calculation incorrect");
3032   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3033   assert(!pst->valid(), "Clobbering existing data?");
3034   // Sets the condition for completion of the subtask (how many threads
3035   // need to finish in order to be done).
3036   pst->set_n_threads(n_threads);
3037   pst->set_n_tasks((int)n_tasks);
3038 }