1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/globals.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/vmThread.hpp"
  43 #include "utilities/copy.hpp"
  44 
  45 /////////////////////////////////////////////////////////////////////////
  46 //// CompactibleFreeListSpace
  47 /////////////////////////////////////////////////////////////////////////
  48 
// Highest-ranked free list lock rank.
  50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  51 
  52 // Defaults are 0 so things will break badly if incorrectly initialized.
  53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  55 
  56 size_t MinChunkSize = 0;
  57 
  58 void CompactibleFreeListSpace::set_cms_values() {
  59   // Set CMS global values
  60   assert(MinChunkSize == 0, "already set");
  61   #define numQuanta(x,y) ((x+y-1)/y)
  62   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
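  // Worked example (hypothetical values, for illustration only): if
  // sizeof(FreeChunk) == 16 bytes and MinObjAlignmentInBytes == 8, then
  // numQuanta(16, 8) == (16 + 8 - 1) / 8 == 2, so MinChunkSize becomes
  // 2 * MinObjAlignment heap words.  Actual values are platform-dependent.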
  63 
  64   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  65   IndexSetStart  = MinChunkSize;
  66   IndexSetStride = MinObjAlignment;
  67 }
  68 
  69 // Constructor
  70 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  71   MemRegion mr, bool use_adaptive_freelists,
  72   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  73   _dictionaryChoice(dictionaryChoice),
  74   _adaptive_freelists(use_adaptive_freelists),
  75   _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
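  // Illustration (the construction order is an assumption of this sketch, not
  // something this file alone guarantees): the first CFLS c'tor passes
  // _lockRank == Mutex::leaf + 3 and post-decrements it, so a CFLS
  // constructed later would get Mutex::leaf + 2 -- hence the range above.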
  81   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  82   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  83                           "CompactibleFreeListSpace._dict_par_lock", true),
  84   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  85                     CMSRescanMultiple),
  86   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  87                     CMSConcMarkMultiple),
  88   _collector(NULL)
  89 {
  90   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  91     "FreeChunk is larger than expected");
  92   _bt.set_space(this);
  93   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  94   // We have all of "mr", all of which we place in the dictionary
  95   // as one big chunk. We'll need to decide here which of several
  96   // possible alternative dictionary implementations to use. For
  97   // now the choice is easy, since we have only one working
  98   // implementation, namely, the simple binary tree (splaying
  99   // temporarily disabled).
 100   switch (dictionaryChoice) {
 101     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 102     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 103     default:
 104       warning("dictionaryChoice: selected option not understood; using"
 105               " default BinaryTreeDictionary implementation instead.");
 106     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 107       _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
 108       break;
 109   }
 110   assert(_dictionary != NULL, "CMS dictionary initialization");
 111   // The indexed free lists are initially all empty and are lazily
 112   // filled in on demand. Initialize the array elements to NULL.
 113   initializeIndexedFreeListArray();
 114 
 115   // Not using adaptive free lists assumes that allocation is first
  // from the linAB's.  Also, a CMS perm gen that can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass, so that the klassKlass is
  // moved to its new location before the klass is moved.
 120   // Set the _refillSize for the linear allocation blocks
 121   if (!use_adaptive_freelists) {
 122     FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
 123     // The small linAB initially has all the space and will allocate
 124     // a chunk of any size.
 125     HeapWord* addr = (HeapWord*) fc;
 126     _smallLinearAllocBlock.set(addr, fc->size() ,
 127       1024*SmallForLinearAlloc, fc->size());
 128     // Note that _unallocated_block is not updated here.
 129     // Allocations from the linear allocation block should
 130     // update it.
 131   } else {
 132     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 133                                SmallForLinearAlloc);
 134   }
 135   // CMSIndexedFreeListReplenish should be at least 1
 136   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 137   _promoInfo.setSpace(this);
 138   if (UseCMSBestFit) {
 139     _fitStrategy = FreeBlockBestFitFirst;
 140   } else {
 141     _fitStrategy = FreeBlockStrategyNone;
 142   }
 143   check_free_list_consistency();
 144 
 145   // Initialize locks for parallel case.
 146 
 147   if (CollectedHeap::use_parallel_gc_threads()) {
 148     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 150                                               "a freelist par lock",
 151                                               true);
 152       if (_indexedFreeListParLocks[i] == NULL)
 153         vm_exit_during_initialization("Could not allocate a par lock");
 154       DEBUG_ONLY(
 155         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 156       )
 157     }
 158     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 159   }
 160 }
 161 
 162 // Like CompactibleSpace forward() but always calls cross_threshold() to
 163 // update the block offset table.  Removed initialize_threshold call because
 164 // CFLS does not use a block offset array for contiguous spaces.
 165 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 166                                     CompactPoint* cp, HeapWord* compact_top) {
 167   // q is alive
 168   // First check if we should switch compaction space
 169   assert(this == cp->space, "'this' should be current compaction space.");
 170   size_t compaction_max_size = pointer_delta(end(), compact_top);
 171   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 172     "virtual adjustObjectSize_v() method is not correct");
 173   size_t adjusted_size = adjustObjectSize(size);
 174   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 175          "no small fragments allowed");
 176   assert(minimum_free_block_size() == MinChunkSize,
 177          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
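  // Illustration (hypothetical numbers): with compaction_max_size == 100,
  // adjusted_size == 99 and MinChunkSize == 2, compacting here would leave a
  // 1-word fragment; since 99 + 2 > 100 and 99 != 100, we switch to the next
  // compaction space instead.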
 179   if (adjusted_size + MinChunkSize > compaction_max_size &&
 180       adjusted_size != compaction_max_size) {
 181     do {
 182       // switch to next compaction space
 183       cp->space->set_compaction_top(compact_top);
 184       cp->space = cp->space->next_compaction_space();
 185       if (cp->space == NULL) {
 186         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 187         assert(cp->gen != NULL, "compaction must succeed");
 188         cp->space = cp->gen->first_compaction_space();
 189         assert(cp->space != NULL, "generation must have a first compaction space");
 190       }
 191       compact_top = cp->space->bottom();
 192       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; above, the non-virtual version was used
      // to save the virtual dispatch.
 197       adjusted_size = cp->space->adjust_object_size_v(size);
 198       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 199       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 200     } while (adjusted_size > compaction_max_size);
 201   }
 202 
 203   // store the forwarding pointer into the mark word
 204   if ((HeapWord*)q != compact_top) {
 205     q->forward_to(oop(compact_top));
 206     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 207   } else {
 208     // if the object isn't moving we can just set the mark to the default
 209     // mark and handle it specially later on.
 210     q->init_mark();
 211     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 212   }
 213 
 214   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
 215   compact_top += adjusted_size;
 216 
 217   // we need to update the offset table so that the beginnings of objects can be
 218   // found during scavenge.  Note that we are updating the offset table based on
 219   // where the object will be once the compaction phase finishes.
 220 
  // Always call cross_threshold().  A contiguous space can call it only when
  // compaction_top exceeds the current threshold, but that restriction does
  // not apply to a non-contiguous space.
 224   cp->threshold =
 225     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 226   return compact_top;
 227 }
 228 
 229 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 230 // and use of single_block instead of alloc_block.  The name here is not really
 231 // appropriate - maybe a more general name could be invented for both the
 232 // contiguous and noncontiguous spaces.
 233 
 234 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 235   _bt.single_block(start, the_end);
 236   return end();
 237 }
 238 
// Initialize the indexed free lists; each list starts out empty.
 240 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 241   for (size_t i = 0; i < IndexSetSize; i++) {
 242     // Note that on platforms where objects are double word aligned,
 243     // the odd array elements are not used.  It is convenient, however,
 244     // to map directly from the object size to the array element.
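    // For example, with double-word alignment IndexSetStride == 2, so only
    // the even-indexed lists would ever hold chunks; the loop here still
    // resets every element for simplicity.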
 245     _indexedFreeList[i].reset(IndexSetSize);
 246     _indexedFreeList[i].set_size(i);
 247     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 248     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 249     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 250     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 251   }
 252 }
 253 
 254 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 255   for (size_t i = 1; i < IndexSetSize; i++) {
 256     assert(_indexedFreeList[i].size() == (size_t) i,
 257       "Indexed free list sizes are incorrect");
 258     _indexedFreeList[i].reset(IndexSetSize);
 259     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 260     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 261     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 262     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 263   }
 264 }
 265 
 266 void CompactibleFreeListSpace::reset(MemRegion mr) {
 267   resetIndexedFreeListArray();
 268   dictionary()->reset();
 269   if (BlockOffsetArrayUseUnallocatedBlock) {
 270     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 271     // Everything's allocated until proven otherwise.
 272     _bt.set_unallocated_block(end());
 273   }
 274   if (!mr.is_empty()) {
 275     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 276     _bt.single_block(mr.start(), mr.word_size());
 277     FreeChunk* fc = (FreeChunk*) mr.start();
 278     fc->set_size(mr.word_size());
 279     if (mr.word_size() >= IndexSetSize ) {
 280       returnChunkToDictionary(fc);
 281     } else {
 282       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 283       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 284     }
 285   }
 286   _promoInfo.reset();
 287   _smallLinearAllocBlock._ptr = NULL;
 288   _smallLinearAllocBlock._word_size = 0;
 289 }
 290 
 291 void CompactibleFreeListSpace::reset_after_compaction() {
 292   // Reset the space to the new reality - one free chunk.
 293   MemRegion mr(compaction_top(), end());
 294   reset(mr);
 295   // Now refill the linear allocation block(s) if possible.
 296   if (_adaptive_freelists) {
 297     refillLinearAllocBlocksIfNeeded();
 298   } else {
 299     // Place as much of mr in the linAB as we can get,
 300     // provided it was big enough to go into the dictionary.
 301     FreeChunk* fc = dictionary()->find_largest_dict();
 302     if (fc != NULL) {
 303       assert(fc->size() == mr.word_size(),
 304              "Why was the chunk broken up?");
 305       removeChunkFromDictionary(fc);
 306       HeapWord* addr = (HeapWord*) fc;
 307       _smallLinearAllocBlock.set(addr, fc->size() ,
 308         1024*SmallForLinearAlloc, fc->size());
 309       // Note that _unallocated_block is not updated here.
 310     }
 311   }
 312 }
 313 
// Walks the entire dictionary, returning a chunk that is coterminal
// with the end of the space, if one exists. Use with caution since it involves
 316 // a potentially complete walk of a potentially large tree.
 317 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 318 
 319   assert_lock_strong(&_freelistLock);
 320 
 321   return dictionary()->find_chunk_ends_at(end());
 322 }
 323 
 324 
 325 #ifndef PRODUCT
 326 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 327   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 328     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 329   }
 330 }
 331 
 332 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 333   size_t sum = 0;
 334   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 335     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 336   }
 337   return sum;
 338 }
 339 
 340 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 341   size_t count = 0;
 342   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 343     debug_only(
 344       ssize_t total_list_count = 0;
 345       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 346          fc = fc->next()) {
 347         total_list_count++;
 348       }
 349       assert(total_list_count ==  _indexedFreeList[i].count(),
 350         "Count in list is incorrect");
 351     )
 352     count += _indexedFreeList[i].count();
 353   }
 354   return count;
 355 }
 356 
 357 size_t CompactibleFreeListSpace::totalCount() {
 358   size_t num = totalCountInIndexedFreeLists();
 359   num +=  dictionary()->total_count();
 360   if (_smallLinearAllocBlock._word_size != 0) {
 361     num++;
 362   }
 363   return num;
 364 }
 365 #endif
 366 
 367 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 368   FreeChunk* fc = (FreeChunk*) p;
 369   return fc->is_free();
 370 }
 371 
 372 size_t CompactibleFreeListSpace::used() const {
 373   return capacity() - free();
 374 }
 375 
 376 size_t CompactibleFreeListSpace::free() const {
 377   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 378   // if you do this while the structures are in flux you
 379   // may get an approximate answer only; for instance
 380   // because there is concurrent allocation either
 381   // directly by mutators or for promotion during a GC.
 382   // It's "MT-safe", however, in the sense that you are guaranteed
 383   // not to crash and burn, for instance, because of walking
 384   // pointers that could disappear as you were walking them.
 385   // The approximation is because the various components
  // that are read below are not read atomically (and
  // further, the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 389   // this is during a resize operation at the end of GC
 390   // and at that time you are guaranteed to get the
 391   // correct actual value. However, for instance, this is
 392   // also read completely asynchronously by the "perf-sampler"
 393   // that supports jvmstat, and you are apt to see the values
 394   // flicker in such cases.
 395   assert(_dictionary != NULL, "No _dictionary?");
 396   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 397           totalSizeInIndexedFreeLists() +
 398           _smallLinearAllocBlock._word_size) * HeapWordSize;
 399 }
 400 
 401 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 402   assert(_dictionary != NULL, "No _dictionary?");
 403   assert_locked();
 404   size_t res = _dictionary->max_chunk_size();
 405   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 406                        (size_t) SmallForLinearAlloc - 1));
 407   // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
 410   // just return res calculated above? My reasoning was that
 411   // those cases will be so rare that the extra time spent doesn't
 412   // really matter....
 413   // Note: do not change the loop test i >= res + IndexSetStride
 414   // to i > res below, because i is unsigned and res may be zero.
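  // Illustration (hypothetical values): if the dictionary's largest chunk is
  // res == 10 words but _indexedFreeList[40] is non-empty, the scan below
  // returns 40, since the indexed lists then hold a larger allocatable block.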
 415   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 416        i -= IndexSetStride) {
 417     if (_indexedFreeList[i].head() != NULL) {
 418       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 419       return i;
 420     }
 421   }
 422   return res;
 423 }
 424 
 425 void LinearAllocBlock::print_on(outputStream* st) const {
 426   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 427             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 428             _ptr, _word_size, _refillSize, _allocation_size_limit);
 429 }
 430 
 431 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 432   st->print_cr("COMPACTIBLE FREELIST SPACE");
 433   st->print_cr(" Space:");
 434   Space::print_on(st);
 435 
 436   st->print_cr("promoInfo:");
 437   _promoInfo.print_on(st);
 438 
 439   st->print_cr("_smallLinearAllocBlock");
 440   _smallLinearAllocBlock.print_on(st);
 441 
 442   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 443 
 444   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 445                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 446 }
 447 
 448 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 449 const {
 450   reportIndexedFreeListStatistics();
 451   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 452   gclog_or_tty->print_cr("---------------------------");
 453   FreeList<FreeChunk>::print_labels_on(st, "size");
 454   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 455     _indexedFreeList[i].print_on(gclog_or_tty);
 456     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 457          fc = fc->next()) {
 458       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 459                           fc, (HeapWord*)fc + i,
 460                           fc->cantCoalesce() ? "\t CC" : "");
 461     }
 462   }
 463 }
 464 
 465 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 466 const {
 467   _promoInfo.print_on(st);
 468 }
 469 
 470 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 471 const {
 472   _dictionary->report_statistics();
 473   st->print_cr("Layout of Freelists in Tree");
 474   st->print_cr("---------------------------");
 475   _dictionary->print_free_lists(st);
 476 }
 477 
 478 class BlkPrintingClosure: public BlkClosure {
 479   const CMSCollector*             _collector;
 480   const CompactibleFreeListSpace* _sp;
 481   const CMSBitMap*                _live_bit_map;
 482   const bool                      _post_remark;
 483   outputStream*                   _st;
 484 public:
 485   BlkPrintingClosure(const CMSCollector* collector,
 486                      const CompactibleFreeListSpace* sp,
 487                      const CMSBitMap* live_bit_map,
 488                      outputStream* st):
 489     _collector(collector),
 490     _sp(sp),
 491     _live_bit_map(live_bit_map),
 492     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 493     _st(st) { }
 494   size_t do_blk(HeapWord* addr);
 495 };
 496 
 497 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 498   size_t sz = _sp->block_size_no_stall(addr, _collector);
 499   assert(sz != 0, "Should always be able to compute a size");
 500   if (_sp->block_is_obj(addr)) {
 501     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 502     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 503       addr,
 504       dead ? "dead" : "live",
 505       sz,
 506       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 507     if (CMSPrintObjectsInDump && !dead) {
 508       oop(addr)->print_on(_st);
 509       _st->print_cr("--------------------------------------");
 510     }
 511   } else { // free block
 512     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 513       addr, sz, CMSPrintChunksInDump ? ":" : ".");
 514     if (CMSPrintChunksInDump) {
 515       ((FreeChunk*)addr)->print_on(_st);
 516       _st->print_cr("--------------------------------------");
 517     }
 518   }
 519   return sz;
 520 }
 521 
 522 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 523   outputStream* st) {
 524   st->print_cr("\n=========================");
 525   st->print_cr("Block layout in CMS Heap:");
 526   st->print_cr("=========================");
 527   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 528   blk_iterate(&bpcl);
 529 
 530   st->print_cr("\n=======================================");
 531   st->print_cr("Order & Layout of Promotion Info Blocks");
 532   st->print_cr("=======================================");
 533   print_promo_info_blocks(st);
 534 
 535   st->print_cr("\n===========================");
 536   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 538   print_indexed_free_lists(st);
 539 
 540   st->print_cr("\n=================================");
 541   st->print_cr("Order of Free Lists in Dictionary");
 542   st->print_cr("=================================");
 543   print_dictionary_free_lists(st);
 544 }
 545 
 546 
 547 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 548   assert_lock_strong(&_freelistLock);
 549   assert(PrintFLSStatistics != 0, "Reporting error");
 550   _dictionary->report_statistics();
 551   if (PrintFLSStatistics > 1) {
 552     reportIndexedFreeListStatistics();
 553     size_t total_size = totalSizeInIndexedFreeLists() +
 554                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 556   }
 557 }
 558 
 559 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 560   assert_lock_strong(&_freelistLock);
 561   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 562                       "--------------------------------\n");
 563   size_t total_size = totalSizeInIndexedFreeLists();
 564   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
  if (free_blocks != 0) {
    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 570   }
 571 }
 572 
 573 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 574   size_t res = 0;
 575   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 576     debug_only(
 577       ssize_t recount = 0;
 578       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 579          fc = fc->next()) {
 580         recount += 1;
 581       }
 582       assert(recount == _indexedFreeList[i].count(),
 583         "Incorrect count in list");
 584     )
 585     res += _indexedFreeList[i].count();
 586   }
 587   return res;
 588 }
 589 
 590 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 591   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 592     if (_indexedFreeList[i].head() != NULL) {
 593       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 594       return (size_t)i;
 595     }
 596   }
 597   return 0;
 598 }
 599 
 600 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 601   HeapWord* prevEnd = end();
 602   assert(prevEnd != value, "unnecessary set_end call");
 603   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 604         "New end is below unallocated block");
 605   _end = value;
 606   if (prevEnd != NULL) {
 607     // Resize the underlying block offset table.
 608     _bt.resize(pointer_delta(value, bottom()));
 609     if (value <= prevEnd) {
 610       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 611              "New end is below unallocated block");
 612     } else {
 613       // Now, take this new chunk and add it to the free blocks.
 614       // Note that the BOT has not yet been updated for this block.
 615       size_t newFcSize = pointer_delta(value, prevEnd);
 616       // XXX This is REALLY UGLY and should be fixed up. XXX
 617       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 618         // Mark the boundary of the new block in BOT
 619         _bt.mark_block(prevEnd, value);
 620         // put it all in the linAB
 621         if (ParallelGCThreads == 0) {
 622           _smallLinearAllocBlock._ptr = prevEnd;
 623           _smallLinearAllocBlock._word_size = newFcSize;
 624           repairLinearAllocBlock(&_smallLinearAllocBlock);
 625         } else { // ParallelGCThreads > 0
 626           MutexLockerEx x(parDictionaryAllocLock(),
 627                           Mutex::_no_safepoint_check_flag);
 628           _smallLinearAllocBlock._ptr = prevEnd;
 629           _smallLinearAllocBlock._word_size = newFcSize;
 630           repairLinearAllocBlock(&_smallLinearAllocBlock);
 631         }
 632         // Births of chunks put into a LinAB are not recorded.  Births
 633         // of chunks as they are allocated out of a LinAB are.
 634       } else {
 635         // Add the block to the free lists, if possible coalescing it
 636         // with the last free block, and update the BOT and census data.
 637         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 638       }
 639     }
 640   }
 641 }
 642 
 643 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 644   CompactibleFreeListSpace* _cfls;
 645   CMSCollector* _collector;
 646 protected:
 647   // Override.
 648 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 649   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 650                                        HeapWord* bottom, HeapWord* top, \
 651                                        ClosureType* cl);                \
 652       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 653                                        HeapWord* bottom, HeapWord* top, \
 654                                        ClosureType* cl);                \
 655     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 656                                        HeapWord* bottom, HeapWord* top, \
 657                                        ClosureType* cl)
 658   walk_mem_region_with_cl_DECL(OopClosure);
 659   walk_mem_region_with_cl_DECL(FilteringClosure);
 660 
 661 public:
 662   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 663                       CMSCollector* collector,
 664                       OopClosure* cl,
 665                       CardTableModRefBS::PrecisionStyle precision,
 666                       HeapWord* boundary) :
 667     Filtering_DCTOC(sp, cl, precision, boundary),
 668     _cfls(sp), _collector(collector) {}
 669 };
 670 
 671 // We de-virtualize the block-related calls below, since we know that our
 672 // space is a CompactibleFreeListSpace.
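// (A call written as _cfls->CompactibleFreeListSpace::block_size(p) is bound
// statically because of the explicit qualification, so it avoids the virtual
// dispatch that the plain _cfls->block_size(p) form would incur.)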
 673 
 674 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 675 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 676                                                  HeapWord* bottom,              \
 677                                                  HeapWord* top,                 \
 678                                                  ClosureType* cl) {             \
 679    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 680    if (is_par) {                                                                \
 681      assert(SharedHeap::heap()->n_par_threads() ==                              \
 682             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 683      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 684    } else {                                                                     \
 685      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 686    }                                                                            \
 687 }                                                                               \
 688 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 689                                                       HeapWord* bottom,         \
 690                                                       HeapWord* top,            \
 691                                                       ClosureType* cl) {        \
 692   /* Skip parts that are before "mr", in case "block_start" sent us             \
 693      back too far. */                                                           \
 694   HeapWord* mr_start = mr.start();                                              \
 695   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 696   HeapWord* next = bottom + bot_size;                                           \
 697   while (next < mr_start) {                                                     \
 698     bottom = next;                                                              \
 699     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 700     next = bottom + bot_size;                                                   \
 701   }                                                                             \
 702                                                                                 \
 703   while (bottom < top) {                                                        \
 704     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 705         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 706                     oop(bottom)) &&                                             \
 707         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 708       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 709       bottom += _cfls->adjustObjectSize(word_sz);                               \
 710     } else {                                                                    \
 711       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 712     }                                                                           \
 713   }                                                                             \
 714 }                                                                               \
 715 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 716                                                         HeapWord* bottom,       \
 717                                                         HeapWord* top,          \
 718                                                         ClosureType* cl) {      \
 719   /* Skip parts that are before "mr", in case "block_start" sent us             \
 720      back too far. */                                                           \
 721   HeapWord* mr_start = mr.start();                                              \
 722   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 723   HeapWord* next = bottom + bot_size;                                           \
 724   while (next < mr_start) {                                                     \
 725     bottom = next;                                                              \
 726     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 727     next = bottom + bot_size;                                                   \
 728   }                                                                             \
 729                                                                                 \
 730   while (bottom < top) {                                                        \
 731     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 732         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 733                     oop(bottom)) &&                                             \
 734         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 735       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 736       bottom += _cfls->adjustObjectSize(word_sz);                               \
 737     } else {                                                                    \
 738       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 739     }                                                                           \
 740   }                                                                             \
 741 }
 742 
 743 // (There are only two of these, rather than N, because the split is due
 744 // only to the introduction of the FilteringClosure, a local part of the
 745 // impl of this abstraction.)
 746 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
 747 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 748 
 749 DirtyCardToOopClosure*
 750 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
 751                                       CardTableModRefBS::PrecisionStyle precision,
 752                                       HeapWord* boundary) {
 753   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 754 }
 755 
 756 
 757 // Note on locking for the space iteration functions:
 758 // since the collector's iteration activities are concurrent with
 759 // allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance, a block being iterated
 761 // may suddenly be allocated or divided up and part of it allocated and
 762 // so on.
 763 
 764 // Apply the given closure to each block in the space.
 765 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 766   assert_lock_strong(freelistLock());
 767   HeapWord *cur, *limit;
 768   for (cur = bottom(), limit = end(); cur < limit;
 769        cur += cl->do_blk_careful(cur));
 770 }
 771 
 772 // Apply the given closure to each block in the space.
 773 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 774   assert_lock_strong(freelistLock());
 775   HeapWord *cur, *limit;
 776   for (cur = bottom(), limit = end(); cur < limit;
 777        cur += cl->do_blk(cur));
 778 }
 779 
 780 // Apply the given closure to each oop in the space.
 781 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
 782   assert_lock_strong(freelistLock());
 783   HeapWord *cur, *limit;
 784   size_t curSize;
 785   for (cur = bottom(), limit = end(); cur < limit;
 786        cur += curSize) {
 787     curSize = block_size(cur);
 788     if (block_is_obj(cur)) {
 789       oop(cur)->oop_iterate(cl);
 790     }
 791   }
 792 }
 793 
// Apply the given closure to each oop in the intersection of the space and mr.
 795 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
 796   assert_lock_strong(freelistLock());
 797   if (is_empty()) {
 798     return;
 799   }
 800   MemRegion cur = MemRegion(bottom(), end());
 801   mr = mr.intersection(cur);
 802   if (mr.is_empty()) {
 803     return;
 804   }
 805   if (mr.equals(cur)) {
 806     oop_iterate(cl);
 807     return;
 808   }
 809   assert(mr.end() <= end(), "just took an intersection above");
 810   HeapWord* obj_addr = block_start(mr.start());
 811   HeapWord* t = mr.end();
 812 
 813   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 814   if (block_is_obj(obj_addr)) {
 815     // Handle first object specially.
 816     oop obj = oop(obj_addr);
 817     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 818   } else {
 819     FreeChunk* fc = (FreeChunk*)obj_addr;
 820     obj_addr += fc->size();
 821   }
 822   while (obj_addr < t) {
 823     HeapWord* obj = obj_addr;
 824     obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than t (the region end), then the
    // entire object "obj" is within the region.
 827     if (obj_addr <= t) {
 828       if (block_is_obj(obj)) {
 829         oop(obj)->oop_iterate(cl);
 830       }
 831     } else {
 832       // "obj" extends beyond end of region
 833       if (block_is_obj(obj)) {
 834         oop(obj)->oop_iterate(&smr_blk);
 835       }
 836       break;
 837     }
 838   }
 839 }
 840 
 841 // NOTE: In the following methods, in order to safely be able to
 842 // apply the closure to an object, we need to be sure that the
 843 // object has been initialized. We are guaranteed that an object
 844 // is initialized if we are holding the Heap_lock with the
 845 // world stopped.
 846 void CompactibleFreeListSpace::verify_objects_initialized() const {
 847   if (is_init_completed()) {
 848     assert_locked_or_safepoint(Heap_lock);
 849     if (Universe::is_fully_initialized()) {
 850       guarantee(SafepointSynchronize::is_at_safepoint(),
 851                 "Required for objects to be initialized");
 852     }
 853   } // else make a concession at vm start-up
 854 }
 855 
 856 // Apply the given closure to each object in the space
 857 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 858   assert_lock_strong(freelistLock());
 859   NOT_PRODUCT(verify_objects_initialized());
 860   HeapWord *cur, *limit;
 861   size_t curSize;
 862   for (cur = bottom(), limit = end(); cur < limit;
 863        cur += curSize) {
 864     curSize = block_size(cur);
 865     if (block_is_obj(cur)) {
 866       blk->do_object(oop(cur));
 867     }
 868   }
 869 }
 870 
// Apply the given closure to each live object in the space.
//   The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GC's allows
// objects in the space to hold references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
 877 // obj_is_alive() to determine whether it is safe to apply the closure to
 878 // an object.  See obj_is_alive() for details on how liveness of an
 879 // object is decided.
 880 
 881 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 882   assert_lock_strong(freelistLock());
 883   NOT_PRODUCT(verify_objects_initialized());
 884   HeapWord *cur, *limit;
 885   size_t curSize;
 886   for (cur = bottom(), limit = end(); cur < limit;
 887        cur += curSize) {
 888     curSize = block_size(cur);
 889     if (block_is_obj(cur) && obj_is_alive(cur)) {
 890       blk->do_object(oop(cur));
 891     }
 892   }
 893 }
 894 
 895 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 896                                                   UpwardsObjectClosure* cl) {
 897   assert_locked(freelistLock());
 898   NOT_PRODUCT(verify_objects_initialized());
 899   Space::object_iterate_mem(mr, cl);
 900 }
 901 
 902 // Callers of this iterator beware: The closure application should
 903 // be robust in the face of uninitialized objects and should (always)
 904 // return a correct size so that the next addr + size below gives us a
 905 // valid block boundary. [See for instance,
 906 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 907 // in ConcurrentMarkSweepGeneration.cpp.]
 908 HeapWord*
 909 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 910   assert_lock_strong(freelistLock());
 911   HeapWord *addr, *last;
 912   size_t size;
 913   for (addr = bottom(), last  = end();
 914        addr < last; addr += size) {
 915     FreeChunk* fc = (FreeChunk*)addr;
 916     if (fc->is_free()) {
 917       // Since we hold the free list lock, which protects direct
 918       // allocation in this generation by mutators, a free object
 919       // will remain free throughout this iteration code.
 920       size = fc->size();
 921     } else {
 922       // Note that the object need not necessarily be initialized,
 923       // because (for instance) the free list lock does NOT protect
 924       // object initialization. The closure application below must
 925       // therefore be correct in the face of uninitialized objects.
 926       size = cl->do_object_careful(oop(addr));
 927       if (size == 0) {
 928         // An unparsable object found. Signal early termination.
 929         return addr;
 930       }
 931     }
 932   }
 933   return NULL;
 934 }
 935 
 936 // Callers of this iterator beware: The closure application should
 937 // be robust in the face of uninitialized objects and should (always)
 938 // return a correct size so that the next addr + size below gives us a
 939 // valid block boundary. [See for instance,
 940 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 941 // in ConcurrentMarkSweepGeneration.cpp.]
 942 HeapWord*
 943 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 944   ObjectClosureCareful* cl) {
 945   assert_lock_strong(freelistLock());
 946   // Can't use used_region() below because it may not necessarily
 947   // be the same as [bottom(),end()); although we could
 948   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 949   // that appears too cumbersome, so we just do the simpler check
 950   // in the assertion below.
 951   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 952          "mr should be non-empty and within used space");
 953   HeapWord *addr, *end;
 954   size_t size;
 955   for (addr = block_start_careful(mr.start()), end  = mr.end();
 956        addr < end; addr += size) {
 957     FreeChunk* fc = (FreeChunk*)addr;
 958     if (fc->is_free()) {
 959       // Since we hold the free list lock, which protects direct
 960       // allocation in this generation by mutators, a free object
 961       // will remain free throughout this iteration code.
 962       size = fc->size();
 963     } else {
 964       // Note that the object need not necessarily be initialized,
 965       // because (for instance) the free list lock does NOT protect
 966       // object initialization. The closure application below must
 967       // therefore be correct in the face of uninitialized objects.
 968       size = cl->do_object_careful_m(oop(addr), mr);
 969       if (size == 0) {
 970         // An unparsable object found. Signal early termination.
 971         return addr;
 972       }
 973     }
 974   }
 975   return NULL;
 976 }
 977 
 978 
 979 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 980   NOT_PRODUCT(verify_objects_initialized());
 981   return _bt.block_start(p);
 982 }
 983 
 984 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 985   return _bt.block_start_careful(p);
 986 }
 987 
 988 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 989   NOT_PRODUCT(verify_objects_initialized());
 990   // This must be volatile, or else there is a danger that the compiler
 991   // will compile the code below into a sometimes-infinite loop, by keeping
 992   // the value read the first time in a register.
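  // Illustrative race (hypothetical interleaving): we may see a free-chunk
  // header at p, but before the size is read a parallel allocator converts
  // the chunk into an object; the re-check below then fails and a later
  // pass through the loop takes the klass_or_null() path instead.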
 993   while (true) {
 994     // We must do this until we get a consistent view of the object.
 995     if (FreeChunk::indicatesFreeChunk(p)) {
 996       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 997       size_t res = fc->size();
 998       // If the object is still a free chunk, return the size, else it
 999       // has been allocated so try again.
1000       if (FreeChunk::indicatesFreeChunk(p)) {
1001         assert(res != 0, "Block size should not be 0");
1002         return res;
1003       }
1004     } else {
1005       // must read from what 'p' points to in each loop.
1006       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
1007       if (k != NULL) {
1008         assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
1009         oop o = (oop)p;
1010         assert(o->is_parsable(), "Should be parsable");
1011         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1012         size_t res = o->size_given_klass(k->klass_part());
1013         res = adjustObjectSize(res);
1014         assert(res != 0, "Block size should not be 0");
1015         return res;
1016       }
1017     }
1018   }
1019 }
1020 
1021 // A variant of the above that uses the Printezis bits for
1022 // unparsable but allocated objects. This avoids any possible
1023 // stalls waiting for mutators to initialize objects, and is
1024 // thus potentially faster than the variant above. However,
1025 // this variant may return a zero size for a block that is
1026 // under mutation and for which a consistent size cannot be
1027 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1028 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1029                                                      const CMSCollector* c)
1030 const {
1031   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1032   // This must be volatile, or else there is a danger that the compiler
1033   // will compile the code below into a sometimes-infinite loop, by keeping
1034   // the value read the first time in a register.
1035   DEBUG_ONLY(uint loops = 0;)
1036   while (true) {
1037     // We must do this until we get a consistent view of the object.
1038     if (FreeChunk::indicatesFreeChunk(p)) {
1039       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1040       size_t res = fc->size();
1041       if (FreeChunk::indicatesFreeChunk(p)) {
1042         assert(res != 0, "Block size should not be 0");
1043         assert(loops == 0, "Should be 0");
1044         return res;
1045       }
1046     } else {
1047       // must read from what 'p' points to in each loop.
1048       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
1049       // We trust the size of any object that has a non-NULL
1050       // klass and (for those in the perm gen) is parsable
1051       // -- irrespective of its conc_safe-ty.
1052       if (k != NULL && ((oopDesc*)p)->is_parsable()) {
1053         assert(k->is_oop(), "Should really be klass oop.");
1054         oop o = (oop)p;
1055         assert(o->is_oop(), "Should be an oop");
1056         size_t res = o->size_given_klass(k->klass_part());
1057         res = adjustObjectSize(res);
1058         assert(res != 0, "Block size should not be 0");
1059         return res;
1060       } else {
1061         // May return 0 if P-bits not present.
1062         return c->block_size_if_printezis_bits(p);
1063       }
1064     }
1065     assert(loops == 0, "Can loop at most once");
1066     DEBUG_ONLY(loops++;)
1067   }
1068 }
1069 
1070 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1071   NOT_PRODUCT(verify_objects_initialized());
1072   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1073   FreeChunk* fc = (FreeChunk*)p;
1074   if (fc->is_free()) {
1075     return fc->size();
1076   } else {
1077     // Ignore mark word because this may be a recently promoted
1078     // object whose mark word is used to chain together grey
1079     // objects (the last one would have a null value).
1080     assert(oop(p)->is_oop(true), "Should be an oop");
1081     return adjustObjectSize(oop(p)->size());
1082   }
1083 }
1084 
1085 // This implementation assumes that the property of "being an object" is
// stable.  But being a free chunk may not be (because of parallel
// promotion).
1088 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1089   FreeChunk* fc = (FreeChunk*)p;
1090   assert(is_in_reserved(p), "Should be in space");
1091   // When doing a mark-sweep-compact of the CMS generation, this
1092   // assertion may fail because prepare_for_compaction() uses
1093   // space that is garbage to maintain information on ranges of
1094   // live objects so that these live ranges can be moved as a whole.
  // Comment out this assertion until that problem can be solved
  // (i.e., the block start calculation may look at objects
  // at addresses below "p" in finding the object that contains "p",
  // and those objects (if garbage) may have been modified to hold
  // live range information).
1100   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1101   //        "Should be a block boundary");
1102   if (FreeChunk::indicatesFreeChunk(p)) return false;
1103   klassOop k = oop(p)->klass_or_null();
1104   if (k != NULL) {
1105     // Ignore mark word because it may have been used to
1106     // chain together promoted objects (the last one
1107     // would have a null value).
1108     assert(oop(p)->is_oop(true), "Should be an oop");
1109     return true;
1110   } else {
1111     return false;  // Was not an object at the start of collection.
1112   }
1113 }
1114 
1115 // Check if the object is alive. This fact is checked either by consulting
1116 // the main marking bitmap in the sweeping phase or, if it's a permanent
1117 // generation and we're not in the sweeping phase, by checking the
1118 // perm_gen_verify_bit_map where we store the "deadness" information if
1119 // we did not sweep the perm gen in the most recent previous GC cycle.
1120 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1121   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1122          "Else races are possible");
1123   assert(block_is_obj(p), "The address should point to an object");
1124 
1125   // If we're sweeping, we use object liveness information from the main bit map
1126   // for both perm gen and old gen.
1127   // We don't need to lock the bitmap (live_map or dead_map below), because
1128   // EITHER we are in the middle of the sweeping phase, and the
1129   // main marking bit map (live_map below) is locked,
1130   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1131   // is stable, because it's mutated only in the sweeping phase.
1132   // NOTE: This method is also used by jmap where, if class unloading is
1133   // off, the results can return "false" for legitimate perm objects,
1134   // when we are not in the midst of a sweeping phase, which can result
1135   // in jmap not reporting certain perm gen objects. This will be moot
1136   // if/when the perm gen goes away in the future.
1137   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1138     CMSBitMap* live_map = _collector->markBitMap();
1139     return live_map->par_isMarked((HeapWord*) p);
1140   } else {
1141     // If we're not currently sweeping and we haven't swept the perm gen in
1142     // the previous concurrent cycle then we may have dead but unswept objects
1143     // in the perm gen. In this case, we use the "deadness" information
1144     // that we had saved in perm_gen_verify_bit_map at the last sweep.
1145     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
1146       if (_collector->verifying()) {
1147         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
1148         // Object is marked in the dead_map bitmap at the previous sweep
1149         // when we know that it's dead; if the bitmap is not allocated then
1150         // the object is alive.
        return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
1152                || !dead_map->par_isMarked((HeapWord*) p);
1153       } else {
1154         return false; // We can't say for sure if it's live, so we say that it's dead.
1155       }
1156     }
1157   }
1158   return true;
1159 }
1160 
1161 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1162   FreeChunk* fc = (FreeChunk*)p;
1163   assert(is_in_reserved(p), "Should be in space");
1164   assert(_bt.block_start(p) == p, "Should be a block boundary");
1165   if (!fc->is_free()) {
1166     // Ignore mark word because it may have been used to
1167     // chain together promoted objects (the last one
1168     // would have a null value).
1169     assert(oop(p)->is_oop(true), "Should be an oop");
1170     return true;
1171   }
1172   return false;
1173 }
1174 
1175 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistLock() when you call this.
1177 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1178   size_t size = 0;
1179   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1180     debug_only(
1181       // We may be calling here without the lock in which case we
1182       // won't do this modest sanity check.
1183       if (freelistLock()->owned_by_self()) {
1184         size_t total_list_size = 0;
1185         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1186           fc = fc->next()) {
1187           total_list_size += i;
1188         }
1189         assert(total_list_size == i * _indexedFreeList[i].count(),
1190                "Count in list is incorrect");
1191       }
1192     )
1193     size += i * _indexedFreeList[i].count();
1194   }
1195   return size;
1196 }
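
// Illustrative usage sketch (not part of the original sources): a caller that
// needs an exact total, rather than the approximate answer described above,
// takes the free list lock around the call, e.g. given a
// CompactibleFreeListSpace* sp:
//
//   MutexLockerEx x(sp->freelistLock(), Mutex::_no_safepoint_check_flag);
//   size_t indexed_free_words = sp->totalSizeInIndexedFreeLists();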
1197 
1198 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1199   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1200   return allocate(size);
1201 }
1202 
1203 HeapWord*
1204 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1205   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1206 }
1207 
1208 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1209   assert_lock_strong(freelistLock());
1210   HeapWord* res = NULL;
1211   assert(size == adjustObjectSize(size),
1212          "use adjustObjectSize() before calling into allocate()");
1213 
1214   if (_adaptive_freelists) {
1215     res = allocate_adaptive_freelists(size);
1216   } else {  // non-adaptive free lists
1217     res = allocate_non_adaptive_freelists(size);
1218   }
1219 
1220   if (res != NULL) {
1221     // check that res does lie in this space!
1222     assert(is_in_reserved(res), "Not in this space!");
1223     assert(is_aligned((void*)res), "alignment check");
1224 
1225     FreeChunk* fc = (FreeChunk*)res;
1226     fc->markNotFree();
1227     assert(!fc->is_free(), "shouldn't be marked free");
1228     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1229     // Verify that the block offset table shows this to
1230     // be a single block, but not one which is unallocated.
1231     _bt.verify_single_block(res, size);
1232     _bt.verify_not_unallocated(res, size);
1233     // mangle a just allocated object with a distinct pattern.
1234     debug_only(fc->mangleAllocated(size));
1235   }
1236 
1237   return res;
1238 }
1239 
1240 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1241   HeapWord* res = NULL;
1242   // Try to use linear allocation for smaller blocks.
1243   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1244     // if successful, the following also adjusts block offset table
1245     res = getChunkFromSmallLinearAllocBlock(size);
1246   }
1247   // Else triage to indexed lists for smaller sizes
1248   if (res == NULL) {
1249     if (size < SmallForDictionary) {
1250       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1251     } else {
1252       // else get it from the big dictionary; if even this doesn't
1253       // work we are out of luck.
1254       res = (HeapWord*)getChunkFromDictionaryExact(size);
1255     }
1256   }
1257 
1258   return res;
1259 }
1260 
1261 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1262   assert_lock_strong(freelistLock());
1263   HeapWord* res = NULL;
1264   assert(size == adjustObjectSize(size),
1265          "use adjustObjectSize() before calling into allocate()");
1266 
1267   // Strategy
1268   //   if small
1269   //     exact size from the small object indexed list
1270   //     small or large linear allocation block (linAB) as appropriate
1271   //     take from lists of greater sized chunks
1272   //   else
1273   //     dictionary
1274   //     small or large linear allocation block if it has the space
1275   // Try allocating exact size from indexTable first
1276   if (size < IndexSetSize) {
1277     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1278     if(res != NULL) {
1279       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1280         "Not removed from free list");
1281       // no block offset table adjustment is necessary on blocks in
1282       // the indexed lists.
1283 
1284     // Try allocating from the small LinAB
1285     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1286         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1287         // if successful, the above also adjusts block offset table
1288         // Note that this call will refill the LinAB to
1289         // satisfy the request.  This is different than
1290         // evm.
1291         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1292     } else {
1293       // Raid the exact free lists larger than size, even if they are not
1294       // overpopulated.
1295       res = (HeapWord*) getChunkFromGreater(size);
1296     }
1297   } else {
1298     // Big objects get allocated directly from the dictionary.
1299     res = (HeapWord*) getChunkFromDictionaryExact(size);
1300     if (res == NULL) {
1301       // Try hard not to fail since an allocation failure will likely
1302       // trigger a synchronous GC.  Try to get the space from the
1303       // allocation blocks.
1304       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1305     }
1306   }
1307 
1308   return res;
1309 }
1310 
1311 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1312 // when promoting an object of the given size.
1313 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1314   // Depending on the object size, expansion may require refilling either a
1315   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1316   // is added because the dictionary may over-allocate to avoid fragmentation.
1317   size_t space = obj_size;
1318   if (!_adaptive_freelists) {
1319     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1320   }
1321   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1322   return space;
1323 }
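
// Worked example (hypothetical numbers, for illustration only): with
// non-adaptive free lists, a 10-word object, a small linAB refill size of
// 256 words, a PromotionInfo refill size of 64 words, and MinChunkSize == 4:
//
//   space = MAX2((size_t)10, (size_t)256) + 64 + 2 * 4 = 328 HeapWords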
1324 
1325 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1326   FreeChunk* ret;
1327 
1328   assert(numWords >= MinChunkSize, "Size is less than minimum");
1329   assert(linearAllocationWouldFail() || bestFitFirst(),
1330     "Should not be here");
1331 
1332   size_t i;
1333   size_t currSize = numWords + MinChunkSize;
1334   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1335   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1336     FreeList<FreeChunk>* fl = &_indexedFreeList[i];
1337     if (fl->head()) {
1338       ret = getFromListGreater(fl, numWords);
1339       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1340       return ret;
1341     }
1342   }
1343 
1344   currSize = MAX2((size_t)SmallForDictionary,
1345                   (size_t)(numWords + MinChunkSize));
1346 
1347   /* Try to get a chunk that satisfies request, while avoiding
1348      fragmentation that can't be handled. */
1349   {
1350     ret =  dictionary()->get_chunk(currSize);
1351     if (ret != NULL) {
1352       assert(ret->size() - numWords >= MinChunkSize,
1353              "Chunk is too small");
1354       _bt.allocated((HeapWord*)ret, ret->size());
1355       /* Carve returned chunk. */
1356       (void) splitChunkAndReturnRemainder(ret, numWords);
1357       /* Label this as no longer a free chunk. */
1358       assert(ret->is_free(), "This chunk should be free");
1359       ret->link_prev(NULL);
1360     }
1361     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1362     return ret;
1363   }
1364   ShouldNotReachHere();
1365 }
1366 
1367 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1368   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1369   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1370 }
1371 
1372 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1373   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1374          (_smallLinearAllocBlock._word_size == fc->size()),
1375          "Linear allocation block shows incorrect size");
1376   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1377           (_smallLinearAllocBlock._word_size == fc->size()));
1378 }
1379 
1380 // Check if the purported free chunk is present either as a linear
1381 // allocation block, the size-indexed table of (smaller) free blocks,
1382 // or the larger free blocks kept in the binary tree dictionary.
1383 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1384   if (verify_chunk_is_linear_alloc_block(fc)) {
1385     return true;
1386   } else if (fc->size() < IndexSetSize) {
1387     return verifyChunkInIndexedFreeLists(fc);
1388   } else {
1389     return dictionary()->verify_chunk_in_free_list(fc);
1390   }
1391 }
1392 
1393 #ifndef PRODUCT
1394 void CompactibleFreeListSpace::assert_locked() const {
1395   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1396 }
1397 
1398 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1399   CMSLockVerifier::assert_locked(lock);
1400 }
1401 #endif
1402 
1403 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1404   // In the parallel case, the main thread holds the free list lock
1405   // on behalf of the parallel threads.
1406   FreeChunk* fc;
1407   {
1408     // If GC is parallel, this might be called by several threads.
1409     // This should be rare enough that the locking overhead won't affect
1410     // the sequential code.
1411     MutexLockerEx x(parDictionaryAllocLock(),
1412                     Mutex::_no_safepoint_check_flag);
1413     fc = getChunkFromDictionary(size);
1414   }
1415   if (fc != NULL) {
1416     fc->dontCoalesce();
1417     assert(fc->is_free(), "Should be free, but not coalescable");
1418     // Verify that the block offset table shows this to
1419     // be a single block, but not one which is unallocated.
1420     _bt.verify_single_block((HeapWord*)fc, fc->size());
1421     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1422   }
1423   return fc;
1424 }
1425 
1426 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1427   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1428   assert_locked();
1429 
1430   // If we are tracking promotions, then first ensure space for
1431   // promotion (including spooling space for saving the header if necessary),
1432   // then allocate and copy, then track the promoted info if needed.
1433   // When tracking (see PromotionInfo::track()), the mark word may
1434   // be displaced and in this case restoration of the mark word
1435   // occurs in the (oop_since_save_marks_)iterate phase.
1436   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1437     return NULL;
1438   }
1439   // Call the allocate(size_t, bool) form directly to avoid the
1440   // additional call through the allocate(size_t) form.  Having
1441   // the compiler inline the call is problematic because allocate(size_t)
1442   // is a virtual method.
1443   HeapWord* res = allocate(adjustObjectSize(obj_size));
1444   if (res != NULL) {
1445     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1446     // if we should be tracking promotions, do so.
1447     if (_promoInfo.tracking()) {
1448         _promoInfo.track((PromotedObject*)res);
1449     }
1450   }
1451   return oop(res);
1452 }
1453 
1454 HeapWord*
1455 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1456   assert_locked();
1457   assert(size >= MinChunkSize, "minimum chunk size");
1458   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1459     "maximum from smallLinearAllocBlock");
1460   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1461 }
1462 
1463 HeapWord*
1464 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1465                                                        size_t size) {
1466   assert_locked();
1467   assert(size >= MinChunkSize, "too small");
1468   HeapWord* res = NULL;
1469   // Try to do linear allocation from blk, first making sure that it is not empty.
1470   if (blk->_word_size == 0) {
1471     // We have probably been unable to fill this either in the prologue or
1472     // when it was exhausted at the last linear allocation. Bail out until
1473     // next time.
1474     assert(blk->_ptr == NULL, "consistency check");
1475     return NULL;
1476   }
1477   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1478   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1479   if (res != NULL) return res;
1480 
1481   // about to exhaust this linear allocation block
1482   if (blk->_word_size == size) { // exactly satisfied
1483     res = blk->_ptr;
1484     _bt.allocated(res, blk->_word_size);
1485   } else if (size + MinChunkSize <= blk->_refillSize) {
1486     size_t sz = blk->_word_size;
1487     // Update _unallocated_block if the size is such that chunk would be
1488     // returned to the indexed free list.  All other chunks in the indexed
1489     // free lists are allocated from the dictionary so that _unallocated_block
1490     // has already been adjusted for them.  Do it here so that the cost
1491     // is incurred for all chunks added back to the indexed free lists.
1492     if (sz < SmallForDictionary) {
1493       _bt.allocated(blk->_ptr, sz);
1494     }
1495     // Return the chunk that isn't big enough, and then refill below.
1496     addChunkToFreeLists(blk->_ptr, sz);
1497     split_birth(sz);
1498     // Don't keep statistics on adding back chunk from a LinAB.
1499   } else {
1500     // A refilled block would not satisfy the request.
1501     return NULL;
1502   }
1503 
1504   blk->_ptr = NULL; blk->_word_size = 0;
1505   refillLinearAllocBlock(blk);
1506   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1507          "block was replenished");
1508   if (res != NULL) {
1509     split_birth(size);
1510     repairLinearAllocBlock(blk);
1511   } else if (blk->_ptr != NULL) {
1512     res = blk->_ptr;
1513     size_t blk_size = blk->_word_size;
1514     blk->_word_size -= size;
1515     blk->_ptr  += size;
1516     split_birth(size);
1517     repairLinearAllocBlock(blk);
1518     // Update BOT last so that other (parallel) GC threads see a consistent
1519     // view of the BOT and free blocks.
1520     // Above must occur before BOT is updated below.
1521     OrderAccess::storestore();
1522     _bt.split_block(res, blk_size, size);  // adjust block offset table
1523   }
1524   return res;
1525 }
1526 
1527 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1528                                         LinearAllocBlock* blk,
1529                                         size_t size) {
1530   assert_locked();
1531   assert(size >= MinChunkSize, "too small");
1532 
1533   HeapWord* res = NULL;
1534   // This is the common case.  Keep it simple.
1535   if (blk->_word_size >= size + MinChunkSize) {
1536     assert(blk->_ptr != NULL, "consistency check");
1537     res = blk->_ptr;
1538     // Note that the BOT is up-to-date for the linAB before allocation.  It
1539     // indicates the start of the linAB.  The split_block() updates the
1540     // BOT for the linAB after the allocation (indicates the start of the
1541     // next chunk to be allocated).
1542     size_t blk_size = blk->_word_size;
1543     blk->_word_size -= size;
1544     blk->_ptr  += size;
1545     split_birth(size);
1546     repairLinearAllocBlock(blk);
1547     // Update BOT last so that other (parallel) GC threads see a consistent
1548     // view of the BOT and free blocks.
1549     // Above must occur before BOT is updated below.
1550     OrderAccess::storestore();
1551     _bt.split_block(res, blk_size, size);  // adjust block offset table
1552     _bt.allocated(res, size);
1553   }
1554   return res;
1555 }
1556 
1557 FreeChunk*
1558 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1559   assert_locked();
1560   assert(size < SmallForDictionary, "just checking");
1561   FreeChunk* res;
1562   res = _indexedFreeList[size].get_chunk_at_head();
1563   if (res == NULL) {
1564     res = getChunkFromIndexedFreeListHelper(size);
1565   }
1566   _bt.verify_not_unallocated((HeapWord*) res, size);
1567   assert(res == NULL || res->size() == size, "Incorrect block size");
1568   return res;
1569 }
1570 
1571 FreeChunk*
1572 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1573   bool replenish) {
1574   assert_locked();
1575   FreeChunk* fc = NULL;
1576   if (size < SmallForDictionary) {
1577     assert(_indexedFreeList[size].head() == NULL ||
1578       _indexedFreeList[size].surplus() <= 0,
1579       "List for this size should be empty or under populated");
1580     // Try best fit in exact lists before replenishing the list
1581     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1582       // Replenish list.
1583       //
1584       // Things tried that failed.
1585       //   Tried allocating out of the two LinAB's first before
1586       // replenishing lists.
1587       //   Tried small linAB of size 256 (size in indexed list)
1588       // and replenishing indexed lists from the small linAB.
1589       //
1590       FreeChunk* newFc = NULL;
1591       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1592       if (replenish_size < SmallForDictionary) {
1593         // Do not replenish from an underpopulated size.
1594         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1595             _indexedFreeList[replenish_size].head() != NULL) {
1596           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1597         } else if (bestFitFirst()) {
1598           newFc = bestFitSmall(replenish_size);
1599         }
1600       }
1601       if (newFc == NULL && replenish_size > size) {
1602         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1603         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1604       }
1605       // Note: The stats update regarding the split-death of the block obtained
1606       // above will be recorded below, precisely when we know we are going to
1607       // actually split it into more than one piece.
1608       if (newFc != NULL) {
1609         if  (replenish || CMSReplenishIntermediate) {
1610           // Replenish this list and return one block to caller.
1611           size_t i;
1612           FreeChunk *curFc, *nextFc;
1613           size_t num_blk = newFc->size() / size;
1614           assert(num_blk >= 1, "Smaller than requested?");
1615           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1616           if (num_blk > 1) {
1617             // we are sure we will be splitting the block just obtained
1618             // into multiple pieces; record the split-death of the original
1619             splitDeath(replenish_size);
1620           }
1621           // carve up and link blocks 0, ..., num_blk - 2
1622           // The last chunk is not added to the lists but is returned as the
1623           // free chunk.
1624           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1625                i = 0;
1626                i < (num_blk - 1);
1627                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1628                i++) {
1629             curFc->set_size(size);
1630             // Don't record this as a return in order to try to
1631             // determine the "returns" from a GC.
1632             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1633             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1634             _bt.mark_block((HeapWord*)curFc, size);
1635             split_birth(size);
1636             // Don't record the initial population of the indexed list
1637             // as a split birth.
1638           }
1639 
1640           // check that the arithmetic was OK above
1641           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1642             "inconsistency in carving newFc");
1643           curFc->set_size(size);
1644           _bt.mark_block((HeapWord*)curFc, size);
1645           split_birth(size);
1646           fc = curFc;
1647         } else {
1648           // Return entire block to caller
1649           fc = newFc;
1650         }
1651       }
1652     }
1653   } else {
1654     // Get a free chunk from the free chunk dictionary to be returned to
1655     // replenish the indexed free list.
1656     fc = getChunkFromDictionaryExact(size);
1657   }
1658   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1659   return fc;
1660 }
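
// Illustrative example (hypothetical sizes): with CMSIndexedFreeListReplenish
// == 4 and a request for size == 6 words, replenish_size is 24. If a free
// 24-word chunk is found, num_blk == 4: the 24-word block records a
// splitDeath, three 6-word blocks are carved off and appended to
// _indexedFreeList[6], the fourth is returned to the caller, and each carved
// block records a split_birth.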
1661 
1662 FreeChunk*
1663 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1664   assert_locked();
1665   FreeChunk* fc = _dictionary->get_chunk(size);
1666   if (fc == NULL) {
1667     return NULL;
1668   }
1669   _bt.allocated((HeapWord*)fc, fc->size());
1670   if (fc->size() >= size + MinChunkSize) {
1671     fc = splitChunkAndReturnRemainder(fc, size);
1672   }
1673   assert(fc->size() >= size, "chunk too small");
1674   assert(fc->size() < size + MinChunkSize, "chunk too big");
1675   _bt.verify_single_block((HeapWord*)fc, fc->size());
1676   return fc;
1677 }
1678 
1679 FreeChunk*
1680 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1681   assert_locked();
1682   FreeChunk* fc = _dictionary->get_chunk(size);
1683   if (fc == NULL) {
1684     return fc;
1685   }
1686   _bt.allocated((HeapWord*)fc, fc->size());
1687   if (fc->size() == size) {
1688     _bt.verify_single_block((HeapWord*)fc, size);
1689     return fc;
1690   }
1691   assert(fc->size() > size, "get_chunk() guarantee");
1692   if (fc->size() < size + MinChunkSize) {
1693     // Return the chunk to the dictionary and go get a bigger one.
1694     returnChunkToDictionary(fc);
1695     fc = _dictionary->get_chunk(size + MinChunkSize);
1696     if (fc == NULL) {
1697       return NULL;
1698     }
1699     _bt.allocated((HeapWord*)fc, fc->size());
1700   }
1701   assert(fc->size() >= size + MinChunkSize, "tautology");
1702   fc = splitChunkAndReturnRemainder(fc, size);
1703   assert(fc->size() == size, "chunk is wrong size");
1704   _bt.verify_single_block((HeapWord*)fc, size);
1705   return fc;
1706 }
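
// Illustrative sketch of the two contracts enforced by the asserts above,
// for a hypothetical word size sz:
//
//   FreeChunk* a = getChunkFromDictionary(sz);      // sz <= a->size() < sz + MinChunkSize
//   FreeChunk* b = getChunkFromDictionaryExact(sz); // b->size() == sz
//
// Either call may return NULL if the dictionary cannot satisfy the request.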
1707 
1708 void
1709 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1710   assert_locked();
1711 
1712   size_t size = chunk->size();
1713   _bt.verify_single_block((HeapWord*)chunk, size);
1714   // adjust _unallocated_block downward, as necessary
1715   _bt.freed((HeapWord*)chunk, size);
1716   _dictionary->return_chunk(chunk);
1717 #ifndef PRODUCT
1718   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1719     TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
1720   }
1721 #endif // PRODUCT
1722 }
1723 
1724 void
1725 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1726   assert_locked();
1727   size_t size = fc->size();
1728   _bt.verify_single_block((HeapWord*) fc, size);
1729   _bt.verify_not_unallocated((HeapWord*) fc, size);
1730   if (_adaptive_freelists) {
1731     _indexedFreeList[size].return_chunk_at_tail(fc);
1732   } else {
1733     _indexedFreeList[size].return_chunk_at_head(fc);
1734   }
1735 #ifndef PRODUCT
1736   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1737      _indexedFreeList[size].verify_stats();
1738   }
1739 #endif // PRODUCT
1740 }
1741 
1742 // Add chunk to end of last block -- if it's the largest
1743 // block -- and update BOT and census data. We would
1744 // of course have preferred to coalesce it with the
1745 // last block, but it's currently less expensive to find the
1746 // largest block than it is to find the last.
1747 void
1748 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1749   HeapWord* chunk, size_t     size) {
1750   // check that the chunk does lie in this space!
1751   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1752   // One of the parallel gc task threads may be here
1753   // whilst others are allocating.
1754   Mutex* lock = NULL;
1755   if (ParallelGCThreads != 0) {
1756     lock = &_parDictionaryAllocLock;
1757   }
1758   FreeChunk* ec;
1759   {
1760     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1761     ec = dictionary()->find_largest_dict();  // get largest block
1762     if (ec != NULL && ec->end() == chunk) {
1763       // It's a coterminal block - we can coalesce.
1764       size_t old_size = ec->size();
1765       coalDeath(old_size);
1766       removeChunkFromDictionary(ec);
1767       size += old_size;
1768     } else {
1769       ec = (FreeChunk*)chunk;
1770     }
1771   }
1772   ec->set_size(size);
1773   debug_only(ec->mangleFreed(size));
1774   if (size < SmallForDictionary) {
1775     lock = _indexedFreeListParLocks[size];
1776   }
1777   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1778   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1779   // record the birth under the lock since the recording involves
1780   // manipulation of the list on which the chunk lives and
1781   // if the chunk is allocated and is the last on the list,
1782   // the list can go away.
1783   coalBirth(size);
1784 }
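
// Illustrative example: if the largest dictionary block ec ends exactly where
// chunk begins (ec->end() == chunk), the two are coalesced -- ec records a
// coalDeath, is removed from the dictionary, and the merged block of
// (old_size + size) words is re-added under the appropriate lock, recording
// a coalBirth.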
1785 
1786 void
1787 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1788                                               size_t     size) {
1789   // check that the chunk does lie in this space!
1790   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1791   assert_locked();
1792   _bt.verify_single_block(chunk, size);
1793 
1794   FreeChunk* fc = (FreeChunk*) chunk;
1795   fc->set_size(size);
1796   debug_only(fc->mangleFreed(size));
1797   if (size < SmallForDictionary) {
1798     returnChunkToFreeList(fc);
1799   } else {
1800     returnChunkToDictionary(fc);
1801   }
1802 }
1803 
1804 void
1805 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1806   size_t size, bool coalesced) {
1807   assert_locked();
1808   assert(chunk != NULL, "null chunk");
1809   if (coalesced) {
1810     // repair BOT
1811     _bt.single_block(chunk, size);
1812   }
1813   addChunkToFreeLists(chunk, size);
1814 }
1815 
1816 // We _must_ find the purported chunk on our free lists;
1817 // we assert if we don't.
1818 void
1819 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1820   size_t size = fc->size();
1821   assert_locked();
1822   debug_only(verifyFreeLists());
1823   if (size < SmallForDictionary) {
1824     removeChunkFromIndexedFreeList(fc);
1825   } else {
1826     removeChunkFromDictionary(fc);
1827   }
1828   _bt.verify_single_block((HeapWord*)fc, size);
1829   debug_only(verifyFreeLists());
1830 }
1831 
1832 void
1833 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1834   size_t size = fc->size();
1835   assert_locked();
1836   assert(fc != NULL, "null chunk");
1837   _bt.verify_single_block((HeapWord*)fc, size);
1838   _dictionary->remove_chunk(fc);
1839   // adjust _unallocated_block upward, as necessary
1840   _bt.allocated((HeapWord*)fc, size);
1841 }
1842 
1843 void
1844 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1845   assert_locked();
1846   size_t size = fc->size();
1847   _bt.verify_single_block((HeapWord*)fc, size);
1848   NOT_PRODUCT(
1849     if (FLSVerifyIndexTable) {
1850       verifyIndexedFreeList(size);
1851     }
1852   )
1853   _indexedFreeList[size].remove_chunk(fc);
1854   NOT_PRODUCT(
1855     if (FLSVerifyIndexTable) {
1856       verifyIndexedFreeList(size);
1857     }
1858   )
1859 }
1860 
1861 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1862   /* A hint is the next larger size that has a surplus.
1863      Start search at a size large enough to guarantee that
1864      the excess is >= MinChunkSize. */
1865   size_t start = align_object_size(numWords + MinChunkSize);
1866   if (start < IndexSetSize) {
1867     FreeList<FreeChunk>* it   = _indexedFreeList;
1868     size_t    hint = _indexedFreeList[start].hint();
1869     while (hint < IndexSetSize) {
1870       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1871       FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1872       if (fl->surplus() > 0 && fl->head() != NULL) {
1873         // Found a list with surplus, reset original hint
1874         // and split out a free chunk which is returned.
1875         _indexedFreeList[start].set_hint(hint);
1876         FreeChunk* res = getFromListGreater(fl, numWords);
1877         assert(res == NULL || res->is_free(),
1878           "Should be returning a free chunk");
1879         return res;
1880       }
1881       hint = fl->hint(); /* keep looking */
1882     }
1883     /* None found. */
1884     it[start].set_hint(IndexSetSize);
1885   }
1886   return NULL;
1887 }
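
// Illustrative example (hypothetical sizes): for numWords == 10 with
// MinChunkSize == 4 and 2-word object alignment, the search starts at
// start == align_object_size(14) == 14. If _indexedFreeList[14] hints at 20
// and the 20-word list has a surplus with a non-empty head, the hint at 14
// is reset to 20 and a 10-word chunk is split out of a 20-word block via
// getFromListGreater(); otherwise the search keeps following the hint chain.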
1888 
1889 /* Requires fl->size >= numWords + MinChunkSize */
1890 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
1891   size_t numWords) {
1892   FreeChunk *curr = fl->head();
1893   assert(numWords >= MinChunkSize, "Word size is too small");
1894   assert(curr != NULL, "List is empty");
1895   size_t oldNumWords = curr->size();
1896   assert(oldNumWords >= numWords + MinChunkSize,
1897         "Size of chunks in the list is too small");
1898 
1899   fl->remove_chunk(curr);
1900   // recorded indirectly by splitChunkAndReturnRemainder -
1901   // smallSplit(oldNumWords, numWords);
1902   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1903   // Does anything have to be done for the remainder in terms of
1904   // fixing the card table?
1905   assert(new_chunk == NULL || new_chunk->is_free(),
1906     "Should be returning a free chunk");
1907   return new_chunk;
1908 }
1909 
1910 FreeChunk*
1911 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1912   size_t new_size) {
1913   assert_locked();
1914   size_t size = chunk->size();
1915   assert(size > new_size, "Split from a smaller block?");
1916   assert(is_aligned(chunk), "alignment problem");
1917   assert(size == adjustObjectSize(size), "alignment problem");
1918   size_t rem_size = size - new_size;
1919   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1920   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1921   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1922   assert(is_aligned(ffc), "alignment problem");
1923   ffc->set_size(rem_size);
1924   ffc->link_next(NULL);
1925   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1926   // Above must occur before BOT is updated below.
1927   // adjust block offset table
1928   OrderAccess::storestore();
1929   assert(chunk->is_free() && ffc->is_free(), "Error");
1930   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1931   if (rem_size < SmallForDictionary) {
1932     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1933     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1934     assert(!is_par ||
1935            (SharedHeap::heap()->n_par_threads() ==
1936             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1937     returnChunkToFreeList(ffc);
1938     split(size, rem_size);
1939     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1940   } else {
1941     returnChunkToDictionary(ffc);
1942     split(size, rem_size);
1943   }
1944   chunk->set_size(new_size);
1945   return chunk;
1946 }
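
// Illustrative example (hypothetical sizes): splitting a free 100-word chunk
// with new_size == 60 places the 40-word remainder at chunk + 60, marks it
// free, updates the BOT, and returns the remainder to _indexedFreeList[40]
// (when 40 < SmallForDictionary) or to the dictionary otherwise; the original
// chunk is then resized to 60 words and handed back to the caller.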
1947 
1948 void
1949 CompactibleFreeListSpace::sweep_completed() {
1950   // Now that space is probably plentiful, refill linear
1951   // allocation blocks as needed.
1952   refillLinearAllocBlocksIfNeeded();
1953 }
1954 
1955 void
1956 CompactibleFreeListSpace::gc_prologue() {
1957   assert_locked();
1958   if (PrintFLSStatistics != 0) {
1959     gclog_or_tty->print("Before GC:\n");
1960     reportFreeListStatistics();
1961   }
1962   refillLinearAllocBlocksIfNeeded();
1963 }
1964 
1965 void
1966 CompactibleFreeListSpace::gc_epilogue() {
1967   assert_locked();
1968   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1969     if (_smallLinearAllocBlock._word_size == 0)
1970       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1971   }
1972   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1973   _promoInfo.stopTrackingPromotions();
1974   repairLinearAllocationBlocks();
1975   // Print Space's stats
1976   if (PrintFLSStatistics != 0) {
1977     gclog_or_tty->print("After GC:\n");
1978     reportFreeListStatistics();
1979   }
1980 }
1981 
1982 // Iteration support, mostly delegated from a CMS generation
1983 
1984 void CompactibleFreeListSpace::save_marks() {
1985   assert(Thread::current()->is_VM_thread(),
1986          "Global variable should only be set when single-threaded");
1987   // Mark the "end" of the used space at the time of this call;
1988   // note, however, that promoted objects from this point
1989   // on are tracked in the _promoInfo below.
1990   set_saved_mark_word(unallocated_block());
1991 #ifdef ASSERT
1992   // Check the sanity of save_marks() etc.
1993   MemRegion ur    = used_region();
1994   MemRegion urasm = used_region_at_save_marks();
1995   assert(ur.contains(urasm),
1996          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1997                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1998                  ur.start(), ur.end(), urasm.start(), urasm.end()));
1999 #endif
2000   // inform allocator that promotions should be tracked.
2001   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2002   _promoInfo.startTrackingPromotions();
2003 }
2004 
2005 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2006   assert(_promoInfo.tracking(), "No preceding save_marks?");
2007   assert(SharedHeap::heap()->n_par_threads() == 0,
2008          "Shouldn't be called if using parallel gc.");
2009   return _promoInfo.noPromotions();
2010 }
2011 
2012 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2013                                                                             \
2014 void CompactibleFreeListSpace::                                             \
2015 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2016   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2017          "Shouldn't be called (yet) during parallel part of gc.");          \
2018   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2019   /*                                                                        \
2020    * This also restores any displaced headers and removes the elements from \
2021    * the iteration set as they are processed, so that we have a clean slate \
2022    * at the end of the iteration. Note, thus, that if new objects are       \
2023    * promoted as a result of the iteration they are iterated over as well.  \
2024    */                                                                       \
2025   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2026 }
2027 
2028 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2029 
2030 
2031 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
2032   // ugghh... how would one do this efficiently for a non-contiguous space?
2033   guarantee(false, "NYI");
2034 }
2035 
2036 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2037   return _smallLinearAllocBlock._word_size == 0;
2038 }
2039 
2040 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2041   // Fix up linear allocation blocks to look like free blocks
2042   repairLinearAllocBlock(&_smallLinearAllocBlock);
2043 }
2044 
2045 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2046   assert_locked();
2047   if (blk->_ptr != NULL) {
2048     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2049            "Minimum block size requirement");
2050     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2051     fc->set_size(blk->_word_size);
2052     fc->link_prev(NULL);   // mark as free
2053     fc->dontCoalesce();
2054     assert(fc->is_free(), "just marked it free");
2055     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2056   }
2057 }
2058 
2059 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2060   assert_locked();
2061   if (_smallLinearAllocBlock._ptr == NULL) {
2062     assert(_smallLinearAllocBlock._word_size == 0,
2063       "Size of linAB should be zero if the ptr is NULL");
2064     // Reset the linAB refill and allocation size limit.
2065     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2066   }
2067   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2068 }
2069 
2070 void
2071 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2072   assert_locked();
2073   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2074          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2075          "blk invariant");
2076   if (blk->_ptr == NULL) {
2077     refillLinearAllocBlock(blk);
2078   }
2079   if (PrintMiscellaneous && Verbose) {
2080     if (blk->_word_size == 0) {
2081       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2082     }
2083   }
2084 }
2085 
2086 void
2087 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2088   assert_locked();
2089   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2090          "linear allocation block should be empty");
2091   FreeChunk* fc;
2092   if (blk->_refillSize < SmallForDictionary &&
2093       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2094     // A linAB's strategy might be to use small sizes to reduce
2095     // fragmentation but still get the benefits of allocation from a
2096     // linAB.
2097   } else {
2098     fc = getChunkFromDictionary(blk->_refillSize);
2099   }
2100   if (fc != NULL) {
2101     blk->_ptr  = (HeapWord*)fc;
2102     blk->_word_size = fc->size();
2103     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2104   }
2105 }
2106 
2107 // Support for concurrent collection policy decisions.
2108 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2109   // In the future we might want to add fragmentation stats --
2110   // including erosion of the "mountain" -- into this decision as well.
2111   return !adaptive_freelists() && linearAllocationWouldFail();
2112 }
2113 
2114 // Support for compaction
2115 
2116 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2117   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2118   // prepare_for_compaction() uses the space between live objects
2119   // so that later phases can skip dead space quickly.  So verification
2120   // of the free lists doesn't work after this point.
2121 }
2122 
2123 #define obj_size(q) adjustObjectSize(oop(q)->size())
2124 #define adjust_obj_size(s) adjustObjectSize(s)
2125 
2126 void CompactibleFreeListSpace::adjust_pointers() {
2127   // In other versions of adjust_pointers(), a bail out
2128   // based on the amount of live data in the generation
2129   // (i.e., if 0, bail out) may be used.
2130   // Cannot test used() == 0 here because the free lists have already
2131   // been mangled by the compaction.
2132 
2133   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2134   // See note about verification in prepare_for_compaction().
2135 }
2136 
2137 void CompactibleFreeListSpace::compact() {
2138   SCAN_AND_COMPACT(obj_size);
2139 }
2140 
2141 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2142 // where fbs are the free block sizes
2143 double CompactibleFreeListSpace::flsFrag() const {
2144   size_t itabFree = totalSizeInIndexedFreeLists();
2145   double frag = 0.0;
2146   size_t i;
2147 
2148   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2149     double sz  = i;
2150     frag      += _indexedFreeList[i].count() * (sz * sz);
2151   }
2152 
2153   double totFree = itabFree +
2154                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2155   if (totFree > 0) {
2156     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2157             (totFree * totFree));
2158     frag = (double)1.0  - frag;
2159   } else {
2160     assert(frag == 0.0, "Follows from totFree == 0");
2161   }
2162   return frag;
2163 }
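
// Worked example (illustrative): a single free block of 8 words gives
// frag = 1 - (8*8)/(8*8) = 0.0, i.e. no fragmentation, while four free
// blocks of 2 words each give frag = 1 - (4*2*2)/(8*8) = 1 - 16/64 = 0.75,
// so the metric approaches 1.0 as the free space is split into ever smaller
// pieces.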
2164 
2165 void CompactibleFreeListSpace::beginSweepFLCensus(
2166   float inter_sweep_current,
2167   float inter_sweep_estimate,
2168   float intra_sweep_estimate) {
2169   assert_locked();
2170   size_t i;
2171   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2172     FreeList<FreeChunk>* fl = &_indexedFreeList[i];
2173     if (PrintFLSStatistics > 1) {
2174       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2175     }
2176     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2177     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2178     fl->set_before_sweep(fl->count());
2179     fl->set_bfr_surp(fl->surplus());
2180   }
2181   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2182                                     inter_sweep_current,
2183                                     inter_sweep_estimate,
2184                                     intra_sweep_estimate);
2185 }
2186 
2187 void CompactibleFreeListSpace::setFLSurplus() {
2188   assert_locked();
2189   size_t i;
2190   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2191     FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2192     fl->set_surplus(fl->count() -
2193                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2194   }
2195 }
2196 
2197 void CompactibleFreeListSpace::setFLHints() {
2198   assert_locked();
2199   size_t i;
2200   size_t h = IndexSetSize;
2201   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2202     FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2203     fl->set_hint(h);
2204     if (fl->surplus() > 0) {
2205       h = i;
2206     }
2207   }
2208 }
2209 
2210 void CompactibleFreeListSpace::clearFLCensus() {
2211   assert_locked();
2212   size_t i;
2213   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2214     FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2215     fl->set_prev_sweep(fl->count());
2216     fl->set_coal_births(0);
2217     fl->set_coal_deaths(0);
2218     fl->set_split_births(0);
2219     fl->set_split_deaths(0);
2220   }
2221 }
2222 
2223 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2224   if (PrintFLSStatistics > 0) {
2225     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2226     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2227                            largestAddr);
2228   }
2229   setFLSurplus();
2230   setFLHints();
2231   if (PrintGC && PrintFLSCensus > 0) {
2232     printFLCensus(sweep_count);
2233   }
2234   clearFLCensus();
2235   assert_locked();
2236   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2237 }
2238 
2239 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2240   if (size < SmallForDictionary) {
2241     FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2242     return (fl->coal_desired() < 0) ||
2243            ((int)fl->count() > fl->coal_desired());
2244   } else {
2245     return dictionary()->coal_dict_over_populated(size);
2246   }
2247 }
2248 
2249 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2250   assert(size < SmallForDictionary, "Size too large for indexed list");
2251   FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2252   fl->increment_coal_births();
2253   fl->increment_surplus();
2254 }
2255 
2256 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2257   assert(size < SmallForDictionary, "Size too large for indexed list");
2258   FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2259   fl->increment_coal_deaths();
2260   fl->decrement_surplus();
2261 }
2262 
2263 void CompactibleFreeListSpace::coalBirth(size_t size) {
2264   if (size  < SmallForDictionary) {
2265     smallCoalBirth(size);
2266   } else {
2267     dictionary()->dict_census_udpate(size,
2268                                    false /* split */,
2269                                    true /* birth */);
2270   }
2271 }
2272 
2273 void CompactibleFreeListSpace::coalDeath(size_t size) {
2274   if(size  < SmallForDictionary) {
2275     smallCoalDeath(size);
2276   } else {
2277     dictionary()->dict_census_udpate(size,
2278                                    false /* split */,
2279                                    false /* birth */);
2280   }
2281 }
2282 
2283 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2284   assert(size < SmallForDictionary, "Size too large for indexed list");
2285   FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2286   fl->increment_split_births();
2287   fl->increment_surplus();
2288 }
2289 
2290 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2291   assert(size < SmallForDictionary, "Size too large for indexed list");
2292   FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2293   fl->increment_split_deaths();
2294   fl->decrement_surplus();
2295 }
2296 
2297 void CompactibleFreeListSpace::split_birth(size_t size) {
2298   if (size  < SmallForDictionary) {
2299     smallSplitBirth(size);
2300   } else {
2301     dictionary()->dict_census_udpate(size,
2302                                    true /* split */,
2303                                    true /* birth */);
2304   }
2305 }
2306 
2307 void CompactibleFreeListSpace::splitDeath(size_t size) {
2308   if (size  < SmallForDictionary) {
2309     smallSplitDeath(size);
2310   } else {
2311     dictionary()->dict_census_udpate(size,
2312                                    true /* split */,
2313                                    false /* birth */);
2314   }
2315 }
2316 
2317 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2318   size_t to2 = from - to1;
2319   splitDeath(from);
2320   split_birth(to1);
2321   split_birth(to2);
2322 }
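
// Illustrative example (hypothetical sizes): split(20, 12) records a
// splitDeath for the 20-word size and split_births for the 12-word and
// 8-word sizes, with each update routed to the indexed lists or to the
// dictionary census depending on whether the size is below SmallForDictionary.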
2323 
2324 void CompactibleFreeListSpace::print() const {
2325   print_on(tty);
2326 }
2327 
2328 void CompactibleFreeListSpace::prepare_for_verify() {
2329   assert_locked();
2330   repairLinearAllocationBlocks();
2331   // Verify that the SpoolBlocks look like free blocks of
2332   // appropriate sizes... To be done ...
2333 }
2334 
2335 class VerifyAllBlksClosure: public BlkClosure {
2336  private:
2337   const CompactibleFreeListSpace* _sp;
2338   const MemRegion                 _span;
2339   HeapWord*                       _last_addr;
2340   size_t                          _last_size;
2341   bool                            _last_was_obj;
2342   bool                            _last_was_live;
2343 
2344  public:
2345   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2346     MemRegion span) :  _sp(sp), _span(span),
2347                        _last_addr(NULL), _last_size(0),
2348                        _last_was_obj(false), _last_was_live(false) { }
2349 
2350   virtual size_t do_blk(HeapWord* addr) {
2351     size_t res;
2352     bool   was_obj  = false;
2353     bool   was_live = false;
2354     if (_sp->block_is_obj(addr)) {
2355       was_obj = true;
2356       oop p = oop(addr);
2357       guarantee(p->is_oop(), "Should be an oop");
2358       res = _sp->adjustObjectSize(p->size());
2359       if (_sp->obj_is_alive(addr)) {
2360         was_live = true;
2361         p->verify();
2362       }
2363     } else {
2364       FreeChunk* fc = (FreeChunk*)addr;
2365       res = fc->size();
2366       if (FLSVerifyLists && !fc->cantCoalesce()) {
2367         guarantee(_sp->verify_chunk_in_free_list(fc),
2368                   "Chunk should be on a free list");
2369       }
2370     }
2371     if (res == 0) {
2372       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2373       gclog_or_tty->print_cr(
2374         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2375         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2376         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2377         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2378       _sp->print_on(gclog_or_tty);
2379       guarantee(false, "Seppuku!");
2380     }
2381     _last_addr = addr;
2382     _last_size = res;
2383     _last_was_obj  = was_obj;
2384     _last_was_live = was_live;
2385     return res;
2386   }
2387 };
2388 
2389 class VerifyAllOopsClosure: public OopClosure {
2390  private:
2391   const CMSCollector*             _collector;
2392   const CompactibleFreeListSpace* _sp;
2393   const MemRegion                 _span;
2394   const bool                      _past_remark;
2395   const CMSBitMap*                _bit_map;
2396 
2397  protected:
2398   void do_oop(void* p, oop obj) {
2399     if (_span.contains(obj)) { // the interior oop points into CMS heap
2400       if (!_span.contains(p)) { // reference from outside CMS heap
2401         // Should be a valid object; the first disjunct below allows
2402         // us to sidestep an assertion in block_is_obj() that insists
2403         // that p be in _sp. Note that several generations (and spaces)
2404         // are spanned by _span (CMS heap) above.
2405         guarantee(!_sp->is_in_reserved(obj) ||
2406                   _sp->block_is_obj((HeapWord*)obj),
2407                   "Should be an object");
2408         guarantee(obj->is_oop(), "Should be an oop");
2409         obj->verify();
2410         if (_past_remark) {
2411           // Remark has been completed, the object should be marked.
2412           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2413         }
2414       } else { // reference within CMS heap
2415         if (_past_remark) {
2416           // Remark has been completed -- so the referent should have
2417           // been marked, if referring object is.
2418           if (_bit_map->isMarked(_collector->block_start(p))) {
2419             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2420           }
2421         }
2422       }
2423     } else if (_sp->is_in_reserved(p)) {
2424       // the reference is from FLS, and points out of FLS
2425       guarantee(obj->is_oop(), "Should be an oop");
2426       obj->verify();
2427     }
2428   }
2429 
2430   template <class T> void do_oop_work(T* p) {
2431     T heap_oop = oopDesc::load_heap_oop(p);
2432     if (!oopDesc::is_null(heap_oop)) {
2433       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2434       do_oop(p, obj);
2435     }
2436   }
2437 
2438  public:
2439   VerifyAllOopsClosure(const CMSCollector* collector,
2440     const CompactibleFreeListSpace* sp, MemRegion span,
2441     bool past_remark, CMSBitMap* bit_map) :
2442     OopClosure(), _collector(collector), _sp(sp), _span(span),
2443     _past_remark(past_remark), _bit_map(bit_map) { }
2444 
2445   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2446   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2447 };
2448 
2449 void CompactibleFreeListSpace::verify() const {
2450   assert_lock_strong(&_freelistLock);
2451   verify_objects_initialized();
2452   MemRegion span = _collector->_span;
2453   bool past_remark = (_collector->abstract_state() ==
2454                       CMSCollector::Sweeping);
2455 
2456   ResourceMark rm;
2457   HandleMark  hm;
2458 
2459   // Check integrity of CFL data structures
2460   _promoInfo.verify();
2461   _dictionary->verify();
2462   if (FLSVerifyIndexTable) {
2463     verifyIndexedFreeLists();
2464   }
2465   // Check integrity of all objects and free blocks in space
2466   {
2467     VerifyAllBlksClosure cl(this, span);
2468     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2469   }
2470   // Check that all references in the heap to FLS
2471   // are to valid objects in FLS or that references in
2472   // FLS are to valid objects elsewhere in the heap
2473   if (FLSVerifyAllHeapReferences)
2474   {
2475     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2476       _collector->markBitMap());
2477     CollectedHeap* ch = Universe::heap();
2478     ch->oop_iterate(&cl);              // all oops in generations
2479     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
2480   }
2481 
2482   if (VerifyObjectStartArray) {
2483     // Verify the block offset table
2484     _bt.verify();
2485   }
2486 }
2487 
2488 #ifndef PRODUCT
2489 void CompactibleFreeListSpace::verifyFreeLists() const {
2490   if (FLSVerifyLists) {
2491     _dictionary->verify();
2492     verifyIndexedFreeLists();
2493   } else {
2494     if (FLSVerifyDictionary) {
2495       _dictionary->verify();
2496     }
2497     if (FLSVerifyIndexTable) {
2498       verifyIndexedFreeLists();
2499     }
2500   }
2501 }
2502 #endif
2503 
2504 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2505   size_t i = 0;
2506   for (; i < IndexSetStart; i++) {
2507     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2508   }
2509   for (; i < IndexSetSize; i++) {
2510     verifyIndexedFreeList(i);
2511   }
2512 }
2513 
2514 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2515   FreeChunk* fc   =  _indexedFreeList[size].head();
2516   FreeChunk* tail =  _indexedFreeList[size].tail();
2517   size_t    num = _indexedFreeList[size].count();
2518   size_t      n = 0;
2519   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2520             "Slot should have been empty");
2521   for (; fc != NULL; fc = fc->next(), n++) {
2522     guarantee(fc->size() == size, "Size inconsistency");
2523     guarantee(fc->is_free(), "!free?");
2524     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2525     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2526   }
2527   guarantee(n == num, "Incorrect count");
2528 }
2529 
2530 #ifndef PRODUCT
2531 void CompactibleFreeListSpace::check_free_list_consistency() const {
2532   assert(_dictionary->min_size() <= IndexSetSize,
2533     "Some sizes can't be allocated without recourse to"
2534     " linear allocation buffers");
2535   assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
2536     "else MIN_TREE_CHUNK_SIZE is wrong");
2537   assert((IndexSetStride == 2 && IndexSetStart == 4) ||                   // 32-bit
2538          (IndexSetStride == 1 && IndexSetStart == 3), "just checking");   // 64-bit
2539   assert((IndexSetStride != 2) || (IndexSetStart % 2 == 0),
2540       "Some for-loops may be incorrectly initialized");
2541   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
2542       "For-loops that iterate over IndexSet with stride 2 may be wrong");
2543 }
2544 #endif
2545 
2546 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2547   assert_lock_strong(&_freelistLock);
2548   FreeList<FreeChunk> total;
2549   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2550   FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2551   size_t total_free = 0;
2552   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2553     const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2554     total_free += fl->count() * fl->size();
2555     if (i % (40*IndexSetStride) == 0) {
2556       FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2557     }
2558     fl->print_on(gclog_or_tty);
2559     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2560     total.set_surplus(    total.surplus()     + fl->surplus()    );
2561     total.set_desired(    total.desired()     + fl->desired()    );
2562     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2563     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2564     total.set_count(      total.count()       + fl->count()      );
2565     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2566     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2567     total.set_split_births(total.split_births() + fl->split_births());
2568     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2569   }
2570   total.print_on(gclog_or_tty, "TOTAL");
2571   gclog_or_tty->print_cr("Total free in indexed lists "
2572                          SIZE_FORMAT " words", total_free);
2573   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2574     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2575             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2576     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2577   _dictionary->print_dict_census();
2578 }
2579 
2580 ///////////////////////////////////////////////////////////////////////////
2581 // CFLS_LAB
2582 ///////////////////////////////////////////////////////////////////////////
2583 
2584 #define VECTOR_257(x)                                                                                  \
2585   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2586   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2587      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2588      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2589      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2590      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2591      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2592      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2593      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2594      x }
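// One slot per possible block-size index, 0 through IndexSetSize - 1 (== 256),
// giving the 257 entries that the assert in the CFLS_LAB constructor below
// checks for.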
2595 
2596 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2597 // OldPLABSize, whose static default is different; if overridden at the
2598 // command-line, this will get reinitialized via a call to
2599 // modify_initialization() below.
2600 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2601   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2602 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2603 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2604 
2605 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2606   _cfls(cfls)
2607 {
2608   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2609   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2610        i < CompactibleFreeListSpace::IndexSetSize;
2611        i += CompactibleFreeListSpace::IndexSetStride) {
2612     _indexedFreeList[i].set_size(i);
2613     _num_blocks[i] = 0;
2614   }
2615 }
2616 
2617 static bool _CFLS_LAB_modified = false;
2618 
2619 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2620   assert(!_CFLS_LAB_modified, "Call only once");
2621   _CFLS_LAB_modified = true;
2622   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2623        i < CompactibleFreeListSpace::IndexSetSize;
2624        i += CompactibleFreeListSpace::IndexSetStride) {
2625     _blocks_to_claim[i].modify(n, wt, true /* force */);
2626   }
2627 }
2628 
2629 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2630   FreeChunk* res;
2631   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2632   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2633     // This locking manages sync with other large object allocations.
2634     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2635                     Mutex::_no_safepoint_check_flag);
2636     res = _cfls->getChunkFromDictionaryExact(word_sz);
2637     if (res == NULL) return NULL;
2638   } else {
2639     FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2640     if (fl->count() == 0) {
2641       // Attempt to refill this local free list.
2642       get_from_global_pool(word_sz, fl);
2643       // If it didn't work, give up.
2644       if (fl->count() == 0) return NULL;
2645     }
2646     res = fl->get_chunk_at_head();
2647     assert(res != NULL, "Why was count non-zero?");
2648   }
2649   res->markNotFree();
2650   assert(!res->is_free(), "shouldn't be marked free");
2651   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2652   // mangle a just allocated object with a distinct pattern.
2653   debug_only(res->mangleAllocated(word_sz));
2654   return (HeapWord*)res;
2655 }
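// In outline, the allocator above: requests of IndexSetSize words or more go
// to the shared dictionary (under parDictionaryAllocLock); anything smaller is
// served from this thread's private indexed free list, which is refilled from
// the shared pool on demand. A NULL return means the relevant source could not
// supply a chunk of the requested size.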
2656 
2657 // Get a chunk of blocks of the right size and update related
2658 // book-keeping stats
2659 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
2660   // Get the #blocks we want to claim
2661   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2662   assert(n_blks > 0, "Error");
2663   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
2664   // In some cases, when the application has a phase change,
2665   // there may be a sudden and sharp shift in the object survival
2666   // profile, and updating the counts at the end of a scavenge
2667   // may not be quick enough, giving rise to large scavenge pauses
2668   // during these phase changes. It is beneficial to detect such
2669   // changes on-the-fly during a scavenge and avoid such a phase-change
2670   // pothole. The following code is a heuristic attempt to do that.
2671   // It is protected by a product flag until we have gained
2672   // enough experience with this heuristic and fine-tuned its behaviour.
2673   // WARNING: This might increase fragmentation if we overreact to
2674   // small spikes, so some kind of historical smoothing based on
2675   // previous experience with the greater reactivity might be useful.
2676   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2677   // default.
2678   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2679     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2680     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2681     n_blks = MIN2(n_blks, CMSOldPLABMax);
2682   }
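  // Worked example with purely illustrative values (not the flag defaults):
  // if the historical average n_blks is 50, CMSOldPLABToleranceFactor and
  // CMSOldPLABNumRefills are both 4, CMSOldPLABReactivityFactor is 2, and this
  // scavenge has already consumed _num_blocks[word_sz] == 2000 chunks of this
  // size, then multiple == 2000/(4*4*50) == 2 and the claim grows by
  // 2*2*50 == 200 to 250 blocks, subject to the CMSOldPLABMax clamp above.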
2683   assert(n_blks > 0, "Error");
2684   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2685   // Update stats table entry for this block size
2686   _num_blocks[word_sz] += fl->count();
2687 }
2688 
2689 void CFLS_LAB::compute_desired_plab_size() {
2690   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2691        i < CompactibleFreeListSpace::IndexSetSize;
2692        i += CompactibleFreeListSpace::IndexSetStride) {
2693     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2694            "Counter inconsistency");
2695     if (_global_num_workers[i] > 0) {
2696       // Need to smooth wrt historical average
2697       if (ResizeOldPLAB) {
2698         _blocks_to_claim[i].sample(
2699           MAX2((size_t)CMSOldPLABMin,
2700           MIN2((size_t)CMSOldPLABMax,
2701                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2702       }
2703       // Reset counters for next round
2704       _global_num_workers[i] = 0;
2705       _global_num_blocks[i] = 0;
2706       if (PrintOldPLAB) {
2707         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2708       }
2709     }
2710   }
2711 }
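// Worked example for the PLAB sizing above (illustrative numbers): if 8 workers
// together promoted 9,000 blocks of a given size during the last scavenge and
// CMSOldPLABNumRefills is 4, the raw estimate is 9000/(8*4) == 281 blocks per
// refill; that value is clamped to [CMSOldPLABMin, CMSOldPLABMax] before being
// folded into the exponential average that get_from_global_pool() consults on
// the next cycle.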
2712 
2713 // If this is changed in the future to allow parallel
2714 // access, one would need to take the FL locks and,
2715 // depending on how it is used, stagger access from
2716 // parallel threads to reduce contention.
2717 void CFLS_LAB::retire(int tid) {
2718   // We run this single threaded with the world stopped;
2719   // so no need for locks and such.
2720   NOT_PRODUCT(Thread* t = Thread::current();)
2721   assert(Thread::current()->is_VM_thread(), "Error");
2722   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2723        i < CompactibleFreeListSpace::IndexSetSize;
2724        i += CompactibleFreeListSpace::IndexSetStride) {
2725     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2726            "Can't retire more than what we obtained");
2727     if (_num_blocks[i] > 0) {
2728       size_t num_retire =  _indexedFreeList[i].count();
2729       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2730       {
2731         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2732         //                Mutex::_no_safepoint_check_flag);
2733 
2734         // Update globals stats for num_blocks used
2735         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2736         _global_num_workers[i]++;
2737         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2738         if (num_retire > 0) {
2739           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2740           // Reset this list.
2741           _indexedFreeList[i] = FreeList<FreeChunk>();
2742           _indexedFreeList[i].set_size(i);
2743         }
2744       }
2745       if (PrintOldPLAB) {
2746         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2747                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2748       }
2749       // Reset stats for next round
2750       _num_blocks[i]         = 0;
2751     }
2752   }
2753 }
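// Worked example for retire() above (illustrative numbers): a worker that
// obtained _num_blocks[i] == 120 chunks of size i during the scavenge and
// still has num_retire == 20 of them on its local list contributes
// 120 - 20 == 100 to _global_num_blocks[i], bumps _global_num_workers[i],
// and prepends the 20 unused chunks back onto the space's shared list.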
2754 
2755 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
2756   assert(fl->count() == 0, "Precondition.");
2757   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2758          "Precondition");
2759 
2760   // We'll try all multiples of word_sz in the indexed set, starting with
2761   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2762   // then try getting a big chunk and splitting it.
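  // Illustrative walk-through (hypothetical sizes): for word_sz == 8 and
  // n == 32, the loop below first tries the 8-word list directly (k == 1);
  // if that is empty and CMSSplitIndexedFreeListBlocks is set, it tries the
  // 16-word list, taking nn == MAX2(32/2, 1) == 16 chunks and splitting each
  // in two, then the 24-word list with k == 3, and so on while cur_sz stays
  // below IndexSetSize. Only if every multiple fails do we fall through to
  // the dictionary path further down.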
2763   {
2764     bool found;
2765     int  k;
2766     size_t cur_sz;
2767     for (k = 1, cur_sz = k * word_sz, found = false;
2768          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2769          (CMSSplitIndexedFreeListBlocks || k <= 1);
2770          k++, cur_sz = k * word_sz) {
2771       FreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2772       fl_for_cur_sz.set_size(cur_sz);
2773       {
2774         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2775                         Mutex::_no_safepoint_check_flag);
2776         FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2777         if (gfl->count() != 0) {
2778           // nn is the number of chunks of size cur_sz that
2779           // we'd need to split k-ways each, in order to create
2780           // "n" chunks of size word_sz each.
2781           const size_t nn = MAX2(n/k, (size_t)1);
2782           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2783           found = true;
2784           if (k > 1) {
2785             // Update split death stats for the cur_sz-size blocks list:
2786             // we increment the split death count by the number of blocks
2787             // we just took from the cur_sz-size blocks list and which
2788             // we will be splitting below.
2789             ssize_t deaths = gfl->split_deaths() +
2790                              fl_for_cur_sz.count();
2791             gfl->set_split_deaths(deaths);
2792           }
2793         }
2794       }
2795       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2796       if (found) {
2797         if (k == 1) {
2798           fl->prepend(&fl_for_cur_sz);
2799         } else {
2800           // Divide each block on fl_for_cur_sz up k ways.
2801           FreeChunk* fc;
2802           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2803             // Must do this in reverse order, so that anybody attempting to
2804             // access the main chunk sees it as a single free block until we
2805             // change it.
2806             size_t fc_size = fc->size();
2807             assert(fc->is_free(), "Error");
2808             for (int i = k-1; i >= 0; i--) {
2809               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2810               assert((i != 0) ||
2811                         ((fc == ffc) && ffc->is_free() &&
2812                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2813                         "Counting error");
2814               ffc->set_size(word_sz);
2815               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2816               ffc->link_next(NULL);
2817               // Above must occur before BOT is updated below.
2818               OrderAccess::storestore();
2819               // splitting from the right, fc_size == i * word_sz
2820               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2821               fc_size -= word_sz;
2822               assert(fc_size == i*word_sz, "Error");
2823               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2824               _bt.verify_single_block((HeapWord*)fc, fc_size);
2825               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2826               // Push this on "fl".
2827               fl->return_chunk_at_head(ffc);
2828             }
2829             // TRAP
2830             assert(fl->tail()->next() == NULL, "List invariant.");
2831           }
2832         }
2833         // Update birth stats for this block size.
2834         size_t num = fl->count();
2835         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2836                         Mutex::_no_safepoint_check_flag);
2837         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2838         _indexedFreeList[word_sz].set_split_births(births);
2839         return;
2840       }
2841     }
2842   }
2843   // Otherwise, we'll split a block from the dictionary.
2844   FreeChunk* fc = NULL;
2845   FreeChunk* rem_fc = NULL;
2846   size_t rem;
2847   {
2848     MutexLockerEx x(parDictionaryAllocLock(),
2849                     Mutex::_no_safepoint_check_flag);
2850     while (n > 0) {
2851       fc = dictionary()->get_chunk(MAX2(n * word_sz,
2852                                   _dictionary->min_size()),
2853                                   FreeBlockDictionary<FreeChunk>::atLeast);
2854       if (fc != NULL) {
2855         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2856         dictionary()->dict_census_udpate(fc->size(),
2857                                        true /*split*/,
2858                                        false /*birth*/);
2859         break;
2860       } else {
2861         n--;
2862       }
2863     }
2864     if (fc == NULL) return;
2865     // Otherwise, split up that block.
2866     assert((ssize_t)n >= 1, "Control point invariant");
2867     assert(fc->is_free(), "Error: should be a free block");
2868     _bt.verify_single_block((HeapWord*)fc, fc->size());
2869     const size_t nn = fc->size() / word_sz;
2870     n = MIN2(nn, n);
2871     assert((ssize_t)n >= 1, "Control point invariant");
2872     rem = fc->size() - n * word_sz;
2873     // If there is a remainder, and it's too small, allocate one fewer.
2874     if (rem > 0 && rem < MinChunkSize) {
2875       n--; rem += word_sz;
2876     }
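    // Illustrative arithmetic (hypothetical sizes): for word_sz == 10 and a
    // request of n == 20 blocks, a 205-word dictionary chunk gives
    // nn == 205/10 == 20, so n stays 20 and rem == 205 - 200 == 5; if 5 is
    // below MinChunkSize, the adjustment above drops n to 19 and grows the
    // remainder to 15 words so it can stand alone as a free chunk.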
2877     // Note that at this point we may have n == 0.
2878     assert((ssize_t)n >= 0, "Control point invariant");
2879 
2880     // If n is 0, the chunk fc that was found is not large
2881     // enough to leave a viable remainder.  We are unable to
2882     // allocate even one block.  Return fc to the
2883     // dictionary and return, leaving "fl" empty.
2884     if (n == 0) {
2885       returnChunkToDictionary(fc);
2886       assert(fl->count() == 0, "We never allocated any blocks");
2887       return;
2888     }
2889 
2890     // First return the remainder, if any.
2891     // Note that we hold the lock until we decide if we're going to give
2892     // back the remainder to the dictionary, since a concurrent allocation
2893     // may otherwise see the heap as empty.  (We're willing to take that
2894     // hit if the block is a small block.)
2895     if (rem > 0) {
2896       size_t prefix_size = n * word_sz;
2897       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2898       rem_fc->set_size(rem);
2899       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2900       rem_fc->link_next(NULL);
2901       // Above must occur before BOT is updated below.
2902       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2903       OrderAccess::storestore();
2904       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2905       assert(fc->is_free(), "Error");
2906       fc->set_size(prefix_size);
2907       if (rem >= IndexSetSize) {
2908         returnChunkToDictionary(rem_fc);
2909         dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
2910         rem_fc = NULL;
2911       }
2912       // Otherwise, return it to the small list below.
2913     }
2914   }
2915   if (rem_fc != NULL) {
2916     MutexLockerEx x(_indexedFreeListParLocks[rem],
2917                     Mutex::_no_safepoint_check_flag);
2918     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2919     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2920     smallSplitBirth(rem);
2921   }
2922   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2923   // Now do the splitting up.
2924   // Must do this in reverse order, so that anybody attempting to
2925   // access the main chunk sees it as a single free block until we
2926   // change it.
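  // Illustrative picture (hypothetical n == 4): the loop below carves chunks
  // at offsets 3*word_sz, 2*word_sz and 1*word_sz off the tail of fc, pushing
  // each onto "fl" as it goes, while the head chunk fc remains a single free
  // block until it is finally resized to word_sz and pushed last.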
2927   size_t fc_size = n * word_sz;
2928   // All but first chunk in this loop
2929   for (ssize_t i = n-1; i > 0; i--) {
2930     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2931     ffc->set_size(word_sz);
2932     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2933     ffc->link_next(NULL);
2934     // Above must occur before BOT is updated below.
2935     OrderAccess::storestore();
2936     // splitting from the right, fc_size == i * word_sz
2937     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2938     fc_size -= word_sz;
2939     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2940     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2941     _bt.verify_single_block((HeapWord*)fc, fc_size);
2942     // Push this on "fl".
2943     fl->return_chunk_at_head(ffc);
2944   }
2945   // First chunk
2946   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2947   // The blocks above should show their new sizes before the first block below
2948   fc->set_size(word_sz);
2949   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2950   fc->link_next(NULL);
2951   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2952   _bt.verify_single_block((HeapWord*)fc, fc->size());
2953   fl->return_chunk_at_head(fc);
2954 
2955   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2956   {
2957     // Update the stats for this block size.
2958     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2959                     Mutex::_no_safepoint_check_flag);
2960     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2961     _indexedFreeList[word_sz].set_split_births(births);
2962     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2963     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2964   }
2965 
2966   // TRAP
2967   assert(fl->tail()->next() == NULL, "List invariant.");
2968 }
2969 
2970 // Set up the space's par_seq_tasks structure for work claiming
2971 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2972 // XXX Need to suitably abstract and generalize this and the next
2973 // method into one.
2974 void
2975 CompactibleFreeListSpace::
2976 initialize_sequential_subtasks_for_rescan(int n_threads) {
2977   // The "size" of each task is fixed according to rescan_task_size.
2978   assert(n_threads > 0, "Unexpected n_threads argument");
2979   const size_t task_size = rescan_task_size();
2980   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2981   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2982   assert(n_tasks == 0 ||
2983          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2984           (used_region().start() + n_tasks*task_size >= used_region().end())),
2985          "n_tasks calculation incorrect");
2986   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2987   assert(!pst->valid(), "Clobbering existing data?");
2988   // Sets the condition for completion of the subtask (how many threads
2989   // need to finish in order to be done).
2990   pst->set_n_threads(n_threads);
2991   pst->set_n_tasks((int)n_tasks);
2992 }
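// Illustrative sizing (hypothetical numbers): a used region of 10,000 words
// with a rescan_task_size() of 4,096 words yields
// n_tasks == (10000 + 4095) / 4096 == 3, which satisfies the bracketing
// asserts above: start + 2*4096 lies inside the region while
// start + 3*4096 reaches past its end.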
2993 
2994 // Set up the space's par_seq_tasks structure for work claiming
2995 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2996 void
2997 CompactibleFreeListSpace::
2998 initialize_sequential_subtasks_for_marking(int n_threads,
2999                                            HeapWord* low) {
3000   // The "size" of each task is fixed according to rescan_task_size.
3001   assert(n_threads > 0, "Unexpected n_threads argument");
3002   const size_t task_size = marking_task_size();
3003   assert(task_size > CardTableModRefBS::card_size_in_words &&
3004          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3005          "Otherwise arithmetic below would be incorrect");
3006   MemRegion span = _gen->reserved();
3007   if (low != NULL) {
3008     if (span.contains(low)) {
3009       // Align low down to a card boundary so that
3010       // we can use block_offset_careful() on span boundaries.
3011       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3012                                  CardTableModRefBS::card_size);
3013       // Clip span prefix at aligned_low
3014       span = span.intersection(MemRegion(aligned_low, span.end()));
3015     } else if (low > span.end()) {
3016       span = MemRegion(low, low);  // Null region
3017     } // else use entire span
3018   }
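  // For example (hypothetical addresses, assuming a 512-byte card size): a low
  // of 0x10000210 inside the span is aligned down to 0x10000200 and the span
  // prefix below that card boundary is clipped away; a low beyond span.end()
  // leaves an empty span, so no marking tasks are generated at all.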
3019   assert(span.is_empty() ||
3020          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3021         "span should start at a card boundary");
3022   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3023   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3024   assert(n_tasks == 0 ||
3025          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3026           (span.start() + n_tasks*task_size >= span.end())),
3027          "n_tasks calculation incorrect");
3028   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3029   assert(!pst->valid(), "Clobbering existing data?");
3030   // Sets the condition for completion of the subtask (how many threads
3031   // need to finish in order to be done).
3032   pst->set_n_threads(n_threads);
3033   pst->set_n_tasks((int)n_tasks);
3034 }