/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
#include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
#include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
#include "gc_implementation/shared/liveRange.hpp"
#include "gc_implementation/shared/spaceDecorator.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/blockOffsetTable.inline.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/init.hpp"
#include "runtime/java.hpp"
#include "runtime/vmThread.hpp"
#include "utilities/copy.hpp"

/////////////////////////////////////////////////////////////////////////
//// CompactibleFreeListSpace
/////////////////////////////////////////////////////////////////////////

// highest ranked free list lock rank
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;

// Defaults are 0 so things will break badly if incorrectly initialized.
size_t CompactibleFreeListSpace::IndexSetStart  = 0;
size_t CompactibleFreeListSpace::IndexSetStride = 0;

size_t MinChunkSize = 0;

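// Illustrative note (not normative): on an LP64 VM where sizeof(FreeChunk)
// comes to three machine words and MinObjAlignmentInBytes is 8, the
// computation below yields MinChunkSize == 3 heap words; IndexSetStart and
// IndexSetStride then follow directly from MinChunkSize and MinObjAlignment.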
void CompactibleFreeListSpace::set_cms_values() {
  // Set CMS global values
  assert(MinChunkSize == 0, "already set");

  // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  // for chunks to contain a FreeChunk.
  size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;

  assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  IndexSetStart  = MinChunkSize;
  IndexSetStride = MinObjAlignment;
}

// Constructor
CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  MemRegion mr, bool use_adaptive_freelists,
  FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  _dictionaryChoice(dictionaryChoice),
  _adaptive_freelists(use_adaptive_freelists),
  _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank
  // This range currently is [_leaf+2, _leaf+3]
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks
  // are acquired in the program text. This is true today.
  _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
                          "CompactibleFreeListSpace._dict_par_lock", true),
  _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSRescanMultiple),
  _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
                    CMSConcMarkMultiple),
  _collector(NULL)
{
  assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
         "FreeChunk is larger than expected");
  _bt.set_space(this);
  initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  // We have all of "mr", all of which we place in the dictionary
  // as one big chunk. We'll need to decide here which of several
  // possible alternative dictionary implementations to use. For
  // now the choice is easy, since we have only one working
  // implementation, namely, the simple binary tree (splaying
  // temporarily disabled).
  switch (dictionaryChoice) {
    case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
      _dictionary = new AFLBinaryTreeDictionary(mr);
      break;
    case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
    case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
    default:
      warning("dictionaryChoice: selected option not understood; using"
              " default BinaryTreeDictionary implementation instead.");
  }
  assert(_dictionary != NULL, "CMS dictionary initialization");
  // The indexed free lists are initially all empty and are lazily
  // filled in on demand. Initialize the array elements to NULL.
  initializeIndexedFreeListArray();

  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's.  Also a cms perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks
  if (!use_adaptive_freelists) {
    FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
                                           FreeBlockDictionary<FreeChunk>::atLeast);
    // The small linAB initially has all the space and will allocate
    // a chunk of any size.
    HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
    // Note that _unallocated_block is not updated here.
    // Allocations from the linear allocation block should
    // update it.
  } else {
    _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
                               SmallForLinearAlloc);
  }
  // CMSIndexedFreeListReplenish should be at least 1
  CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
  _promoInfo.setSpace(this);
  if (UseCMSBestFit) {
    _fitStrategy = FreeBlockBestFitFirst;
  } else {
    _fitStrategy = FreeBlockStrategyNone;
  }
  check_free_list_consistency();

  // Initialize locks for parallel case.

  if (CollectedHeap::use_parallel_gc_threads()) {
    for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
                                              "a freelist par lock",
                                              true);
      DEBUG_ONLY(
        _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
      )
    }
    _dictionary->set_par_lock(&_parDictionaryAllocLock);
  }
}

// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  Removed initialize_threshold call because
// CFLS does not use a block offset array for contiguous spaces.
HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
                                    CompactPoint* cp, HeapWord* compact_top) {
  // q is alive
  // First check if we should switch compaction space
  assert(this == cp->space, "'this' should be current compaction space.");
  size_t compaction_max_size = pointer_delta(end(), compact_top);
  assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjustObjectSize_v() method is not correct");
  size_t adjusted_size = adjustObjectSize(size);
  assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
         "no small fragments allowed");
  assert(minimum_free_block_size() == MinChunkSize,
         "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
  if (adjusted_size + MinChunkSize > compaction_max_size &&
      adjusted_size != compaction_max_size) {
    do {
      // switch to next compaction space
      cp->space->set_compaction_top(compact_top);
      cp->space = cp->space->next_compaction_space();
      if (cp->space == NULL) {
        cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
        assert(cp->gen != NULL, "compaction must succeed");
        cp->space = cp->gen->first_compaction_space();
        assert(cp->space != NULL, "generation must have a first compaction space");
      }
      compact_top = cp->space->bottom();
      cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method which is not used above to save the virtual
      // dispatch.
      adjusted_size = cp->space->adjust_object_size_v(size);
      compaction_max_size = pointer_delta(cp->space->end(), compact_top);
      assert(cp->space->minimum_free_block_size() == 0, "just checking");
    } while (adjusted_size > compaction_max_size);
  }

  // store the forwarding pointer into the mark word
  if ((HeapWord*)q != compact_top) {
    q->forward_to(oop(compact_top));
    assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
  } else {
    // if the object isn't moving we can just set the mark to the default
    // mark and handle it specially later on.
    q->init_mark();
    assert(q->forwardee() == NULL, "should be forwarded to NULL");
  }

  compact_top += adjusted_size;

  // we need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space can only call it when
  // the compaction_top exceeds the current threshold, but not for a
  // non-contiguous space.
  cp->threshold =
    cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
  return compact_top;
}

// A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
// and use of single_block instead of alloc_block.  The name here is not really
// appropriate - maybe a more general name could be invented for both the
// contiguous and noncontiguous spaces.

HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
  _bt.single_block(start, the_end);
  return end();
}

// Initialize them to NULL.
void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
  for (size_t i = 0; i < IndexSetSize; i++) {
    // Note that on platforms where objects are double word aligned,
    // the odd array elements are not used.  It is convenient, however,
    // to map directly from the object size to the array element.
    _indexedFreeList[i].reset(IndexSetSize);
    _indexedFreeList[i].set_size(i);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (size_t i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == (size_t) i,
      "Indexed free list sizes are incorrect");
    _indexedFreeList[i].reset(IndexSetSize);
    assert(_indexedFreeList[i].count() == 0, "reset check failed");
    assert(_indexedFreeList[i].head() == NULL, "reset check failed");
    assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
    assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
  }
}

void CompactibleFreeListSpace::reset(MemRegion mr) {
  resetIndexedFreeListArray();
  dictionary()->reset();
  if (BlockOffsetArrayUseUnallocatedBlock) {
    assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
    // Everything's allocated until proven otherwise.
    _bt.set_unallocated_block(end());
  }
  if (!mr.is_empty()) {
    assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
    _bt.single_block(mr.start(), mr.word_size());
    FreeChunk* fc = (FreeChunk*) mr.start();
    fc->set_size(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
      returnChunkToDictionary(fc);
    } else {
      _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
      _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
    }
    coalBirth(mr.word_size());
  }
  _promoInfo.reset();
  _smallLinearAllocBlock._ptr = NULL;
  _smallLinearAllocBlock._word_size = 0;
}

void CompactibleFreeListSpace::reset_after_compaction() {
  // Reset the space to the new reality - one free chunk.
  MemRegion mr(compaction_top(), end());
  reset(mr);
  // Now refill the linear allocation block(s) if possible.
  if (_adaptive_freelists) {
    refillLinearAllocBlocksIfNeeded();
  } else {
    // Place as much of mr in the linAB as we can get,
    // provided it was big enough to go into the dictionary.
    FreeChunk* fc = dictionary()->find_largest_dict();
    if (fc != NULL) {
      assert(fc->size() == mr.word_size(),
             "Why was the chunk broken up?");
      removeChunkFromDictionary(fc);
      HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
        1024*SmallForLinearAlloc, fc->size());
      // Note that _unallocated_block is not updated here.
    }
  }
}

// Walks the entire dictionary, returning a coterminal
// chunk, if it exists. Use with caution since it involves
// a potentially complete walk of a potentially large tree.
FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {

  assert_lock_strong(&_freelistLock);

  return dictionary()->find_chunk_ends_at(end());
}


#ifndef PRODUCT
void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
  }
}

size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
  size_t sum = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
  }
  return sum;
}

size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
  size_t count = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
    debug_only(
      ssize_t total_list_count = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        total_list_count++;
      }
      assert(total_list_count == _indexedFreeList[i].count(),
        "Count in list is incorrect");
    )
    count += _indexedFreeList[i].count();
  }
  return count;
}

size_t CompactibleFreeListSpace::totalCount() {
  size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->total_count();
  if (_smallLinearAllocBlock._word_size != 0) {
    num++;
  }
  return num;
}
#endif

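// Answers whether the block starting at p is currently formatted as a free
// chunk (as opposed to an allocated object).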
bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*) p;
  return fc->is_free();
}

size_t CompactibleFreeListSpace::used() const {
  return capacity() - free();
}

size_t CompactibleFreeListSpace::free() const {
  // "MT-safe, but not MT-precise"(TM), if you will: i.e.
  // if you do this while the structures are in flux you
  // may get an approximate answer only; for instance
  // because there is concurrent allocation either
  // directly by mutators or for promotion during a GC.
  // It's "MT-safe", however, in the sense that you are guaranteed
  // not to crash and burn, for instance, because of walking
  // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
  // this is during a resize operation at the end of GC
  // and at that time you are guaranteed to get the
  // correct actual value. However, for instance, this is
  // also read completely asynchronously by the "perf-sampler"
  // that supports jvmstat, and you are apt to see the values
  // flicker in such cases.
  assert(_dictionary != NULL, "No _dictionary?");
  return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
          totalSizeInIndexedFreeLists() +
          _smallLinearAllocBlock._word_size) * HeapWordSize;
}

size_t CompactibleFreeListSpace::max_alloc_in_words() const {
  assert(_dictionary != NULL, "No _dictionary?");
  assert_locked();
  size_t res = _dictionary->max_chunk_size();
  res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
                       (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimistically for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
  for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
       i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return i;
    }
  }
  return res;
}

void LinearAllocBlock::print_on(outputStream* st) const {
  st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
            ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
            _ptr, _word_size, _refillSize, _allocation_size_limit);
}

void CompactibleFreeListSpace::print_on(outputStream* st) const {
  st->print_cr("COMPACTIBLE FREELIST SPACE");
  st->print_cr(" Space:");
  Space::print_on(st);

  st->print_cr("promoInfo:");
  _promoInfo.print_on(st);

  st->print_cr("_smallLinearAllocBlock");
  _smallLinearAllocBlock.print_on(st);

  // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);

  st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
               _fitStrategy ? "true" : "false", _adaptive_freelists ? "true" : "false");
}

void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
const {
  reportIndexedFreeListStatistics();
  gclog_or_tty->print_cr("Layout of Indexed Freelists");
  gclog_or_tty->print_cr("---------------------------");
  AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(gclog_or_tty);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
      gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
                          fc, (HeapWord*)fc + i,
                          fc->cantCoalesce() ? "\t CC" : "");
    }
  }
}

void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
const {
  _promoInfo.print_on(st);
}

void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
const {
  _dictionary->report_statistics();
  st->print_cr("Layout of Freelists in Tree");
  st->print_cr("---------------------------");
  _dictionary->print_free_lists(st);
}

class BlkPrintingClosure: public BlkClosure {
  const CMSCollector*             _collector;
  const CompactibleFreeListSpace* _sp;
  const CMSBitMap*                _live_bit_map;
  const bool                      _post_remark;
  outputStream*                   _st;
public:
  BlkPrintingClosure(const CMSCollector* collector,
                     const CompactibleFreeListSpace* sp,
                     const CMSBitMap* live_bit_map,
                     outputStream* st):
    _collector(collector),
    _sp(sp),
    _live_bit_map(live_bit_map),
    _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
    _st(st) { }
  size_t do_blk(HeapWord* addr);
};

size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
  size_t sz = _sp->block_size_no_stall(addr, _collector);
  assert(sz != 0, "Should always be able to compute a size");
  if (_sp->block_is_obj(addr)) {
    const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
    _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
      addr,
      dead ? "dead" : "live",
      sz,
      (!dead && CMSPrintObjectsInDump) ? ":" : ".");
    if (CMSPrintObjectsInDump && !dead) {
      oop(addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  } else { // free block
    _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
      addr, sz, CMSPrintChunksInDump ? ":" : ".");
    if (CMSPrintChunksInDump) {
      ((FreeChunk*)addr)->print_on(_st);
      _st->print_cr("--------------------------------------");
    }
  }
  return sz;
}

void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
  outputStream* st) {
  st->print_cr("\n=========================");
  st->print_cr("Block layout in CMS Heap:");
  st->print_cr("=========================");
  BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
  blk_iterate(&bpcl);

  st->print_cr("\n=======================================");
  st->print_cr("Order & Layout of Promotion Info Blocks");
  st->print_cr("=======================================");
  print_promo_info_blocks(st);

  st->print_cr("\n===========================");
  st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
  print_indexed_free_lists(st);

  st->print_cr("\n=================================");
  st->print_cr("Order of Free Lists in Dictionary");
  st->print_cr("=================================");
  print_dictionary_free_lists(st);
}


void CompactibleFreeListSpace::reportFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  assert(PrintFLSStatistics != 0, "Reporting error");
  _dictionary->report_statistics();
  if (PrintFLSStatistics > 1) {
    reportIndexedFreeListStatistics();
    size_t total_size = totalSizeInIndexedFreeLists() +
                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
  }
}

void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
  assert_lock_strong(&_freelistLock);
  gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
                      "--------------------------------\n");
  size_t total_size = totalSizeInIndexedFreeLists();
  size_t free_blocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
  if (free_blocks != 0) {
    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
  }
}

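// Count the free chunks held in the indexed (small-size) free lists; in debug
// builds each list's cached count is cross-checked against an explicit walk.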
size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
  size_t res = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      ssize_t recount = 0;
      for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
        recount += 1;
      }
      assert(recount == _indexedFreeList[i].count(),
        "Incorrect count in list");
    )
    res += _indexedFreeList[i].count();
  }
  return res;
}

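// Return the size, in heap words, of the largest non-empty indexed free list,
// or 0 if all of the indexed lists are empty.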
size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
  for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
    if (_indexedFreeList[i].head() != NULL) {
      assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
      return (size_t)i;
    }
  }
  return 0;
}

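// Adjust the end of the space. When growing, the newly covered words are made
// available for allocation: either handed to the linear allocation block (in
// the non-adaptive case, when the linAB is currently empty) or added to the
// free lists, coalescing with a preceding free block where possible.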
void CompactibleFreeListSpace::set_end(HeapWord* value) {
  HeapWord* prevEnd = end();
  assert(prevEnd != value, "unnecessary set_end call");
  assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
        "New end is below unallocated block");
  _end = value;
  if (prevEnd != NULL) {
    // Resize the underlying block offset table.
    _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
             "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded.  Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
}

class FreeListSpace_DCTOC : public Filtering_DCTOC {
  CompactibleFreeListSpace* _cfls;
  CMSCollector* _collector;
protected:
  // Override.
#define walk_mem_region_with_cl_DECL(ClosureType)                       \
  virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
      void walk_mem_region_with_cl_par(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl);                \
    void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
                                       HeapWord* bottom, HeapWord* top, \
                                       ClosureType* cl)
  walk_mem_region_with_cl_DECL(ExtendedOopClosure);
  walk_mem_region_with_cl_DECL(FilteringClosure);

public:
  FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                      CMSCollector* collector,
                      ExtendedOopClosure* cl,
                      CardTableModRefBS::PrecisionStyle precision,
                      HeapWord* boundary) :
    Filtering_DCTOC(sp, cl, precision, boundary),
    _cfls(sp), _collector(collector) {}
};

// We de-virtualize the block-related calls below, since we know that our
// space is a CompactibleFreeListSpace.

#define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                 HeapWord* bottom,              \
                                                 HeapWord* top,                 \
                                                 ClosureType* cl) {             \
   bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
   if (is_par) {                                                                \
     assert(SharedHeap::heap()->n_par_threads() ==                              \
            SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
     walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
   } else {                                                                     \
     walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
   }                                                                            \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
                                                      HeapWord* bottom,         \
                                                      HeapWord* top,            \
                                                      ClosureType* cl) {        \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
    }                                                                           \
  }                                                                             \
}                                                                               \
void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
                                                        HeapWord* bottom,       \
                                                        HeapWord* top,          \
                                                        ClosureType* cl) {      \
  /* Skip parts that are before "mr", in case "block_start" sent us             \
     back too far. */                                                           \
  HeapWord* mr_start = mr.start();                                              \
  size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
  HeapWord* next = bottom + bot_size;                                           \
  while (next < mr_start) {                                                     \
    bottom = next;                                                              \
    bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
    next = bottom + bot_size;                                                   \
  }                                                                             \
                                                                                \
  while (bottom < top) {                                                        \
    if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
        !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
                    oop(bottom)) &&                                             \
        !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
      size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
      bottom += _cfls->adjustObjectSize(word_sz);                               \
    } else {                                                                    \
      bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
    }                                                                           \
  }                                                                             \
}

// (There are only two of these, rather than N, because the split is due
// only to the introduction of the FilteringClosure, a local part of the
// impl of this abstraction.)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)

DirtyCardToOopClosure*
CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
                                      CardTableModRefBS::PrecisionStyle precision,
                                      HeapWord* boundary) {
  return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
}


// Note on locking for the space iteration functions:
// since the collector's iteration activities are concurrent with
// allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance a block being iterated
// may suddenly be allocated or divided up and part of it allocated and
// so on.

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk_careful(cur));
}

// Apply the given closure to each block in the space.
void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += cl->do_blk(cur));
}

// Apply the given closure to each oop in the space.
void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
  assert_lock_strong(freelistLock());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      oop(cur)->oop_iterate(cl);
    }
  }
}

// NOTE: In the following methods, in order to safely be able to
// apply the closure to an object, we need to be sure that the
// object has been initialized. We are guaranteed that an object
// is initialized if we are holding the Heap_lock with the
// world stopped.
void CompactibleFreeListSpace::verify_objects_initialized() const {
  if (is_init_completed()) {
    assert_locked_or_safepoint(Heap_lock);
    if (Universe::is_fully_initialized()) {
      guarantee(SafepointSynchronize::is_at_safepoint(),
                "Required for objects to be initialized");
    }
  } // else make a concession at vm start-up
}

// Apply the given closure to each object in the space
void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

// Apply the given closure to each live object in the space.
//   The usage of CompactibleFreeListSpace by the
// ConcurrentMarkSweepGeneration for concurrent GC's means that the space
// may contain objects with references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object.  See obj_is_alive() for details on how liveness of an
// object is decided.

void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
  assert_lock_strong(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  HeapWord *cur, *limit;
  size_t curSize;
  for (cur = bottom(), limit = end(); cur < limit;
       cur += curSize) {
    curSize = block_size(cur);
    if (block_is_obj(cur) && obj_is_alive(cur)) {
      blk->do_object(oop(cur));
    }
  }
}

void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
                                                  UpwardsObjectClosure* cl) {
  assert_locked(freelistLock());
  NOT_PRODUCT(verify_objects_initialized());
  assert(!mr.is_empty(), "Should be non-empty");
  // We use MemRegion(bottom(), end()) rather than used_region() below
  // because the two are not necessarily equal for some kinds of
  // spaces, in particular, certain kinds of free list spaces.
  // We could use the more complicated but more precise:
  // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
  // but the slight imprecision seems acceptable in the assertion check.
  assert(MemRegion(bottom(), end()).contains(mr),
         "Should be within used space");
  HeapWord* prev = cl->previous();   // max address from last time
  if (prev >= mr.end()) { // nothing to do
    return;
  }
  // This assert will not work when we go from cms space to perm
  // space, and use same closure. Easy fix deferred for later. XXX YSR
  // assert(prev == NULL || contains(prev), "Should be within space");

  bool last_was_obj_array = false;
  HeapWord *blk_start_addr, *region_start_addr;
  if (prev > mr.start()) {
    region_start_addr = prev;
    blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
    // Relax the assertion so that the case where the unallocated
    // block is maintained and "prev" is beyond the unallocated
    // block does not cause the assertion to fire.
    assert((BlockOffsetArrayUseUnallocatedBlock &&
            (!is_in(prev))) ||
           (blk_start_addr == block_start(region_start_addr)), "invariant");
  } else {
    region_start_addr = mr.start();
    blk_start_addr    = block_start(region_start_addr);
  }
  HeapWord* region_end_addr = mr.end();
  MemRegion derived_mr(region_start_addr, region_end_addr);
  while (blk_start_addr < region_end_addr) {
    const size_t size = block_size(blk_start_addr);
    if (block_is_obj(blk_start_addr)) {
      last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
    } else {
      last_was_obj_array = false;
    }
    blk_start_addr += size;
  }
  if (!last_was_obj_array) {
    assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
           "Should be within (closed) used space");
    assert(blk_start_addr > prev, "Invariant");
    cl->set_previous(blk_start_addr); // min address for next time
  }
}


// Callers of this iterator beware: The closure application should
// be robust in the face of uninitialized objects and should (always)
// return a correct size so that the next addr + size below gives us a
// valid block boundary. [See for instance,
// ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
// in ConcurrentMarkSweepGeneration.cpp.]
HeapWord*
CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
  ObjectClosureCareful* cl) {
  assert_lock_strong(freelistLock());
  // Can't use used_region() below because it may not necessarily
  // be the same as [bottom(),end()); although we could
  // use [used_region().start(),round_to(used_region().end(),CardSize)),
  // that appears too cumbersome, so we just do the simpler check
  // in the assertion below.
  assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
         "mr should be non-empty and within used space");
  HeapWord *addr, *end;
  size_t size;
  for (addr = block_start_careful(mr.start()), end = mr.end();
       addr < end; addr += size) {
    FreeChunk* fc = (FreeChunk*)addr;
    if (fc->is_free()) {
      // Since we hold the free list lock, which protects direct
      // allocation in this generation by mutators, a free object
      // will remain free throughout this iteration code.
      size = fc->size();
    } else {
      // Note that the object need not necessarily be initialized,
      // because (for instance) the free list lock does NOT protect
      // object initialization. The closure application below must
      // therefore be correct in the face of uninitialized objects.
      size = cl->do_object_careful_m(oop(addr), mr);
      if (size == 0) {
        // An unparsable object found. Signal early termination.
        return addr;
      }
    }
  }
  return NULL;
}


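// Block-start queries below are answered by the block offset table.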
HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  return _bt.block_start(p);
}

HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
  return _bt.block_start_careful(p);
}

size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();

      // Bugfix for systems with weak memory model (PPC64/IA64). The
      // block's free bit was set and we have read the size of the
      // block. Acquire and check the free bit again. If the block is
      // still free, the read size is correct.
      OrderAccess::acquire();

      // If the object is still a free chunk, return the size, else it
      // has been allocated so try again.
      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    } else {
      // must read from what 'p' points to in each loop.
      Klass* k = ((volatile oopDesc*)p)->klass_or_null();
      if (k != NULL) {
        assert(k->is_klass(), "Should really be klass oop.");
        oop o = (oop)p;
        assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");

        // Bugfix for systems with weak memory model (PPC64/IA64).
        // The object o may be an array. Acquire to make sure that the array
        // size (third word) is consistent.
        OrderAccess::acquire();

        size_t res = o->size_given_klass(k);
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      }
    }
  }
}

// TODO: Now that is_parsable is gone, we should combine these two functions.
// A variant of the above that uses the Printezis bits for
// unparsable but allocated objects. This avoids any possible
// stalls waiting for mutators to initialize objects, and is
// thus potentially faster than the variant above. However,
// this variant may return a zero size for a block that is
// under mutation and for which a consistent size cannot be
// inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
                                                     const CMSCollector* c)
const {
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  // This must be volatile, or else there is a danger that the compiler
  // will compile the code below into a sometimes-infinite loop, by keeping
  // the value read the first time in a register.
  DEBUG_ONLY(uint loops = 0;)
  while (true) {
    // We must do this until we get a consistent view of the object.
    if (FreeChunk::indicatesFreeChunk(p)) {
      volatile FreeChunk* fc = (volatile FreeChunk*)p;
      size_t res = fc->size();

      // Bugfix for systems with weak memory model (PPC64/IA64). The
      // free bit of the block was set and we have read the size of
      // the block. Acquire and check the free bit again. If the
      // block is still free, the read size is correct.
      OrderAccess::acquire();

      if (FreeChunk::indicatesFreeChunk(p)) {
        assert(res != 0, "Block size should not be 0");
        assert(loops == 0, "Should be 0");
        return res;
      }
    } else {
      // must read from what 'p' points to in each loop.
      Klass* k = ((volatile oopDesc*)p)->klass_or_null();
      // We trust the size of any object that has a non-NULL
      // klass and (for those in the perm gen) is parsable
      // -- irrespective of its conc_safe-ty.
      if (k != NULL) {
        assert(k->is_klass(), "Should really be klass oop.");
        oop o = (oop)p;
        assert(o->is_oop(), "Should be an oop");

        // Bugfix for systems with weak memory model (PPC64/IA64).
        // The object o may be an array. Acquire to make sure that the array
        // size (third word) is consistent.
        OrderAccess::acquire();

        size_t res = o->size_given_klass(k);
        res = adjustObjectSize(res);
        assert(res != 0, "Block size should not be 0");
        return res;
      } else {
        // May return 0 if P-bits not present.
        return c->block_size_if_printezis_bits(p);
      }
    }
    assert(loops == 0, "Can loop at most once");
    DEBUG_ONLY(loops++;)
  }
}

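// A simpler variant of block_size() for use when no parallel ("par")
// mutation of the block needs to be tolerated, so the re-read/acquire
// dance above is unnecessary.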
size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
  NOT_PRODUCT(verify_objects_initialized());
  assert(MemRegion(bottom(), end()).contains(p), "p not in space");
  FreeChunk* fc = (FreeChunk*)p;
  if (fc->is_free()) {
    return fc->size();
  } else {
    // Ignore mark word because this may be a recently promoted
    // object whose mark word is used to chain together grey
    // objects (the last one would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return adjustObjectSize(oop(p)->size());
  }
}

// This implementation assumes that the property of "being an object" is
// stable.  But being a free chunk may not be (because of parallel
// promotion.)
bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  // When doing a mark-sweep-compact of the CMS generation, this
  // assertion may fail because prepare_for_compaction() uses
  // space that is garbage to maintain information on ranges of
  // live objects so that these live ranges can be moved as a whole.
  // Comment out this assertion until that problem can be solved
  // (i.e., the block start calculation may look at objects at
  // addresses below "p" in finding the object that contains "p",
  // and those objects (if garbage) may have been modified to hold
  // live range information).
  // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
  //        "Should be a block boundary");
  if (FreeChunk::indicatesFreeChunk(p)) return false;
  Klass* k = oop(p)->klass_or_null();
  if (k != NULL) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  } else {
    return false;  // Was not an object at the start of collection.
  }
}

// Check if the object is alive. This fact is checked either by consulting
// the main marking bitmap in the sweeping phase or, if it's a permanent
// generation and we're not in the sweeping phase, by checking the
// perm_gen_verify_bit_map where we store the "deadness" information if
// we did not sweep the perm gen in the most recent previous GC cycle.
bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
  assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
         "Else races are possible");
  assert(block_is_obj(p), "The address should point to an object");

  // If we're sweeping, we use object liveness information from the main bit map
  // for both perm gen and old gen.
  // We don't need to lock the bitmap (live_map or dead_map below), because
  // EITHER we are in the middle of the sweeping phase, and the
  // main marking bit map (live_map below) is locked,
  // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
  // is stable, because it's mutated only in the sweeping phase.
  // NOTE: This method is also used by jmap where, if class unloading is
  // off, the results can return "false" for legitimate perm objects,
  // when we are not in the midst of a sweeping phase, which can result
  // in jmap not reporting certain perm gen objects. This will be moot
  // if/when the perm gen goes away in the future.
  if (_collector->abstract_state() == CMSCollector::Sweeping) {
    CMSBitMap* live_map = _collector->markBitMap();
    return live_map->par_isMarked((HeapWord*) p);
  }
  return true;
}

bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
  FreeChunk* fc = (FreeChunk*)p;
  assert(is_in_reserved(p), "Should be in space");
  assert(_bt.block_start(p) == p, "Should be a block boundary");
  if (!fc->is_free()) {
    // Ignore mark word because it may have been used to
    // chain together promoted objects (the last one
    // would have a null value).
    assert(oop(p)->is_oop(true), "Should be an oop");
    return true;
  }
  return false;
}

// "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistlock when you call this.
size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
  size_t size = 0;
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    debug_only(
      // We may be calling here without the lock in which case we
      // won't do this modest sanity check.
      if (freelistLock()->owned_by_self()) {
        size_t total_list_size = 0;
        for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
          fc = fc->next()) {
          total_list_size += i;
        }
        assert(total_list_size == i * _indexedFreeList[i].count(),
               "Count in list is incorrect");
      }
    )
    size += i * _indexedFreeList[i].count();
  }
  return size;
}

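// Multi-threaded allocation entry point: take the free list lock and
// delegate to the single-threaded allocate().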
HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
  MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
  return allocate(size);
}

HeapWord*
CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
  return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
}

1190 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1191   assert_lock_strong(freelistLock());
1192   HeapWord* res = NULL;
1193   assert(size == adjustObjectSize(size),
1194          "use adjustObjectSize() before calling into allocate()");
1195 
1196   if (_adaptive_freelists) {
1197     res = allocate_adaptive_freelists(size);
1198   } else {  // non-adaptive free lists
1199     res = allocate_non_adaptive_freelists(size);
1200   }
1201 
1202   if (res != NULL) {
1203     // check that res does lie in this space!
1204     assert(is_in_reserved(res), "Not in this space!");
1205     assert(is_aligned((void*)res), "alignment check");
1206 
1207     FreeChunk* fc = (FreeChunk*)res;
1208     fc->markNotFree();
1209     assert(!fc->is_free(), "shouldn't be marked free");
1210     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1211     // Verify that the block offset table shows this to
1212     // be a single block, but not one which is unallocated.
1213     _bt.verify_single_block(res, size);
1214     _bt.verify_not_unallocated(res, size);
1215     // mangle a just allocated object with a distinct pattern.
1216     debug_only(fc->mangleAllocated(size));
1217   }
1218 
1219   return res;
1220 }
1221 
1222 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1223   HeapWord* res = NULL;
1224   // Try to use linear allocation for smaller blocks.
1225   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1226     // if successful, the following also adjusts block offset table
1227     res = getChunkFromSmallLinearAllocBlock(size);
1228   }
1229   // Else triage to the indexed lists or the dictionary, depending on size.
1230   if (res == NULL) {
1231     if (size < SmallForDictionary) {
1232       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1233     } else {
1234       // else get it from the big dictionary; if even this doesn't
1235       // work we are out of luck.
1236       res = (HeapWord*)getChunkFromDictionaryExact(size);
1237     }
1238   }
1239 
1240   return res;
1241 }
1242 
1243 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1244   assert_lock_strong(freelistLock());
1245   HeapWord* res = NULL;
1246   assert(size == adjustObjectSize(size),
1247          "use adjustObjectSize() before calling into allocate()");
1248 
1249   // Strategy
1250   //   if small
1251   //     exact size from small object indexed list if small
1252   //     small or large linear allocation block (linAB) as appropriate
1253   //     take from lists of greater sized chunks
1254   //   else
1255   //     dictionary
1256   //     small or large linear allocation block if it has the space
1257   // Try allocating exact size from indexTable first
1258   if (size < IndexSetSize) {
1259     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1260     if (res != NULL) {
1261       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1262         "Not removed from free list");
1263       // no block offset table adjustment is necessary on blocks in
1264       // the indexed lists.
1265 
1266     // Try allocating from the small LinAB
1267     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1268         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1269         // if successful, the above also adjusts block offset table
1270         // Note that this call will refill the LinAB to
1271         // satisfy the request.  This is different from
1272         // evm.
1273         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1274     } else {
1275       // Raid the exact free lists larger than size, even if they are not
1276       // overpopulated.
1277       res = (HeapWord*) getChunkFromGreater(size);
1278     }
1279   } else {
1280     // Big objects get allocated directly from the dictionary.
1281     res = (HeapWord*) getChunkFromDictionaryExact(size);
1282     if (res == NULL) {
1283       // Try hard not to fail since an allocation failure will likely
1284       // trigger a synchronous GC.  Try to get the space from the
1285       // allocation blocks.
1286       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1287     }
1288   }
1289 
1290   return res;
1291 }
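     // A rough walk-through of the strategy above (the word counts are
     // hypothetical; IndexSetSize is 257):
     //   - a 12-word request tries the exact list _indexedFreeList[12] first,
     //     then the small linAB, then getChunkFromGreater(12);
     //   - a 1000-word request goes straight to the dictionary, falling back to
     //     the remainder of the small linAB if the dictionary has nothing suitable.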
1292 
1293 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1294 // when promoting obj.
1295 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1296   // Depending on the object size, expansion may require refilling either a
1297   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1298   // is added because the dictionary may over-allocate to avoid fragmentation.
1299   size_t space = obj_size;
1300   if (!_adaptive_freelists) {
1301     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1302   }
1303   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1304   return space;
1305 }
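     // E.g. (hypothetical sizes, in words): for obj_size == 100 with adaptive
     // free lists the estimate is 100 + _promoInfo.refillSize() + 2 * MinChunkSize;
     // with non-adaptive free lists the 100 is first rounded up to at least
     // _smallLinearAllocBlock._refillSize.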
1306 
1307 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1308   FreeChunk* ret;
1309 
1310   assert(numWords >= MinChunkSize, "Size is less than minimum");
1311   assert(linearAllocationWouldFail() || bestFitFirst(),
1312     "Should not be here");
1313 
1314   size_t i;
1315   size_t currSize = numWords + MinChunkSize;
1316   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1317   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1318     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1319     if (fl->head()) {
1320       ret = getFromListGreater(fl, numWords);
1321       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1322       return ret;
1323     }
1324   }
1325 
1326   currSize = MAX2((size_t)SmallForDictionary,
1327                   (size_t)(numWords + MinChunkSize));
1328 
1329   /* Try to get a chunk that satisfies request, while avoiding
1330      fragmentation that can't be handled. */
1331   {
1332     ret =  dictionary()->get_chunk(currSize);
1333     if (ret != NULL) {
1334       assert(ret->size() - numWords >= MinChunkSize,
1335              "Chunk is too small");
1336       _bt.allocated((HeapWord*)ret, ret->size());
1337       /* Carve returned chunk. */
1338       (void) splitChunkAndReturnRemainder(ret, numWords);
1339       /* Label this as no longer a free chunk. */
1340       assert(ret->is_free(), "This chunk should be free");
1341       ret->link_prev(NULL);
1342     }
1343     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1344     return ret;
1345   }
1346   ShouldNotReachHere();
1347 }
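     // The searches above start at numWords + MinChunkSize so that, after
     // splitChunkAndReturnRemainder() carves off the requested numWords, the
     // leftover piece is still at least MinChunkSize and can stand alone as a
     // free chunk. E.g., for numWords == 20 and MinChunkSize == 4 (hypothetical
     // values), only lists of size >= 24 are considered.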
1348 
1349 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1350   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1351   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1352 }
1353 
1354 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1355   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1356          (_smallLinearAllocBlock._word_size == fc->size()),
1357          "Linear allocation block shows incorrect size");
1358   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1359           (_smallLinearAllocBlock._word_size == fc->size()));
1360 }
1361 
1362 // Check if the purported free chunk is present either as a linear
1363 // allocation block, the size-indexed table of (smaller) free blocks,
1364 // or the larger free blocks kept in the binary tree dictionary.
1365 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1366   if (verify_chunk_is_linear_alloc_block(fc)) {
1367     return true;
1368   } else if (fc->size() < IndexSetSize) {
1369     return verifyChunkInIndexedFreeLists(fc);
1370   } else {
1371     return dictionary()->verify_chunk_in_free_list(fc);
1372   }
1373 }
1374 
1375 #ifndef PRODUCT
1376 void CompactibleFreeListSpace::assert_locked() const {
1377   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1378 }
1379 
1380 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1381   CMSLockVerifier::assert_locked(lock);
1382 }
1383 #endif
1384 
1385 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1386   // In the parallel case, the main thread holds the free list lock
1387   // on behalf of the parallel threads.
1388   FreeChunk* fc;
1389   {
1390     // If GC is parallel, this might be called by several threads.
1391     // This should be rare enough that the locking overhead won't affect
1392     // the sequential code.
1393     MutexLockerEx x(parDictionaryAllocLock(),
1394                     Mutex::_no_safepoint_check_flag);
1395     fc = getChunkFromDictionary(size);
1396   }
1397   if (fc != NULL) {
1398     fc->dontCoalesce();
1399     assert(fc->is_free(), "Should be free, but not coalescable");
1400     // Verify that the block offset table shows this to
1401     // be a single block, but not one which is unallocated.
1402     _bt.verify_single_block((HeapWord*)fc, fc->size());
1403     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1404   }
1405   return fc;
1406 }
1407 
1408 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1409   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1410   assert_locked();
1411 
1412   // if we are tracking promotions, then first ensure space for
1413   // promotion (including spooling space for saving header if necessary).
1414   // then allocate and copy, then track promoted info if needed.
1415   // When tracking (see PromotionInfo::track()), the mark word may
1416   // be displaced and in this case restoration of the mark word
1417   // occurs in the (oop_since_save_marks_)iterate phase.
1418   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1419     return NULL;
1420   }
1421   // Call the allocate(size_t, bool) form directly to avoid the
1422   // additional call through the allocate(size_t) form.  Having
1423   // the compiler inline the call is problematic because allocate(size_t)
1424   // is a virtual method.
1425   HeapWord* res = allocate(adjustObjectSize(obj_size));
1426   if (res != NULL) {
1427     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1428     // if we should be tracking promotions, do so.
1429     if (_promoInfo.tracking()) {
1430         _promoInfo.track((PromotedObject*)res);
1431     }
1432   }
1433   return oop(res);
1434 }
1435 
1436 HeapWord*
1437 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1438   assert_locked();
1439   assert(size >= MinChunkSize, "minimum chunk size");
1440   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1441     "maximum from smallLinearAllocBlock");
1442   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1443 }
1444 
1445 HeapWord*
1446 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1447                                                        size_t size) {
1448   assert_locked();
1449   assert(size >= MinChunkSize, "too small");
1450   HeapWord* res = NULL;
1451   // Try to do linear allocation from blk, making sure that blk has not been exhausted.
1452   if (blk->_word_size == 0) {
1453     // We have probably been unable to fill this either in the prologue or
1454     // when it was exhausted at the last linear allocation. Bail out until
1455     // next time.
1456     assert(blk->_ptr == NULL, "consistency check");
1457     return NULL;
1458   }
1459   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1460   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1461   if (res != NULL) return res;
1462 
1463   // about to exhaust this linear allocation block
1464   if (blk->_word_size == size) { // exactly satisfied
1465     res = blk->_ptr;
1466     _bt.allocated(res, blk->_word_size);
1467   } else if (size + MinChunkSize <= blk->_refillSize) {
1468     size_t sz = blk->_word_size;
1469     // Update _unallocated_block if the size is such that the chunk would be
1470     // returned to the indexed free lists.  All other chunks in the indexed
1471     // free lists are allocated from the dictionary, so _unallocated_block
1472     // has already been adjusted for them.  Doing it here means the cost is
1473     // paid for all chunks added back to the indexed free lists.
1474     if (sz < SmallForDictionary) {
1475       _bt.allocated(blk->_ptr, sz);
1476     }
1477     // Return the chunk that isn't big enough, and then refill below.
1478     addChunkToFreeLists(blk->_ptr, sz);
1479     split_birth(sz);
1480     // Don't keep statistics on adding back chunk from a LinAB.
1481   } else {
1482     // A refilled block would not satisfy the request.
1483     return NULL;
1484   }
1485 
1486   blk->_ptr = NULL; blk->_word_size = 0;
1487   refillLinearAllocBlock(blk);
1488   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1489          "block was replenished");
1490   if (res != NULL) {
1491     split_birth(size);
1492     repairLinearAllocBlock(blk);
1493   } else if (blk->_ptr != NULL) {
1494     res = blk->_ptr;
1495     size_t blk_size = blk->_word_size;
1496     blk->_word_size -= size;
1497     blk->_ptr  += size;
1498     split_birth(size);
1499     repairLinearAllocBlock(blk);
1500     // Update BOT last so that other (parallel) GC threads see a consistent
1501     // view of the BOT and free blocks.
1502     // Above must occur before BOT is updated below.
1503     OrderAccess::storestore();
1504     _bt.split_block(res, blk_size, size);  // adjust block offset table
1505   }
1506   return res;
1507 }
1508 
1509 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1510                                         LinearAllocBlock* blk,
1511                                         size_t size) {
1512   assert_locked();
1513   assert(size >= MinChunkSize, "too small");
1514 
1515   HeapWord* res = NULL;
1516   // This is the common case.  Keep it simple.
1517   if (blk->_word_size >= size + MinChunkSize) {
1518     assert(blk->_ptr != NULL, "consistency check");
1519     res = blk->_ptr;
1520     // Note that the BOT is up-to-date for the linAB before allocation.  It
1521     // indicates the start of the linAB.  The split_block() updates the
1522     // BOT for the linAB after the allocation (indicates the start of the
1523     // next chunk to be allocated).
1524     size_t blk_size = blk->_word_size;
1525     blk->_word_size -= size;
1526     blk->_ptr  += size;
1527     split_birth(size);
1528     repairLinearAllocBlock(blk);
1529     // Update BOT last so that other (parallel) GC threads see a consistent
1530     // view of the BOT and free blocks.
1531     // Above must occur before BOT is updated below.
1532     OrderAccess::storestore();
1533     _bt.split_block(res, blk_size, size);  // adjust block offset table
1534     _bt.allocated(res, size);
1535   }
1536   return res;
1537 }
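     // In the common case above the linAB behaves like a bump-pointer buffer.
     // For example (hypothetical values): with blk->_ptr == P and
     // blk->_word_size == 100, a 30-word request returns P and leaves the block
     // at {_ptr == P + 30, _word_size == 70}; the BOT is then told that
     // [P, P + 30) is a separate, allocated block.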
1538 
1539 FreeChunk*
1540 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1541   assert_locked();
1542   assert(size < SmallForDictionary, "just checking");
1543   FreeChunk* res;
1544   res = _indexedFreeList[size].get_chunk_at_head();
1545   if (res == NULL) {
1546     res = getChunkFromIndexedFreeListHelper(size);
1547   }
1548   _bt.verify_not_unallocated((HeapWord*) res, size);
1549   assert(res == NULL || res->size() == size, "Incorrect block size");
1550   return res;
1551 }
1552 
1553 FreeChunk*
1554 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1555   bool replenish) {
1556   assert_locked();
1557   FreeChunk* fc = NULL;
1558   if (size < SmallForDictionary) {
1559     assert(_indexedFreeList[size].head() == NULL ||
1560       _indexedFreeList[size].surplus() <= 0,
1561       "List for this size should be empty or under populated");
1562     // Try best fit in exact lists before replenishing the list
1563     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1564       // Replenish list.
1565       //
1566       // Things tried that failed.
1567       //   Tried allocating out of the two LinAB's first before
1568       // replenishing lists.
1569       //   Tried small linAB of size 256 (size in indexed list)
1570       // and replenishing indexed lists from the small linAB.
1571       //
1572       FreeChunk* newFc = NULL;
1573       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1574       if (replenish_size < SmallForDictionary) {
1575         // Do not replenish from an underpopulated size.
1576         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1577             _indexedFreeList[replenish_size].head() != NULL) {
1578           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1579         } else if (bestFitFirst()) {
1580           newFc = bestFitSmall(replenish_size);
1581         }
1582       }
1583       if (newFc == NULL && replenish_size > size) {
1584         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1585         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1586       }
1587       // Note: The stats update for the split-death of the block obtained
1588       // above will be recorded below, precisely when we know we are going
1589       // to actually split it into more than one piece.
1590       if (newFc != NULL) {
1591         if  (replenish || CMSReplenishIntermediate) {
1592           // Replenish this list and return one block to caller.
1593           size_t i;
1594           FreeChunk *curFc, *nextFc;
1595           size_t num_blk = newFc->size() / size;
1596           assert(num_blk >= 1, "Smaller than requested?");
1597           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1598           if (num_blk > 1) {
1599             // we are sure we will be splitting the block just obtained
1600             // into multiple pieces; record the split-death of the original
1601             splitDeath(replenish_size);
1602           }
1603           // carve up and link blocks 0, ..., num_blk - 2
1604           // The last chunk is not added to the lists but is returned as the
1605           // free chunk.
1606           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1607                i = 0;
1608                i < (num_blk - 1);
1609                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1610                i++) {
1611             curFc->set_size(size);
1612             // Don't record this as a return in order to try to
1613             // determine the "returns" from a GC.
1614             _bt.verify_not_unallocated((HeapWord*) fc, size);
1615             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1616             _bt.mark_block((HeapWord*)curFc, size);
1617             split_birth(size);
1618             // Don't record the initial population of the indexed list
1619             // as a split birth.
1620           }
1621 
1622           // check that the arithmetic was OK above
1623           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1624             "inconsistency in carving newFc");
1625           curFc->set_size(size);
1626           _bt.mark_block((HeapWord*)curFc, size);
1627           split_birth(size);
1628           fc = curFc;
1629         } else {
1630           // Return entire block to caller
1631           fc = newFc;
1632         }
1633       }
1634     }
1635   } else {
1636     // Get a free chunk from the free chunk dictionary to be returned to
1637     // replenish the indexed free list.
1638     fc = getChunkFromDictionaryExact(size);
1639   }
1640   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1641   return fc;
1642 }
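     // Replenishment sketch (hypothetical values): for size == 8 and
     // CMSIndexedFreeListReplenish == 4, the helper looks for a 32-word chunk,
     // carves it into four 8-word chunks, appends the first three to
     // _indexedFreeList[8], and hands the last one back to the caller.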
1643 
1644 FreeChunk*
1645 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1646   assert_locked();
1647   FreeChunk* fc = _dictionary->get_chunk(size,
1648                                          FreeBlockDictionary<FreeChunk>::atLeast);
1649   if (fc == NULL) {
1650     return NULL;
1651   }
1652   _bt.allocated((HeapWord*)fc, fc->size());
1653   if (fc->size() >= size + MinChunkSize) {
1654     fc = splitChunkAndReturnRemainder(fc, size);
1655   }
1656   assert(fc->size() >= size, "chunk too small");
1657   assert(fc->size() < size + MinChunkSize, "chunk too big");
1658   _bt.verify_single_block((HeapWord*)fc, fc->size());
1659   return fc;
1660 }
1661 
1662 FreeChunk*
1663 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1664   assert_locked();
1665   FreeChunk* fc = _dictionary->get_chunk(size,
1666                                          FreeBlockDictionary<FreeChunk>::atLeast);
1667   if (fc == NULL) {
1668     return fc;
1669   }
1670   _bt.allocated((HeapWord*)fc, fc->size());
1671   if (fc->size() == size) {
1672     _bt.verify_single_block((HeapWord*)fc, size);
1673     return fc;
1674   }
1675   assert(fc->size() > size, "get_chunk() guarantee");
1676   if (fc->size() < size + MinChunkSize) {
1677     // Return the chunk to the dictionary and go get a bigger one.
1678     returnChunkToDictionary(fc);
1679     fc = _dictionary->get_chunk(size + MinChunkSize,
1680                                 FreeBlockDictionary<FreeChunk>::atLeast);
1681     if (fc == NULL) {
1682       return NULL;
1683     }
1684     _bt.allocated((HeapWord*)fc, fc->size());
1685   }
1686   assert(fc->size() >= size + MinChunkSize, "tautology");
1687   fc = splitChunkAndReturnRemainder(fc, size);
1688   assert(fc->size() == size, "chunk is wrong size");
1689   _bt.verify_single_block((HeapWord*)fc, size);
1690   return fc;
1691 }
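     // Note on the retry above: a chunk whose size lies strictly between size
     // and size + MinChunkSize cannot be split into an exact-size piece plus a
     // standalone free remainder, so it is returned to the dictionary and a
     // chunk of at least size + MinChunkSize is requested instead, which is
     // guaranteed to be splittable.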
1692 
1693 void
1694 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1695   assert_locked();
1696 
1697   size_t size = chunk->size();
1698   _bt.verify_single_block((HeapWord*)chunk, size);
1699   // adjust _unallocated_block downward, as necessary
1700   _bt.freed((HeapWord*)chunk, size);
1701   _dictionary->return_chunk(chunk);
1702 #ifndef PRODUCT
1703   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1704     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1705     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1706     tl->verify_stats();
1707   }
1708 #endif // PRODUCT
1709 }
1710 
1711 void
1712 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1713   assert_locked();
1714   size_t size = fc->size();
1715   _bt.verify_single_block((HeapWord*) fc, size);
1716   _bt.verify_not_unallocated((HeapWord*) fc, size);
1717   if (_adaptive_freelists) {
1718     _indexedFreeList[size].return_chunk_at_tail(fc);
1719   } else {
1720     _indexedFreeList[size].return_chunk_at_head(fc);
1721   }
1722 #ifndef PRODUCT
1723   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1724      _indexedFreeList[size].verify_stats();
1725   }
1726 #endif // PRODUCT
1727 }
1728 
1729 // Add the chunk to the free lists, coalescing it with the largest block
1730 // in the dictionary if that block ends exactly where the chunk begins,
1731 // and update BOT and census data. We would of course have preferred to
1732 // coalesce with the last block, but it's currently less expensive to find
1733 // the largest block than it is to find the last.
1734 void
1735 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1736   HeapWord* chunk, size_t     size) {
1737   // check that the chunk does lie in this space!
1738   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1739   // One of the parallel gc task threads may be here
1740   // whilst others are allocating.
1741   Mutex* lock = NULL;
1742   if (ParallelGCThreads != 0) {
1743     lock = &_parDictionaryAllocLock;
1744   }
1745   FreeChunk* ec;
1746   {
1747     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1748     ec = dictionary()->find_largest_dict();  // get largest block
1749     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1750       // It's a coterminal block - we can coalesce.
1751       size_t old_size = ec->size();
1752       coalDeath(old_size);
1753       removeChunkFromDictionary(ec);
1754       size += old_size;
1755     } else {
1756       ec = (FreeChunk*)chunk;
1757     }
1758   }
1759   ec->set_size(size);
1760   debug_only(ec->mangleFreed(size));
1761   if (size < SmallForDictionary && ParallelGCThreads != 0) {
1762     lock = _indexedFreeListParLocks[size];
1763   }
1764   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1765   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1766   // Record the birth under the lock, since the recording involves
1767   // manipulation of the list on which the chunk lives; if the chunk
1768   // is allocated and is the last one on the list, the list can
1769   // go away.
1770   coalBirth(size);
1771 }
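     // Coalescing sketch (hypothetical addresses): if the dictionary's largest
     // block is [L, L + 200) and the chunk being added starts at L + 200, the
     // two are merged into a single free block of 200 + size words; otherwise
     // the chunk is added on its own.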
1772 
1773 void
1774 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1775                                               size_t     size) {
1776   // check that the chunk does lie in this space!
1777   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1778   assert_locked();
1779   _bt.verify_single_block(chunk, size);
1780 
1781   FreeChunk* fc = (FreeChunk*) chunk;
1782   fc->set_size(size);
1783   debug_only(fc->mangleFreed(size));
1784   if (size < SmallForDictionary) {
1785     returnChunkToFreeList(fc);
1786   } else {
1787     returnChunkToDictionary(fc);
1788   }
1789 }
1790 
1791 void
1792 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1793   size_t size, bool coalesced) {
1794   assert_locked();
1795   assert(chunk != NULL, "null chunk");
1796   if (coalesced) {
1797     // repair BOT
1798     _bt.single_block(chunk, size);
1799   }
1800   addChunkToFreeLists(chunk, size);
1801 }
1802 
1803 // We _must_ find the purported chunk on our free lists;
1804 // we assert if we don't.
1805 void
1806 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1807   size_t size = fc->size();
1808   assert_locked();
1809   debug_only(verifyFreeLists());
1810   if (size < SmallForDictionary) {
1811     removeChunkFromIndexedFreeList(fc);
1812   } else {
1813     removeChunkFromDictionary(fc);
1814   }
1815   _bt.verify_single_block((HeapWord*)fc, size);
1816   debug_only(verifyFreeLists());
1817 }
1818 
1819 void
1820 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1821   size_t size = fc->size();
1822   assert_locked();
1823   assert(fc != NULL, "null chunk");
1824   _bt.verify_single_block((HeapWord*)fc, size);
1825   _dictionary->remove_chunk(fc);
1826   // adjust _unallocated_block upward, as necessary
1827   _bt.allocated((HeapWord*)fc, size);
1828 }
1829 
1830 void
1831 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1832   assert_locked();
1833   size_t size = fc->size();
1834   _bt.verify_single_block((HeapWord*)fc, size);
1835   NOT_PRODUCT(
1836     if (FLSVerifyIndexTable) {
1837       verifyIndexedFreeList(size);
1838     }
1839   )
1840   _indexedFreeList[size].remove_chunk(fc);
1841   NOT_PRODUCT(
1842     if (FLSVerifyIndexTable) {
1843       verifyIndexedFreeList(size);
1844     }
1845   )
1846 }
1847 
1848 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1849   /* A hint is the next larger size that has a surplus.
1850      Start search at a size large enough to guarantee that
1851      the excess is >= MinChunkSize. */
1852   size_t start = align_object_size(numWords + MinChunkSize);
1853   if (start < IndexSetSize) {
1854     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1855     size_t    hint = _indexedFreeList[start].hint();
1856     while (hint < IndexSetSize) {
1857       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1858       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1859       if (fl->surplus() > 0 && fl->head() != NULL) {
1860         // Found a list with surplus, reset original hint
1861         // and split out a free chunk which is returned.
1862         _indexedFreeList[start].set_hint(hint);
1863         FreeChunk* res = getFromListGreater(fl, numWords);
1864         assert(res == NULL || res->is_free(),
1865           "Should be returning a free chunk");
1866         return res;
1867       }
1868       hint = fl->hint(); /* keep looking */
1869     }
1870     /* None found. */
1871     it[start].set_hint(IndexSetSize);
1872   }
1873   return NULL;
1874 }
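     // Hint-chain sketch (hypothetical sizes): a 10-word request with
     // MinChunkSize == 4 starts the search at size 14; if _indexedFreeList[14]
     // has hint 20 and list 20 has a surplus, list 14's hint is reset to 20 and
     // a 20-word chunk is split, returning 10 words to the caller and freeing a
     // 10-word remainder.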
1875 
1876 /* Requires fl->size >= numWords + MinChunkSize */
1877 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1878   size_t numWords) {
1879   FreeChunk *curr = fl->head();
1880   assert(curr != NULL, "List is empty");
1881   size_t oldNumWords = curr->size();
1882   assert(numWords >= MinChunkSize, "Word size is too small");
1883   assert(oldNumWords >= numWords + MinChunkSize,
1884         "Size of chunks in the list is too small");
1885 
1886   fl->remove_chunk(curr);
1887   // recorded indirectly by splitChunkAndReturnRemainder -
1888   // smallSplit(oldNumWords, numWords);
1889   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1890   // Does anything have to be done for the remainder in terms of
1891   // fixing the card table?
1892   assert(new_chunk == NULL || new_chunk->is_free(),
1893     "Should be returning a free chunk");
1894   return new_chunk;
1895 }
1896 
1897 FreeChunk*
1898 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1899   size_t new_size) {
1900   assert_locked();
1901   size_t size = chunk->size();
1902   assert(size > new_size, "Split from a smaller block?");
1903   assert(is_aligned(chunk), "alignment problem");
1904   assert(size == adjustObjectSize(size), "alignment problem");
1905   size_t rem_size = size - new_size;
1906   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1907   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1908   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1909   assert(is_aligned(ffc), "alignment problem");
1910   ffc->set_size(rem_size);
1911   ffc->link_next(NULL);
1912   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1913   // Above must occur before BOT is updated below.
1914   // adjust block offset table
1915   OrderAccess::storestore();
1916   assert(chunk->is_free() && ffc->is_free(), "Error");
1917   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1918   if (rem_size < SmallForDictionary) {
1919     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1920     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1921     assert(!is_par ||
1922            (SharedHeap::heap()->n_par_threads() ==
1923             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1924     returnChunkToFreeList(ffc);
1925     split(size, rem_size);
1926     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1927   } else {
1928     returnChunkToDictionary(ffc);
1929     split(size, rem_size);
1930   }
1931   chunk->set_size(new_size);
1932   return chunk;
1933 }
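     // Split layout (hypothetical): splitting a 50-word chunk at address C with
     // new_size == 30 yields
     //   [C, C + 30)      -- returned to the caller, still marked free
     //   [C + 30, C + 50) -- a 20-word chunk put back on the free lists
     // with the BOT updated so that block-start queries in either half resolve
     // to the correct chunk boundary.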
1934 
1935 void
1936 CompactibleFreeListSpace::sweep_completed() {
1937   // Now that space is probably plentiful, refill linear
1938   // allocation blocks as needed.
1939   refillLinearAllocBlocksIfNeeded();
1940 }
1941 
1942 void
1943 CompactibleFreeListSpace::gc_prologue() {
1944   assert_locked();
1945   if (PrintFLSStatistics != 0) {
1946     gclog_or_tty->print("Before GC:\n");
1947     reportFreeListStatistics();
1948   }
1949   refillLinearAllocBlocksIfNeeded();
1950 }
1951 
1952 void
1953 CompactibleFreeListSpace::gc_epilogue() {
1954   assert_locked();
1955   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1956     if (_smallLinearAllocBlock._word_size == 0)
1957       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1958   }
1959   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1960   _promoInfo.stopTrackingPromotions();
1961   repairLinearAllocationBlocks();
1962   // Print Space's stats
1963   if (PrintFLSStatistics != 0) {
1964     gclog_or_tty->print("After GC:\n");
1965     reportFreeListStatistics();
1966   }
1967 }
1968 
1969 // Iteration support, mostly delegated from a CMS generation
1970 
1971 void CompactibleFreeListSpace::save_marks() {
1972   assert(Thread::current()->is_VM_thread(),
1973          "Global variable should only be set when single-threaded");
1974   // Mark the "end" of the used space at the time of this call;
1975   // note, however, that promoted objects from this point
1976   // on are tracked in the _promoInfo below.
1977   set_saved_mark_word(unallocated_block());
1978 #ifdef ASSERT
1979   // Check the sanity of save_marks() etc.
1980   MemRegion ur    = used_region();
1981   MemRegion urasm = used_region_at_save_marks();
1982   assert(ur.contains(urasm),
1983          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1984                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1985                  ur.start(), ur.end(), urasm.start(), urasm.end()));
1986 #endif
1987   // inform allocator that promotions should be tracked.
1988   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1989   _promoInfo.startTrackingPromotions();
1990 }
1991 
1992 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1993   assert(_promoInfo.tracking(), "No preceding save_marks?");
1994   assert(SharedHeap::heap()->n_par_threads() == 0,
1995          "Shouldn't be called if using parallel gc.");
1996   return _promoInfo.noPromotions();
1997 }
1998 
1999 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2000                                                                             \
2001 void CompactibleFreeListSpace::                                             \
2002 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2003   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2004          "Shouldn't be called (yet) during parallel part of gc.");          \
2005   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2006   /*                                                                        \
2007    * This also restores any displaced headers and removes the elements from \
2008    * the iteration set as they are processed, so that we have a clean slate \
2009    * at the end of the iteration. Note, thus, that if new objects are       \
2010    * promoted as a result of the iteration they are iterated over as well.  \
2011    */                                                                       \
2012   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2013 }
2014 
2015 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2016 
2017 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2018   return _smallLinearAllocBlock._word_size == 0;
2019 }
2020 
2021 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2022   // Fix up linear allocation blocks to look like free blocks
2023   repairLinearAllocBlock(&_smallLinearAllocBlock);
2024 }
2025 
2026 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2027   assert_locked();
2028   if (blk->_ptr != NULL) {
2029     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2030            "Minimum block size requirement");
2031     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2032     fc->set_size(blk->_word_size);
2033     fc->link_prev(NULL);   // mark as free
2034     fc->dontCoalesce();
2035     assert(fc->is_free(), "just marked it free");
2036     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2037   }
2038 }
2039 
2040 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2041   assert_locked();
2042   if (_smallLinearAllocBlock._ptr == NULL) {
2043     assert(_smallLinearAllocBlock._word_size == 0,
2044       "Size of linAB should be zero if the ptr is NULL");
2045     // Reset the linAB refill and allocation size limit.
2046     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2047   }
2048   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2049 }
2050 
2051 void
2052 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2053   assert_locked();
2054   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2055          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2056          "blk invariant");
2057   if (blk->_ptr == NULL) {
2058     refillLinearAllocBlock(blk);
2059   }
2060   if (PrintMiscellaneous && Verbose) {
2061     if (blk->_word_size == 0) {
2062       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2063     }
2064   }
2065 }
2066 
2067 void
2068 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2069   assert_locked();
2070   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2071          "linear allocation block should be empty");
2072   FreeChunk* fc;
2073   if (blk->_refillSize < SmallForDictionary &&
2074       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2075     // A linAB's strategy might be to use small sizes to reduce
2076     // fragmentation but still get the benefits of allocation from a
2077     // linAB.
2078   } else {
2079     fc = getChunkFromDictionary(blk->_refillSize);
2080   }
2081   if (fc != NULL) {
2082     blk->_ptr  = (HeapWord*)fc;
2083     blk->_word_size = fc->size();
2084     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2085   }
2086 }
2087 
2088 // Support for concurrent collection policy decisions.
2089 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2090   // In the future we might want to add fragmentation stats --
2091   // including erosion of the "mountain" -- into this decision as well.
2092   return !adaptive_freelists() && linearAllocationWouldFail();
2093 }
2094 
2095 // Support for compaction
2096 
2097 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2098   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2099   // prepare_for_compaction() uses the space between live objects
2100   // so that a later phase can skip dead space quickly.  So verification
2101   // of the free lists doesn't work after it has run.
2102 }
2103 
2104 #define obj_size(q) adjustObjectSize(oop(q)->size())
2105 #define adjust_obj_size(s) adjustObjectSize(s)
2106 
2107 void CompactibleFreeListSpace::adjust_pointers() {
2108   // In other versions of adjust_pointers(), a bail out
2109   // based on the amount of live data in the generation
2110   // (i.e., if 0, bail out) may be used.
2111   // Cannot test used() == 0 here because the free lists have already
2112   // been mangled by the compaction.
2113 
2114   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2115   // See note about verification in prepare_for_compaction().
2116 }
2117 
2118 void CompactibleFreeListSpace::compact() {
2119   SCAN_AND_COMPACT(obj_size);
2120 }
2121 
2122 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2123 // where fbs is free block sizes
2124 double CompactibleFreeListSpace::flsFrag() const {
2125   size_t itabFree = totalSizeInIndexedFreeLists();
2126   double frag = 0.0;
2127   size_t i;
2128 
2129   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2130     double sz  = i;
2131     frag      += _indexedFreeList[i].count() * (sz * sz);
2132   }
2133 
2134   double totFree = itabFree +
2135                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2136   if (totFree > 0) {
2137     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2138             (totFree * totFree));
2139     frag = (double)1.0  - frag;
2140   } else {
2141     assert(frag == 0.0, "Follows from totFree == 0");
2142   }
2143   return frag;
2144 }
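     // Worked example (hypothetical): with free blocks of sizes 10, 10 and 20,
     // the sum of squares is 100 + 100 + 400 = 600 and (sum of sizes)^2 is
     // 40 * 40 = 1600, so flsFrag() == 1 - 600/1600 == 0.625; a single 40-word
     // free block would give 1 - 1600/1600 == 0.0, i.e. no fragmentation.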
2145 
2146 void CompactibleFreeListSpace::beginSweepFLCensus(
2147   float inter_sweep_current,
2148   float inter_sweep_estimate,
2149   float intra_sweep_estimate) {
2150   assert_locked();
2151   size_t i;
2152   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2153     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2154     if (PrintFLSStatistics > 1) {
2155       gclog_or_tty->print("size[%d] : ", i);
2156     }
2157     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2158     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2159     fl->set_before_sweep(fl->count());
2160     fl->set_bfr_surp(fl->surplus());
2161   }
2162   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2163                                     inter_sweep_current,
2164                                     inter_sweep_estimate,
2165                                     intra_sweep_estimate);
2166 }
2167 
2168 void CompactibleFreeListSpace::setFLSurplus() {
2169   assert_locked();
2170   size_t i;
2171   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2172     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2173     fl->set_surplus(fl->count() -
2174                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2175   }
2176 }
2177 
2178 void CompactibleFreeListSpace::setFLHints() {
2179   assert_locked();
2180   size_t i;
2181   size_t h = IndexSetSize;
2182   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2183     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2184     fl->set_hint(h);
2185     if (fl->surplus() > 0) {
2186       h = i;
2187     }
2188   }
2189 }
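     // Hint propagation sketch (hypothetical surpluses, stride 1): scanning from
     // the largest size down, each list's hint becomes the nearest larger size
     // that had a surplus; e.g. if only lists 32 and 100 have a surplus, lists
     // 32 through 99 get hint 100, smaller lists get hint 32, and lists of size
     // 100 and above keep hint IndexSetSize.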
2190 
2191 void CompactibleFreeListSpace::clearFLCensus() {
2192   assert_locked();
2193   size_t i;
2194   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2195     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2196     fl->set_prev_sweep(fl->count());
2197     fl->set_coal_births(0);
2198     fl->set_coal_deaths(0);
2199     fl->set_split_births(0);
2200     fl->set_split_deaths(0);
2201   }
2202 }
2203 
2204 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2205   if (PrintFLSStatistics > 0) {
2206     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2207     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2208                            largestAddr);
2209   }
2210   setFLSurplus();
2211   setFLHints();
2212   if (PrintGC && PrintFLSCensus > 0) {
2213     printFLCensus(sweep_count);
2214   }
2215   clearFLCensus();
2216   assert_locked();
2217   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2218 }
2219 
2220 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2221   if (size < SmallForDictionary) {
2222     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2223     return (fl->coal_desired() < 0) ||
2224            ((int)fl->count() > fl->coal_desired());
2225   } else {
2226     return dictionary()->coal_dict_over_populated(size);
2227   }
2228 }
2229 
2230 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2231   assert(size < SmallForDictionary, "Size too large for indexed list");
2232   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2233   fl->increment_coal_births();
2234   fl->increment_surplus();
2235 }
2236 
2237 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2238   assert(size < SmallForDictionary, "Size too large for indexed list");
2239   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2240   fl->increment_coal_deaths();
2241   fl->decrement_surplus();
2242 }
2243 
2244 void CompactibleFreeListSpace::coalBirth(size_t size) {
2245   if (size  < SmallForDictionary) {
2246     smallCoalBirth(size);
2247   } else {
2248     dictionary()->dict_census_update(size,
2249                                    false /* split */,
2250                                    true /* birth */);
2251   }
2252 }
2253 
2254 void CompactibleFreeListSpace::coalDeath(size_t size) {
2255   if (size < SmallForDictionary) {
2256     smallCoalDeath(size);
2257   } else {
2258     dictionary()->dict_census_update(size,
2259                                    false /* split */,
2260                                    false /* birth */);
2261   }
2262 }
2263 
2264 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2265   assert(size < SmallForDictionary, "Size too large for indexed list");
2266   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2267   fl->increment_split_births();
2268   fl->increment_surplus();
2269 }
2270 
2271 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2272   assert(size < SmallForDictionary, "Size too large for indexed list");
2273   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2274   fl->increment_split_deaths();
2275   fl->decrement_surplus();
2276 }
2277 
2278 void CompactibleFreeListSpace::split_birth(size_t size) {
2279   if (size  < SmallForDictionary) {
2280     smallSplitBirth(size);
2281   } else {
2282     dictionary()->dict_census_update(size,
2283                                    true /* split */,
2284                                    true /* birth */);
2285   }
2286 }
2287 
2288 void CompactibleFreeListSpace::splitDeath(size_t size) {
2289   if (size  < SmallForDictionary) {
2290     smallSplitDeath(size);
2291   } else {
2292     dictionary()->dict_census_update(size,
2293                                    true /* split */,
2294                                    false /* birth */);
2295   }
2296 }
2297 
2298 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2299   size_t to2 = from - to1;
2300   splitDeath(from);
2301   split_birth(to1);
2302   split_birth(to2);
2303 }
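     // E.g. splitting a 40-word chunk into 30 + 10 records one split death for
     // size 40 and one split birth each for sizes 30 and 10, keeping the
     // per-size census consistent with the free-list contents.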
2304 
2305 void CompactibleFreeListSpace::print() const {
2306   print_on(tty);
2307 }
2308 
2309 void CompactibleFreeListSpace::prepare_for_verify() {
2310   assert_locked();
2311   repairLinearAllocationBlocks();
2312   // Verify that the SpoolBlocks look like free blocks of
2313   // appropriate sizes... To be done ...
2314 }
2315 
2316 class VerifyAllBlksClosure: public BlkClosure {
2317  private:
2318   const CompactibleFreeListSpace* _sp;
2319   const MemRegion                 _span;
2320   HeapWord*                       _last_addr;
2321   size_t                          _last_size;
2322   bool                            _last_was_obj;
2323   bool                            _last_was_live;
2324 
2325  public:
2326   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2327     MemRegion span) :  _sp(sp), _span(span),
2328                        _last_addr(NULL), _last_size(0),
2329                        _last_was_obj(false), _last_was_live(false) { }
2330 
2331   virtual size_t do_blk(HeapWord* addr) {
2332     size_t res;
2333     bool   was_obj  = false;
2334     bool   was_live = false;
2335     if (_sp->block_is_obj(addr)) {
2336       was_obj = true;
2337       oop p = oop(addr);
2338       guarantee(p->is_oop(), "Should be an oop");
2339       res = _sp->adjustObjectSize(p->size());
2340       if (_sp->obj_is_alive(addr)) {
2341         was_live = true;
2342         p->verify();
2343       }
2344     } else {
2345       FreeChunk* fc = (FreeChunk*)addr;
2346       res = fc->size();
2347       if (FLSVerifyLists && !fc->cantCoalesce()) {
2348         guarantee(_sp->verify_chunk_in_free_list(fc),
2349                   "Chunk should be on a free list");
2350       }
2351     }
2352     if (res == 0) {
2353       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2354       gclog_or_tty->print_cr(
2355         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2356         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2357         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2358         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2359       _sp->print_on(gclog_or_tty);
2360       guarantee(false, "Seppuku!");
2361     }
2362     _last_addr = addr;
2363     _last_size = res;
2364     _last_was_obj  = was_obj;
2365     _last_was_live = was_live;
2366     return res;
2367   }
2368 };
2369 
2370 class VerifyAllOopsClosure: public OopClosure {
2371  private:
2372   const CMSCollector*             _collector;
2373   const CompactibleFreeListSpace* _sp;
2374   const MemRegion                 _span;
2375   const bool                      _past_remark;
2376   const CMSBitMap*                _bit_map;
2377 
2378  protected:
2379   void do_oop(void* p, oop obj) {
2380     if (_span.contains(obj)) { // the interior oop points into CMS heap
2381       if (!_span.contains(p)) { // reference from outside CMS heap
2382         // Should be a valid object; the first disjunct below allows
2383         // us to sidestep an assertion in block_is_obj() that insists
2384         // that p be in _sp. Note that several generations (and spaces)
2385         // are spanned by _span (CMS heap) above.
2386         guarantee(!_sp->is_in_reserved(obj) ||
2387                   _sp->block_is_obj((HeapWord*)obj),
2388                   "Should be an object");
2389         guarantee(obj->is_oop(), "Should be an oop");
2390         obj->verify();
2391         if (_past_remark) {
2392           // Remark has been completed, the object should be marked
2393           _bit_map->isMarked((HeapWord*)obj);
2394         }
2395       } else { // reference within CMS heap
2396         if (_past_remark) {
2397           // Remark has been completed -- so the referent should have
2398           // been marked, if referring object is.
2399           if (_bit_map->isMarked(_collector->block_start(p))) {
2400             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2401           }
2402         }
2403       }
2404     } else if (_sp->is_in_reserved(p)) {
2405       // the reference is from FLS, and points out of FLS
2406       guarantee(obj->is_oop(), "Should be an oop");
2407       obj->verify();
2408     }
2409   }
2410 
2411   template <class T> void do_oop_work(T* p) {
2412     T heap_oop = oopDesc::load_heap_oop(p);
2413     if (!oopDesc::is_null(heap_oop)) {
2414       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2415       do_oop(p, obj);
2416     }
2417   }
2418 
2419  public:
2420   VerifyAllOopsClosure(const CMSCollector* collector,
2421     const CompactibleFreeListSpace* sp, MemRegion span,
2422     bool past_remark, CMSBitMap* bit_map) :
2423     _collector(collector), _sp(sp), _span(span),
2424     _past_remark(past_remark), _bit_map(bit_map) { }
2425 
2426   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2427   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2428 };
2429 
2430 void CompactibleFreeListSpace::verify() const {
2431   assert_lock_strong(&_freelistLock);
2432   verify_objects_initialized();
2433   MemRegion span = _collector->_span;
2434   bool past_remark = (_collector->abstract_state() ==
2435                       CMSCollector::Sweeping);
2436 
2437   ResourceMark rm;
2438   HandleMark  hm;
2439 
2440   // Check integrity of CFL data structures
2441   _promoInfo.verify();
2442   _dictionary->verify();
2443   if (FLSVerifyIndexTable) {
2444     verifyIndexedFreeLists();
2445   }
2446   // Check integrity of all objects and free blocks in space
2447   {
2448     VerifyAllBlksClosure cl(this, span);
2449     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2450   }
2451   // Check that all references in the heap to FLS
2452   // are to valid objects in FLS or that references in
2453   // FLS are to valid objects elsewhere in the heap
2454   if (FLSVerifyAllHeapReferences)
2455   {
2456     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2457       _collector->markBitMap());
2458     CollectedHeap* ch = Universe::heap();
2459 
2460     // Iterate over all oops in the heap. Uses the _no_header version
2461     // since we are not interested in following the klass pointers.
2462     ch->oop_iterate_no_header(&cl);
2463   }
2464 
2465   if (VerifyObjectStartArray) {
2466     // Verify the block offset table
2467     _bt.verify();
2468   }
2469 }
2470 
2471 #ifndef PRODUCT
2472 void CompactibleFreeListSpace::verifyFreeLists() const {
2473   if (FLSVerifyLists) {
2474     _dictionary->verify();
2475     verifyIndexedFreeLists();
2476   } else {
2477     if (FLSVerifyDictionary) {
2478       _dictionary->verify();
2479     }
2480     if (FLSVerifyIndexTable) {
2481       verifyIndexedFreeLists();
2482     }
2483   }
2484 }
2485 #endif
2486 
2487 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2488   size_t i = 0;
2489   for (; i < IndexSetStart; i++) {
2490     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2491   }
2492   for (; i < IndexSetSize; i++) {
2493     verifyIndexedFreeList(i);
2494   }
2495 }
2496 
2497 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2498   FreeChunk* fc   =  _indexedFreeList[size].head();
2499   FreeChunk* tail =  _indexedFreeList[size].tail();
2500   size_t    num = _indexedFreeList[size].count();
2501   size_t      n = 0;
2502   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2503             "Slot should have been empty");
2504   for (; fc != NULL; fc = fc->next(), n++) {
2505     guarantee(fc->size() == size, "Size inconsistency");
2506     guarantee(fc->is_free(), "!free?");
2507     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2508     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2509   }
2510   guarantee(n == num, "Incorrect count");
2511 }
2512 
2513 #ifndef PRODUCT
2514 void CompactibleFreeListSpace::check_free_list_consistency() const {
2515   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2516     "Some sizes can't be allocated without recourse to"
2517     " linear allocation buffers");
2518   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2519     "else MIN_TREE_CHUNK_SIZE is wrong");
2520   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2521   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2522 }
2523 #endif
2524 
2525 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2526   assert_lock_strong(&_freelistLock);
2527   AdaptiveFreeList<FreeChunk> total;
2528   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2529   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2530   size_t total_free = 0;
2531   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2532     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2533     total_free += fl->count() * fl->size();
2534     if (i % (40*IndexSetStride) == 0) {
2535       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2536     }
2537     fl->print_on(gclog_or_tty);
2538     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2539     total.set_surplus(    total.surplus()     + fl->surplus()    );
2540     total.set_desired(    total.desired()     + fl->desired()    );
2541     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2542     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2543     total.set_count(      total.count()       + fl->count()      );
2544     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2545     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2546     total.set_split_births(total.split_births() + fl->split_births());
2547     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2548   }
2549   total.print_on(gclog_or_tty, "TOTAL");
2550   gclog_or_tty->print_cr("Total free in indexed lists "
2551                          SIZE_FORMAT " words", total_free);
2552   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2553     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2554             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2555     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2556   _dictionary->print_dict_census();
2557 }
2558 
2559 ///////////////////////////////////////////////////////////////////////////
2560 // CFLS_LAB
2561 ///////////////////////////////////////////////////////////////////////////
2562 
2563 #define VECTOR_257(x)                                                                                  \
2564   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2565   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2566      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2567      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2568      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2569      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2570      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2571      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2572      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2573      x }
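     // The macro above expands to an aggregate initializer with 257 copies of x
     // (eight rows of 32 plus one), one per indexed free list slot; the CFLS_LAB
     // constructor asserts that this stays in sync with IndexSetSize.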
2574 
2575 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2576 // OldPLABSize, whose static default is different; if overridden at the
2577 // command-line, this will get reinitialized via a call to
2578 // modify_initialization() below.
2579 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2580   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2581 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2582 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2583 
2584 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2585   _cfls(cfls)
2586 {
2587   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2588   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2589        i < CompactibleFreeListSpace::IndexSetSize;
2590        i += CompactibleFreeListSpace::IndexSetStride) {
2591     _indexedFreeList[i].set_size(i);
2592     _num_blocks[i] = 0;
2593   }
2594 }
2595 
2596 static bool _CFLS_LAB_modified = false;
2597 
2598 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2599   assert(!_CFLS_LAB_modified, "Call only once");
2600   _CFLS_LAB_modified = true;
2601   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2602        i < CompactibleFreeListSpace::IndexSetSize;
2603        i += CompactibleFreeListSpace::IndexSetStride) {
2604     _blocks_to_claim[i].modify(n, wt, true /* force */);
2605   }
2606 }
2607 
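     // Allocate word_sz heap words for promotion.  Requests of IndexSetSize
     // words or more go straight to the shared dictionary (under the parallel
     // allocation lock); smaller requests are served from this thread's local
     // indexed free list, which is refilled in bulk from the space's global
     // lists when it runs empty.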
2608 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2609   FreeChunk* res;
2610   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2611   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2612     // This locking manages sync with other large object allocations.
2613     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2614                     Mutex::_no_safepoint_check_flag);
2615     res = _cfls->getChunkFromDictionaryExact(word_sz);
2616     if (res == NULL) return NULL;
2617   } else {
2618     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2619     if (fl->count() == 0) {
2620       // Attempt to refill this local free list.
2621       get_from_global_pool(word_sz, fl);
2622       // If it didn't work, give up.
2623       if (fl->count() == 0) return NULL;
2624     }
2625     res = fl->get_chunk_at_head();
2626     assert(res != NULL, "Why was count non-zero?");
2627   }
2628   res->markNotFree();
2629   assert(!res->is_free(), "shouldn't be marked free");
2630   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2631   // mangle a just allocated object with a distinct pattern.
2632   debug_only(res->mangleAllocated(word_sz));
2633   return (HeapWord*)res;
2634 }
2635 
2636 // Get a chunk of blocks of the right size and update related
2637 // book-keeping stats
2638 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2639   // Get the #blocks we want to claim
2640   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2641   assert(n_blks > 0, "Error");
2642   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2643   // In some cases, when the application has a phase change,
2644   // there may be a sudden and sharp shift in the object survival
2645   // profile, and updating the counts at the end of a scavenge
2646   // may not be quick enough, giving rise to large scavenge pauses
2647   // during these phase changes. It is beneficial to detect such
2648   // changes on-the-fly during a scavenge and avoid such a phase-change
2649   // pothole. The following code is a heuristic attempt to do that.
2650   // It is protected by a product flag until we have gained
2651   // enough experience with this heuristic and fine-tuned its behavior.
2652   // WARNING: This might increase fragmentation if we overreact to
2653   // small spikes, so some kind of historical smoothing based on
2654   // previous experience with the greater reactivity might be useful.
2655   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2656   // default.
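       // Illustrative example (hypothetical flag values, not the defaults):
       // with CMSOldPLABToleranceFactor=4, CMSOldPLABNumRefills=4 and n_blks=16,
       // a thread that has already taken 512 blocks of this size from the
       // global pool this scavenge gets multiple = 512/(4*4*16) = 2, so with
       // CMSOldPLABReactivityFactor=2 n_blks grows to 16 + 2*2*16 = 80 before
       // the CMSOldPLABMax clamp below.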
2657   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2658     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2659     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2660     n_blks = MIN2(n_blks, CMSOldPLABMax);
2661   }
2662   assert(n_blks > 0, "Error");
2663   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2664   // Update stats table entry for this block size
2665   _num_blocks[word_sz] += fl->count();
2666 }
2667 
2668 void CFLS_LAB::compute_desired_plab_size() {
2669   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2670        i < CompactibleFreeListSpace::IndexSetSize;
2671        i += CompactibleFreeListSpace::IndexSetStride) {
2672     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2673            "Counter inconsistency");
2674     if (_global_num_workers[i] > 0) {
2675       // Need to smooth wrt historical average
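           // (the value sampled below is this size's per-worker, per-refill
           // consumption during the last scavenge, clamped to the range
           // [CMSOldPLABMin, CMSOldPLABMax])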
2676       if (ResizeOldPLAB) {
2677         _blocks_to_claim[i].sample(
2678           MAX2((size_t)CMSOldPLABMin,
2679           MIN2((size_t)CMSOldPLABMax,
2680                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2681       }
2682       // Reset counters for next round
2683       _global_num_workers[i] = 0;
2684       _global_num_blocks[i] = 0;
2685       if (PrintOldPLAB) {
2686         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2687       }
2688     }
2689   }
2690 }
2691 
2692 // If this is changed in the future to allow parallel
2693 // access, one would need to take the FL locks and,
2694 // depending on how it is used, stagger access from
2695 // parallel threads to reduce contention.
2696 void CFLS_LAB::retire(int tid) {
2697   // We run this single threaded with the world stopped;
2698   // so no need for locks and such.
2699   NOT_PRODUCT(Thread* t = Thread::current();)
2700   assert(Thread::current()->is_VM_thread(), "Error");
2701   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2702        i < CompactibleFreeListSpace::IndexSetSize;
2703        i += CompactibleFreeListSpace::IndexSetStride) {
2704     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2705            "Can't retire more than what we obtained");
2706     if (_num_blocks[i] > 0) {
2707       size_t num_retire =  _indexedFreeList[i].count();
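           // num_retire counts blocks still sitting unused on the local list;
           // the difference (_num_blocks[i] - num_retire) is what this thread
           // actually consumed, and is what feeds the global statistics below.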
2708       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2709       {
2710         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2711         //                Mutex::_no_safepoint_check_flag);
2712 
2713         // Update globals stats for num_blocks used
2714         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2715         _global_num_workers[i]++;
2716         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2717         if (num_retire > 0) {
2718           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2719           // Reset this list.
2720           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2721           _indexedFreeList[i].set_size(i);
2722         }
2723       }
2724       if (PrintOldPLAB) {
2725         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2726                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2727       }
2728       // Reset stats for next round
2729       _num_blocks[i]         = 0;
2730     }
2731   }
2732 }
2733 
2734 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2735   assert(fl->count() == 0, "Precondition.");
2736   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2737          "Precondition");
2738 
2739   // We'll try all multiples of word_sz in the indexed set, starting with
2740   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2741   // then try getting a big chunk and splitting it.
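       // For example (illustrative sizes): with word_sz = 8 and n = 50 we first
       // try the 8-word list, then (if CMSSplitIndexedFreeListBlocks) the 16-,
       // 24-, ... word lists, splitting each donor chunk k ways; only if all of
       // those are empty do we carve roughly 50 * 8 = 400 words out of a single
       // dictionary chunk below.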
2742   {
2743     bool found;
2744     int  k;
2745     size_t cur_sz;
2746     for (k = 1, cur_sz = k * word_sz, found = false;
2747          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2748          (CMSSplitIndexedFreeListBlocks || k <= 1);
2749          k++, cur_sz = k * word_sz) {
2750       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2751       fl_for_cur_sz.set_size(cur_sz);
2752       {
2753         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2754                         Mutex::_no_safepoint_check_flag);
2755         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2756         if (gfl->count() != 0) {
2757           // nn is the number of chunks of size cur_sz that
2758           // we'd need to split k-ways each, in order to create
2759           // "n" chunks of size word_sz each.
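               // For example, to end up with n = 50 chunks of word_sz when
               // k = 2 we take nn = 25 chunks from the 2*word_sz list and
               // split each of them in two below.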
2760           const size_t nn = MAX2(n/k, (size_t)1);
2761           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2762           found = true;
2763           if (k > 1) {
2764             // Update split death stats for the cur_sz-size blocks list:
2765             // we increment the split death count by the number of blocks
2766             // we just took from the cur_sz-size blocks list and which
2767             // we will be splitting below.
2768             ssize_t deaths = gfl->split_deaths() +
2769                              fl_for_cur_sz.count();
2770             gfl->set_split_deaths(deaths);
2771           }
2772         }
2773       }
2774       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2775       if (found) {
2776         if (k == 1) {
2777           fl->prepend(&fl_for_cur_sz);
2778         } else {
2779           // Divide each block on fl_for_cur_sz up k ways.
2780           FreeChunk* fc;
2781           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2782             // Must do this in reverse order, so that anybody attempting to
2783             // access the main chunk sees it as a single free block until we
2784             // change it.
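                 // That is, the sub-chunk at the highest offset (i == k-1) is
                 // carved out and published first, while the chunk header at fc
                 // still advertises the full k*word_sz block until the final
                 // (i == 0) iteration resizes it.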
2785             size_t fc_size = fc->size();
2786             assert(fc->is_free(), "Error");
2787             for (int i = k-1; i >= 0; i--) {
2788               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2789               assert((i != 0) ||
2790                         ((fc == ffc) && ffc->is_free() &&
2791                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2792                         "Counting error");
2793               ffc->set_size(word_sz);
2794               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2795               ffc->link_next(NULL);
2796               // Above must occur before BOT is updated below.
2797               OrderAccess::storestore();
2798               // splitting from the right; after the decrement below, fc_size == i * word_sz
2799               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2800               fc_size -= word_sz;
2801               assert(fc_size == i*word_sz, "Error");
2802               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2803               _bt.verify_single_block((HeapWord*)fc, fc_size);
2804               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2805               // Push this on "fl".
2806               fl->return_chunk_at_head(ffc);
2807             }
2808             // TRAP
2809             assert(fl->tail()->next() == NULL, "List invariant.");
2810           }
2811         }
2812         // Update birth stats for this block size.
2813         size_t num = fl->count();
2814         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2815                         Mutex::_no_safepoint_check_flag);
2816         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2817         _indexedFreeList[word_sz].set_split_births(births);
2818         return;
2819       }
2820     }
2821   }
2822   // Otherwise, we'll split a block from the dictionary.
2823   FreeChunk* fc = NULL;
2824   FreeChunk* rem_fc = NULL;
2825   size_t rem;
2826   {
2827     MutexLockerEx x(parDictionaryAllocLock(),
2828                     Mutex::_no_safepoint_check_flag);
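         // Ask the dictionary for enough space to cover all n blocks at once;
         // if no such chunk exists, shrink the request one block at a time
         // rather than giving up immediately.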
2829     while (n > 0) {
2830       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2831                                   FreeBlockDictionary<FreeChunk>::atLeast);
2832       if (fc != NULL) {
2833         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2834         dictionary()->dict_census_update(fc->size(),
2835                                        true /*split*/,
2836                                        false /*birth*/);
2837         break;
2838       } else {
2839         n--;
2840       }
2841     }
2842     if (fc == NULL) return;
2843     // Otherwise, split up that block.
2844     assert((ssize_t)n >= 1, "Control point invariant");
2845     assert(fc->is_free(), "Error: should be a free block");
2846     _bt.verify_single_block((HeapWord*)fc, fc->size());
2847     const size_t nn = fc->size() / word_sz;
2848     n = MIN2(nn, n);
2849     assert((ssize_t)n >= 1, "Control point invariant");
2850     rem = fc->size() - n * word_sz;
2851     // If there is a remainder, and it's too small, allocate one fewer.
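         // For example (illustrative sizes): carving as many 4-word blocks as
         // possible out of a 25-word chunk gives n = 6, rem = 1; if MinChunkSize
         // were 3, we would back off to n = 5, rem = 5 so the leftover is still
         // a usable free chunk.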
2852     if (rem > 0 && rem < MinChunkSize) {
2853       n--; rem += word_sz;
2854     }
2855     // Note that at this point we may have n == 0.
2856     assert((ssize_t)n >= 0, "Control point invariant");
2857 
2858     // If n is 0, the chunk fc that was found is not large
2859     // enough to leave a viable remainder.  We are unable to
2860     // allocate even one block.  Return fc to the
2861     // dictionary and return, leaving "fl" empty.
2862     if (n == 0) {
2863       returnChunkToDictionary(fc);
2864       assert(fl->count() == 0, "We never allocated any blocks");
2865       return;
2866     }
2867 
2868     // First return the remainder, if any.
2869     // Note that we hold the lock until we decide if we're going to give
2870     // back the remainder to the dictionary, since a concurrent allocation
2871     // may otherwise see the heap as empty.  (We're willing to take that
2872     // hit if the block is a small block.)
2873     if (rem > 0) {
2874       size_t prefix_size = n * word_sz;
2875       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2876       rem_fc->set_size(rem);
2877       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2878       rem_fc->link_next(NULL);
2879       // Above must occur before BOT is updated below.
2880       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2881       OrderAccess::storestore();
2882       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2883       assert(fc->is_free(), "Error");
2884       fc->set_size(prefix_size);
2885       if (rem >= IndexSetSize) {
2886         returnChunkToDictionary(rem_fc);
2887         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2888         rem_fc = NULL;
2889       }
2890       // Otherwise, return it to the small list below.
2891     }
2892   }
2893   if (rem_fc != NULL) {
2894     MutexLockerEx x(_indexedFreeListParLocks[rem],
2895                     Mutex::_no_safepoint_check_flag);
2896     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2897     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2898     smallSplitBirth(rem);
2899   }
2900   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2901   // Now do the splitting up.
2902   // Must do this in reverse order, so that anybody attempting to
2903   // access the main chunk sees it as a single free block until we
2904   // change it.
2905   size_t fc_size = n * word_sz;
2906   // All but first chunk in this loop
2907   for (ssize_t i = n-1; i > 0; i--) {
2908     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2909     ffc->set_size(word_sz);
2910     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2911     ffc->link_next(NULL);
2912     // Above must occur before BOT is updated below.
2913     OrderAccess::storestore();
2914     // splitting from the right; here fc_size == (i + 1) * word_sz
2915     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2916     fc_size -= word_sz;
2917     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2918     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2919     _bt.verify_single_block((HeapWord*)fc, fc_size);
2920     // Push this on "fl".
2921     fl->return_chunk_at_head(ffc);
2922   }
2923   // First chunk
2924   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2925   // The blocks above should show their new sizes before the first block below
2926   fc->set_size(word_sz);
2927   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2928   fc->link_next(NULL);
2929   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2930   _bt.verify_single_block((HeapWord*)fc, fc->size());
2931   fl->return_chunk_at_head(fc);
2932 
2933   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2934   {
2935     // Update the stats for this block size.
2936     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2937                     Mutex::_no_safepoint_check_flag);
2938     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2939     _indexedFreeList[word_sz].set_split_births(births);
2940     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2941     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2942   }
2943 
2944   // TRAP
2945   assert(fl->tail()->next() == NULL, "List invariant.");
2946 }
2947 
2948 // Set up the space's par_seq_tasks structure for work claiming
2949 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2950 // XXX Need to suitably abstract and generalize this and the next
2951 // method into one.
2952 void
2953 CompactibleFreeListSpace::
2954 initialize_sequential_subtasks_for_rescan(int n_threads) {
2955   // The "size" of each task is fixed according to rescan_task_size.
2956   assert(n_threads > 0, "Unexpected n_threads argument");
2957   const size_t task_size = rescan_task_size();
2958   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
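       // i.e. n_tasks = ceiling(used words / task_size); for instance, 1000
       // used words with a task_size of 300 (illustrative numbers) yields
       // (1000 + 299) / 300 = 4 tasks.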
2959   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2960   assert(n_tasks == 0 ||
2961          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2962           (used_region().start() + n_tasks*task_size >= used_region().end())),
2963          "n_tasks calculation incorrect");
2964   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2965   assert(!pst->valid(), "Clobbering existing data?");
2966   // Sets the condition for completion of the subtask (how many threads
2967   // need to finish in order to be done).
2968   pst->set_n_threads(n_threads);
2969   pst->set_n_tasks((int)n_tasks);
2970 }
2971 
2972 // Set up the space's par_seq_tasks structure for work claiming
2973 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2974 void
2975 CompactibleFreeListSpace::
2976 initialize_sequential_subtasks_for_marking(int n_threads,
2977                                            HeapWord* low) {
2978   // The "size" of each task is fixed according to marking_task_size.
2979   assert(n_threads > 0, "Unexpected n_threads argument");
2980   const size_t task_size = marking_task_size();
2981   assert(task_size > CardTableModRefBS::card_size_in_words &&
2982          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2983          "Otherwise arithmetic below would be incorrect");
2984   MemRegion span = _gen->reserved();
2985   if (low != NULL) {
2986     if (span.contains(low)) {
2987       // Align low down to  a card boundary so that
2988       // we can use block_offset_careful() on span boundaries.
2989       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2990                                  CardTableModRefBS::card_size);
2991       // Clip span prefix at aligned_low
2992       span = span.intersection(MemRegion(aligned_low, span.end()));
2993     } else if (low > span.end()) {
2994       span = MemRegion(low, low);  // Null region
2995     } // else use entire span
2996   }
2997   assert(span.is_empty() ||
2998          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2999         "span should start at a card boundary");
3000   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3001   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3002   assert(n_tasks == 0 ||
3003          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3004           (span.start() + n_tasks*task_size >= span.end())),
3005          "n_tasks calculation incorrect");
3006   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3007   assert(!pst->valid(), "Clobbering existing data?");
3008   // Sets the condition for completion of the subtask (how many threads
3009   // need to finish in order to be done).
3010   pst->set_n_threads(n_threads);
3011   pst->set_n_tasks((int)n_tasks);
3012 }