1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/globals.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/vmThread.hpp"
  43 #include "utilities/copy.hpp"
  44 
  45 /////////////////////////////////////////////////////////////////////////
  46 //// CompactibleFreeListSpace
  47 /////////////////////////////////////////////////////////////////////////
  48 
// highest ranked free list lock rank
  50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  51 
  52 // Defaults are 0 so things will break badly if incorrectly initialized.
  53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  55 
  56 size_t MinChunkSize = 0;
  57 
  58 void CompactibleFreeListSpace::set_cms_values() {
  59   // Set CMS global values
  60   assert(MinChunkSize == 0, "already set");
  61 
  62   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  63   // for chunks to contain a FreeChunk.
  64   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  65   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  66 
  67   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  68   IndexSetStart  = MinChunkSize;
  69   IndexSetStride = MinObjAlignment;
  70 }
  71 
  72 // Constructor
  73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  74   MemRegion mr, bool use_adaptive_freelists,
  75   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  76   _dictionaryChoice(dictionaryChoice),
  77   _adaptive_freelists(use_adaptive_freelists),
  78   _bt(bs, mr),
  // Free list locks are in the range of values taken by _lockRank,
  // currently [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors are called serially in the
  // order in which the locks are acquired in the program text. This is
  // true today.
  84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true),
  87   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  88                     CMSRescanMultiple),
  89   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSConcMarkMultiple),
  91   _collector(NULL)
  92 {
  93   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  94     "FreeChunk is larger than expected");
  95   _bt.set_space(this);
  96   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  97   // We have all of "mr", all of which we place in the dictionary
  98   // as one big chunk. We'll need to decide here which of several
  99   // possible alternative dictionary implementations to use. For
 100   // now the choice is easy, since we have only one working
 101   // implementation, namely, the simple binary tree (splaying
 102   // temporarily disabled).
 103   switch (dictionaryChoice) {
 104     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 105     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 106     default:
 107       warning("dictionaryChoice: selected option not understood; using"
 108               " default BinaryTreeDictionary implementation instead.");
 109     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 110       _dictionary = new BinaryTreeDictionary<FreeChunk>(mr, use_adaptive_freelists);
 111       break;
 112   }
 113   assert(_dictionary != NULL, "CMS dictionary initialization");
 114   // The indexed free lists are initially all empty and are lazily
 115   // filled in on demand. Initialize the array elements to NULL.
 116   initializeIndexedFreeListArray();
 117 
  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's.  Also, a CMS perm gen that can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass, so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks.
 124   if (!use_adaptive_freelists) {
 125     FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
 126     // The small linAB initially has all the space and will allocate
 127     // a chunk of any size.
 128     HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
 131     // Note that _unallocated_block is not updated here.
 132     // Allocations from the linear allocation block should
 133     // update it.
 134   } else {
 135     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 136                                SmallForLinearAlloc);
 137   }
 138   // CMSIndexedFreeListReplenish should be at least 1
 139   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 140   _promoInfo.setSpace(this);
 141   if (UseCMSBestFit) {
 142     _fitStrategy = FreeBlockBestFitFirst;
 143   } else {
 144     _fitStrategy = FreeBlockStrategyNone;
 145   }
 146   check_free_list_consistency();
 147 
 148   // Initialize locks for parallel case.
 149 
 150   if (CollectedHeap::use_parallel_gc_threads()) {
 151     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 153                                               "a freelist par lock",
 154                                               true);
 155       if (_indexedFreeListParLocks[i] == NULL)
 156         vm_exit_during_initialization("Could not allocate a par lock");
 157       DEBUG_ONLY(
 158         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 159       )
 160     }
 161     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 162   }
 163 }
 164 
 165 // Like CompactibleSpace forward() but always calls cross_threshold() to
 166 // update the block offset table.  Removed initialize_threshold call because
 167 // CFLS does not use a block offset array for contiguous spaces.
 168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 169                                     CompactPoint* cp, HeapWord* compact_top) {
 170   // q is alive
 171   // First check if we should switch compaction space
 172   assert(this == cp->space, "'this' should be current compaction space.");
 173   size_t compaction_max_size = pointer_delta(end(), compact_top);
 174   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 175     "virtual adjustObjectSize_v() method is not correct");
 176   size_t adjusted_size = adjustObjectSize(size);
 177   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 178          "no small fragments allowed");
 179   assert(minimum_free_block_size() == MinChunkSize,
 180          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
 182   if (adjusted_size + MinChunkSize > compaction_max_size &&
 183       adjusted_size != compaction_max_size) {
 184     do {
 185       // switch to next compaction space
 186       cp->space->set_compaction_top(compact_top);
 187       cp->space = cp->space->next_compaction_space();
 188       if (cp->space == NULL) {
 189         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 190         assert(cp->gen != NULL, "compaction must succeed");
 191         cp->space = cp->gen->first_compaction_space();
 192         assert(cp->space != NULL, "generation must have a first compaction space");
 193       }
 194       compact_top = cp->space->bottom();
 195       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here, which was avoided above to save the
      // virtual dispatch.
 200       adjusted_size = cp->space->adjust_object_size_v(size);
 201       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 202       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 203     } while (adjusted_size > compaction_max_size);
 204   }
 205 
 206   // store the forwarding pointer into the mark word
 207   if ((HeapWord*)q != compact_top) {
 208     q->forward_to(oop(compact_top));
 209     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 210   } else {
 211     // if the object isn't moving we can just set the mark to the default
 212     // mark and handle it specially later on.
 213     q->init_mark();
 214     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 215   }
 216 
 217   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
 218   compact_top += adjusted_size;
 219 
 220   // we need to update the offset table so that the beginnings of objects can be
 221   // found during scavenge.  Note that we are updating the offset table based on
 222   // where the object will be once the compaction phase finishes.
 223 
  // Always call cross_threshold().  A contiguous space would call it only
  // when compaction_top exceeds the current threshold, but that does not
  // apply to a non-contiguous space.
 227   cp->threshold =
 228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 229   return compact_top;
 230 }
 231 
 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 233 // and use of single_block instead of alloc_block.  The name here is not really
 234 // appropriate - maybe a more general name could be invented for both the
 235 // contiguous and noncontiguous spaces.
 236 
 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 238   _bt.single_block(start, the_end);
 239   return end();
 240 }
 241 
// Initialize the indexed free list array; each list starts out empty.
 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 244   for (size_t i = 0; i < IndexSetSize; i++) {
 245     // Note that on platforms where objects are double word aligned,
 246     // the odd array elements are not used.  It is convenient, however,
 247     // to map directly from the object size to the array element.
 248     _indexedFreeList[i].reset(IndexSetSize);
 249     _indexedFreeList[i].set_size(i);
 250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 254   }
 255 }
 256 
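// Reset each indexed free list to empty, preserving the per-list size field.
// Unlike initializeIndexedFreeListArray(), this expects the list sizes to
// have been set already and verifies them in debug builds.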
 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 258   for (size_t i = 1; i < IndexSetSize; i++) {
 259     assert(_indexedFreeList[i].size() == (size_t) i,
 260       "Indexed free list sizes are incorrect");
 261     _indexedFreeList[i].reset(IndexSetSize);
 262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 266   }
 267 }
 268 
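// Reset the space so that "mr" is its only free block: clear the indexed
// free lists and the dictionary, reinitialize the block offset table for
// "mr", and return "mr" (if non-empty) to the dictionary or to an indexed
// free list depending on its size.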
 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
 270   resetIndexedFreeListArray();
 271   dictionary()->reset();
 272   if (BlockOffsetArrayUseUnallocatedBlock) {
 273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 274     // Everything's allocated until proven otherwise.
 275     _bt.set_unallocated_block(end());
 276   }
 277   if (!mr.is_empty()) {
 278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 279     _bt.single_block(mr.start(), mr.word_size());
 280     FreeChunk* fc = (FreeChunk*) mr.start();
 281     fc->set_size(mr.word_size());
 282     if (mr.word_size() >= IndexSetSize ) {
 283       returnChunkToDictionary(fc);
 284     } else {
 285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 287     }
 288   }
 289   _promoInfo.reset();
 290   _smallLinearAllocBlock._ptr = NULL;
 291   _smallLinearAllocBlock._word_size = 0;
 292 }
 293 
 294 void CompactibleFreeListSpace::reset_after_compaction() {
 295   // Reset the space to the new reality - one free chunk.
 296   MemRegion mr(compaction_top(), end());
 297   reset(mr);
 298   // Now refill the linear allocation block(s) if possible.
 299   if (_adaptive_freelists) {
 300     refillLinearAllocBlocksIfNeeded();
 301   } else {
 302     // Place as much of mr in the linAB as we can get,
 303     // provided it was big enough to go into the dictionary.
 304     FreeChunk* fc = dictionary()->find_largest_dict();
 305     if (fc != NULL) {
 306       assert(fc->size() == mr.word_size(),
 307              "Why was the chunk broken up?");
 308       removeChunkFromDictionary(fc);
 309       HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
        1024*SmallForLinearAlloc, fc->size());
 312       // Note that _unallocated_block is not updated here.
 313     }
 314   }
 315 }
 316 
 317 // Walks the entire dictionary, returning a coterminal
 318 // chunk, if it exists. Use with caution since it involves
 319 // a potentially complete walk of a potentially large tree.
 320 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 321 
 322   assert_lock_strong(&_freelistLock);
 323 
 324   return dictionary()->find_chunk_ends_at(end());
 325 }
 326 
 327 
 328 #ifndef PRODUCT
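// Debug-only (non-product) helpers for gathering statistics over the
// indexed free lists.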
 329 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 330   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 331     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 332   }
 333 }
 334 
 335 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 336   size_t sum = 0;
 337   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 338     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 339   }
 340   return sum;
 341 }
 342 
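// Count the free chunks on all indexed free lists; in debug builds each
// list is also walked to check its cached count.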
 343 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 344   size_t count = 0;
 345   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 346     debug_only(
 347       ssize_t total_list_count = 0;
 348       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 349          fc = fc->next()) {
 350         total_list_count++;
 351       }
 352       assert(total_list_count ==  _indexedFreeList[i].count(),
 353         "Count in list is incorrect");
 354     )
 355     count += _indexedFreeList[i].count();
 356   }
 357   return count;
 358 }
 359 
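// Total number of free chunks in the space: those on the indexed free
// lists, those in the dictionary, plus the linear allocation block's
// remainder if it is non-empty.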
 360 size_t CompactibleFreeListSpace::totalCount() {
 361   size_t num = totalCountInIndexedFreeLists();
 362   num +=  dictionary()->total_count();
 363   if (_smallLinearAllocBlock._word_size != 0) {
 364     num++;
 365   }
 366   return num;
 367 }
 368 #endif
 369 
 370 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 371   FreeChunk* fc = (FreeChunk*) p;
 372   return fc->is_free();
 373 }
 374 
 375 size_t CompactibleFreeListSpace::used() const {
 376   return capacity() - free();
 377 }
 378 
 379 size_t CompactibleFreeListSpace::free() const {
 380   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 381   // if you do this while the structures are in flux you
 382   // may get an approximate answer only; for instance
 383   // because there is concurrent allocation either
 384   // directly by mutators or for promotion during a GC.
 385   // It's "MT-safe", however, in the sense that you are guaranteed
 386   // not to crash and burn, for instance, because of walking
 387   // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and,
  // further, the computation of totalSizeInIndexedFreeLists()
  // is itself non-atomic). The normal use of
 392   // this is during a resize operation at the end of GC
 393   // and at that time you are guaranteed to get the
 394   // correct actual value. However, for instance, this is
 395   // also read completely asynchronously by the "perf-sampler"
 396   // that supports jvmstat, and you are apt to see the values
 397   // flicker in such cases.
 398   assert(_dictionary != NULL, "No _dictionary?");
 399   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 400           totalSizeInIndexedFreeLists() +
 401           _smallLinearAllocBlock._word_size) * HeapWordSize;
 402 }
 403 
 404 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 405   assert(_dictionary != NULL, "No _dictionary?");
 406   assert_locked();
 407   size_t res = _dictionary->max_chunk_size();
 408   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 409                        (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when the res
  // calculated above is less than IndexSetSize,
  // just return that res? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
 416   // Note: do not change the loop test i >= res + IndexSetStride
 417   // to i > res below, because i is unsigned and res may be zero.
 418   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 419        i -= IndexSetStride) {
 420     if (_indexedFreeList[i].head() != NULL) {
 421       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 422       return i;
 423     }
 424   }
 425   return res;
 426 }
 427 
 428 void LinearAllocBlock::print_on(outputStream* st) const {
 429   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 430             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 431             _ptr, _word_size, _refillSize, _allocation_size_limit);
 432 }
 433 
 434 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 435   st->print_cr("COMPACTIBLE FREELIST SPACE");
 436   st->print_cr(" Space:");
 437   Space::print_on(st);
 438 
 439   st->print_cr("promoInfo:");
 440   _promoInfo.print_on(st);
 441 
 442   st->print_cr("_smallLinearAllocBlock");
 443   _smallLinearAllocBlock.print_on(st);
 444 
 445   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 446 
 447   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 448                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 449 }
 450 
 451 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 452 const {
 453   reportIndexedFreeListStatistics();
 454   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 455   gclog_or_tty->print_cr("---------------------------");
 456   FreeList<FreeChunk>::print_labels_on(st, "size");
 457   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 458     _indexedFreeList[i].print_on(gclog_or_tty);
 459     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 460          fc = fc->next()) {
 461       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 462                           fc, (HeapWord*)fc + i,
 463                           fc->cantCoalesce() ? "\t CC" : "");
 464     }
 465   }
 466 }
 467 
 468 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 469 const {
 470   _promoInfo.print_on(st);
 471 }
 472 
 473 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 474 const {
 475   _dictionary->report_statistics();
 476   st->print_cr("Layout of Freelists in Tree");
 477   st->print_cr("---------------------------");
 478   _dictionary->print_free_lists(st);
 479 }
 480 
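// A closure that prints each block in the space, distinguishing objects
// from free chunks; after the remark phase it also reports whether an
// object is live or dead, using the collector's marking bit map.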
 481 class BlkPrintingClosure: public BlkClosure {
 482   const CMSCollector*             _collector;
 483   const CompactibleFreeListSpace* _sp;
 484   const CMSBitMap*                _live_bit_map;
 485   const bool                      _post_remark;
 486   outputStream*                   _st;
 487 public:
 488   BlkPrintingClosure(const CMSCollector* collector,
 489                      const CompactibleFreeListSpace* sp,
 490                      const CMSBitMap* live_bit_map,
 491                      outputStream* st):
 492     _collector(collector),
 493     _sp(sp),
 494     _live_bit_map(live_bit_map),
 495     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 496     _st(st) { }
 497   size_t do_blk(HeapWord* addr);
 498 };
 499 
 500 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 501   size_t sz = _sp->block_size_no_stall(addr, _collector);
 502   assert(sz != 0, "Should always be able to compute a size");
 503   if (_sp->block_is_obj(addr)) {
 504     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 505     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 506       addr,
 507       dead ? "dead" : "live",
 508       sz,
 509       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 510     if (CMSPrintObjectsInDump && !dead) {
 511       oop(addr)->print_on(_st);
 512       _st->print_cr("--------------------------------------");
 513     }
 514   } else { // free block
 515     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 516       addr, sz, CMSPrintChunksInDump ? ":" : ".");
 517     if (CMSPrintChunksInDump) {
 518       ((FreeChunk*)addr)->print_on(_st);
 519       _st->print_cr("--------------------------------------");
 520     }
 521   }
 522   return sz;
 523 }
 524 
 525 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 526   outputStream* st) {
 527   st->print_cr("\n=========================");
 528   st->print_cr("Block layout in CMS Heap:");
 529   st->print_cr("=========================");
 530   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 531   blk_iterate(&bpcl);
 532 
 533   st->print_cr("\n=======================================");
 534   st->print_cr("Order & Layout of Promotion Info Blocks");
 535   st->print_cr("=======================================");
 536   print_promo_info_blocks(st);
 537 
 538   st->print_cr("\n===========================");
 539   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 541   print_indexed_free_lists(st);
 542 
 543   st->print_cr("\n=================================");
 544   st->print_cr("Order of Free Lists in Dictionary");
 545   st->print_cr("=================================");
 546   print_dictionary_free_lists(st);
 547 }
 548 
 549 
 550 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 551   assert_lock_strong(&_freelistLock);
 552   assert(PrintFLSStatistics != 0, "Reporting error");
 553   _dictionary->report_statistics();
 554   if (PrintFLSStatistics > 1) {
 555     reportIndexedFreeListStatistics();
 556     size_t total_size = totalSizeInIndexedFreeLists() +
 557                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 559   }
 560 }
 561 
 562 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 563   assert_lock_strong(&_freelistLock);
 564   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 565                       "--------------------------------\n");
 566   size_t total_size = totalSizeInIndexedFreeLists();
 567   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
  if (free_blocks != 0) {
    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 573   }
 574 }
 575 
 576 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 577   size_t res = 0;
 578   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 579     debug_only(
 580       ssize_t recount = 0;
 581       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 582          fc = fc->next()) {
 583         recount += 1;
 584       }
 585       assert(recount == _indexedFreeList[i].count(),
 586         "Incorrect count in list");
 587     )
 588     res += _indexedFreeList[i].count();
 589   }
 590   return res;
 591 }
 592 
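// Return the largest chunk size for which the corresponding indexed free
// list is non-empty, or 0 if all of the indexed lists are empty.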
 593 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 594   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 595     if (_indexedFreeList[i].head() != NULL) {
 596       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 597       return (size_t)i;
 598     }
 599   }
 600   return 0;
 601 }
 602 
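// Set a new end for the space. When the space grows, the newly exposed
// words [prevEnd, value) are either placed in the small linear allocation
// block (non-adaptive free lists with an empty linAB) or added to the free
// lists, possibly coalescing with the last free block.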
 603 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 604   HeapWord* prevEnd = end();
 605   assert(prevEnd != value, "unnecessary set_end call");
 606   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 607         "New end is below unallocated block");
 608   _end = value;
 609   if (prevEnd != NULL) {
 610     // Resize the underlying block offset table.
 611     _bt.resize(pointer_delta(value, bottom()));
 612     if (value <= prevEnd) {
 613       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 614              "New end is below unallocated block");
 615     } else {
 616       // Now, take this new chunk and add it to the free blocks.
 617       // Note that the BOT has not yet been updated for this block.
 618       size_t newFcSize = pointer_delta(value, prevEnd);
 619       // XXX This is REALLY UGLY and should be fixed up. XXX
 620       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 621         // Mark the boundary of the new block in BOT
 622         _bt.mark_block(prevEnd, value);
 623         // put it all in the linAB
 624         if (ParallelGCThreads == 0) {
 625           _smallLinearAllocBlock._ptr = prevEnd;
 626           _smallLinearAllocBlock._word_size = newFcSize;
 627           repairLinearAllocBlock(&_smallLinearAllocBlock);
 628         } else { // ParallelGCThreads > 0
 629           MutexLockerEx x(parDictionaryAllocLock(),
 630                           Mutex::_no_safepoint_check_flag);
 631           _smallLinearAllocBlock._ptr = prevEnd;
 632           _smallLinearAllocBlock._word_size = newFcSize;
 633           repairLinearAllocBlock(&_smallLinearAllocBlock);
 634         }
 635         // Births of chunks put into a LinAB are not recorded.  Births
 636         // of chunks as they are allocated out of a LinAB are.
 637       } else {
 638         // Add the block to the free lists, if possible coalescing it
 639         // with the last free block, and update the BOT and census data.
 640         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 641       }
 642     }
 643   }
 644 }
 645 
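// A dirty-card-to-oop closure specialized for CompactibleFreeListSpace.
// It walks the blocks in a dirty memory region, applying the wrapped oop
// closure to live objects, and chooses between parallel and serial block
// walkers based on the number of active GC worker threads.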
 646 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 647   CompactibleFreeListSpace* _cfls;
 648   CMSCollector* _collector;
 649 protected:
 650   // Override.
 651 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 652   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 653                                        HeapWord* bottom, HeapWord* top, \
 654                                        ClosureType* cl);                \
 655       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 656                                        HeapWord* bottom, HeapWord* top, \
 657                                        ClosureType* cl);                \
 658     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 659                                        HeapWord* bottom, HeapWord* top, \
 660                                        ClosureType* cl)
 661   walk_mem_region_with_cl_DECL(OopClosure);
 662   walk_mem_region_with_cl_DECL(FilteringClosure);
 663 
 664 public:
 665   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 666                       CMSCollector* collector,
 667                       OopClosure* cl,
 668                       CardTableModRefBS::PrecisionStyle precision,
 669                       HeapWord* boundary) :
 670     Filtering_DCTOC(sp, cl, precision, boundary),
 671     _cfls(sp), _collector(collector) {}
 672 };
 673 
 674 // We de-virtualize the block-related calls below, since we know that our
 675 // space is a CompactibleFreeListSpace.
 676 
 677 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 678 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 679                                                  HeapWord* bottom,              \
 680                                                  HeapWord* top,                 \
 681                                                  ClosureType* cl) {             \
 682    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 683    if (is_par) {                                                                \
 684      assert(SharedHeap::heap()->n_par_threads() ==                              \
 685             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 686      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 687    } else {                                                                     \
 688      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 689    }                                                                            \
 690 }                                                                               \
 691 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 692                                                       HeapWord* bottom,         \
 693                                                       HeapWord* top,            \
 694                                                       ClosureType* cl) {        \
 695   /* Skip parts that are before "mr", in case "block_start" sent us             \
 696      back too far. */                                                           \
 697   HeapWord* mr_start = mr.start();                                              \
 698   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 699   HeapWord* next = bottom + bot_size;                                           \
 700   while (next < mr_start) {                                                     \
 701     bottom = next;                                                              \
 702     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 703     next = bottom + bot_size;                                                   \
 704   }                                                                             \
 705                                                                                 \
 706   while (bottom < top) {                                                        \
 707     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 708         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 709                     oop(bottom)) &&                                             \
 710         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 711       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 712       bottom += _cfls->adjustObjectSize(word_sz);                               \
 713     } else {                                                                    \
 714       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 715     }                                                                           \
 716   }                                                                             \
 717 }                                                                               \
 718 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 719                                                         HeapWord* bottom,       \
 720                                                         HeapWord* top,          \
 721                                                         ClosureType* cl) {      \
 722   /* Skip parts that are before "mr", in case "block_start" sent us             \
 723      back too far. */                                                           \
 724   HeapWord* mr_start = mr.start();                                              \
 725   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 726   HeapWord* next = bottom + bot_size;                                           \
 727   while (next < mr_start) {                                                     \
 728     bottom = next;                                                              \
 729     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 730     next = bottom + bot_size;                                                   \
 731   }                                                                             \
 732                                                                                 \
 733   while (bottom < top) {                                                        \
 734     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 735         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 736                     oop(bottom)) &&                                             \
 737         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 738       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 739       bottom += _cfls->adjustObjectSize(word_sz);                               \
 740     } else {                                                                    \
 741       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 742     }                                                                           \
 743   }                                                                             \
 744 }
 745 
 746 // (There are only two of these, rather than N, because the split is due
 747 // only to the introduction of the FilteringClosure, a local part of the
 748 // impl of this abstraction.)
 749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
 750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 751 
 752 DirtyCardToOopClosure*
 753 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
 754                                       CardTableModRefBS::PrecisionStyle precision,
 755                                       HeapWord* boundary) {
 756   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 757 }
 758 
 759 
 760 // Note on locking for the space iteration functions:
 761 // since the collector's iteration activities are concurrent with
 762 // allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance, a block being iterated
 764 // may suddenly be allocated or divided up and part of it allocated and
 765 // so on.
 766 
 767 // Apply the given closure to each block in the space.
 768 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 769   assert_lock_strong(freelistLock());
 770   HeapWord *cur, *limit;
 771   for (cur = bottom(), limit = end(); cur < limit;
 772        cur += cl->do_blk_careful(cur));
 773 }
 774 
 775 // Apply the given closure to each block in the space.
 776 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 777   assert_lock_strong(freelistLock());
 778   HeapWord *cur, *limit;
 779   for (cur = bottom(), limit = end(); cur < limit;
 780        cur += cl->do_blk(cur));
 781 }
 782 
 783 // Apply the given closure to each oop in the space.
 784 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
 785   assert_lock_strong(freelistLock());
 786   HeapWord *cur, *limit;
 787   size_t curSize;
 788   for (cur = bottom(), limit = end(); cur < limit;
 789        cur += curSize) {
 790     curSize = block_size(cur);
 791     if (block_is_obj(cur)) {
 792       oop(cur)->oop_iterate(cl);
 793     }
 794   }
 795 }
 796 
// Apply the given closure to each oop in the intersection of the space and the memory region.
 798 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
 799   assert_lock_strong(freelistLock());
 800   if (is_empty()) {
 801     return;
 802   }
 803   MemRegion cur = MemRegion(bottom(), end());
 804   mr = mr.intersection(cur);
 805   if (mr.is_empty()) {
 806     return;
 807   }
 808   if (mr.equals(cur)) {
 809     oop_iterate(cl);
 810     return;
 811   }
 812   assert(mr.end() <= end(), "just took an intersection above");
 813   HeapWord* obj_addr = block_start(mr.start());
 814   HeapWord* t = mr.end();
 815 
 816   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 817   if (block_is_obj(obj_addr)) {
 818     // Handle first object specially.
 819     oop obj = oop(obj_addr);
 820     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 821   } else {
 822     FreeChunk* fc = (FreeChunk*)obj_addr;
 823     obj_addr += fc->size();
 824   }
 825   while (obj_addr < t) {
 826     HeapWord* obj = obj_addr;
 827     obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than t (the end of the region), then the
    // entire object "obj" is within the region.
 830     if (obj_addr <= t) {
 831       if (block_is_obj(obj)) {
 832         oop(obj)->oop_iterate(cl);
 833       }
 834     } else {
 835       // "obj" extends beyond end of region
 836       if (block_is_obj(obj)) {
 837         oop(obj)->oop_iterate(&smr_blk);
 838       }
 839       break;
 840     }
 841   }
 842 }
 843 
 844 // NOTE: In the following methods, in order to safely be able to
 845 // apply the closure to an object, we need to be sure that the
 846 // object has been initialized. We are guaranteed that an object
 847 // is initialized if we are holding the Heap_lock with the
 848 // world stopped.
 849 void CompactibleFreeListSpace::verify_objects_initialized() const {
 850   if (is_init_completed()) {
 851     assert_locked_or_safepoint(Heap_lock);
 852     if (Universe::is_fully_initialized()) {
 853       guarantee(SafepointSynchronize::is_at_safepoint(),
 854                 "Required for objects to be initialized");
 855     }
 856   } // else make a concession at vm start-up
 857 }
 858 
 859 // Apply the given closure to each object in the space
 860 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 861   assert_lock_strong(freelistLock());
 862   NOT_PRODUCT(verify_objects_initialized());
 863   HeapWord *cur, *limit;
 864   size_t curSize;
 865   for (cur = bottom(), limit = end(); cur < limit;
 866        cur += curSize) {
 867     curSize = block_size(cur);
 868     if (block_is_obj(cur)) {
 869       blk->do_object(oop(cur));
 870     }
 871   }
 872 }
 873 
// Apply the given closure to each live object in the space.
//   The use of CompactibleFreeListSpace by ConcurrentMarkSweepGeneration
// for concurrent GCs allows objects in the space to hold references to
// objects that are no longer valid.  For example, an object may reference
// another object that has already been swept up (collected).  This method
// uses obj_is_alive() to determine whether it is safe to apply the closure
// to an object.  See obj_is_alive() for details on how the liveness of an
// object is decided.
 883 
 884 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 885   assert_lock_strong(freelistLock());
 886   NOT_PRODUCT(verify_objects_initialized());
 887   HeapWord *cur, *limit;
 888   size_t curSize;
 889   for (cur = bottom(), limit = end(); cur < limit;
 890        cur += curSize) {
 891     curSize = block_size(cur);
 892     if (block_is_obj(cur) && obj_is_alive(cur)) {
 893       blk->do_object(oop(cur));
 894     }
 895   }
 896 }
 897 
 898 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 899                                                   UpwardsObjectClosure* cl) {
 900   assert_locked(freelistLock());
 901   NOT_PRODUCT(verify_objects_initialized());
 902   Space::object_iterate_mem(mr, cl);
 903 }
 904 
 905 // Callers of this iterator beware: The closure application should
 906 // be robust in the face of uninitialized objects and should (always)
 907 // return a correct size so that the next addr + size below gives us a
 908 // valid block boundary. [See for instance,
 909 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 910 // in ConcurrentMarkSweepGeneration.cpp.]
 911 HeapWord*
 912 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 913   assert_lock_strong(freelistLock());
 914   HeapWord *addr, *last;
 915   size_t size;
 916   for (addr = bottom(), last  = end();
 917        addr < last; addr += size) {
 918     FreeChunk* fc = (FreeChunk*)addr;
 919     if (fc->is_free()) {
 920       // Since we hold the free list lock, which protects direct
 921       // allocation in this generation by mutators, a free object
 922       // will remain free throughout this iteration code.
 923       size = fc->size();
 924     } else {
 925       // Note that the object need not necessarily be initialized,
 926       // because (for instance) the free list lock does NOT protect
 927       // object initialization. The closure application below must
 928       // therefore be correct in the face of uninitialized objects.
 929       size = cl->do_object_careful(oop(addr));
 930       if (size == 0) {
 931         // An unparsable object found. Signal early termination.
 932         return addr;
 933       }
 934     }
 935   }
 936   return NULL;
 937 }
 938 
 939 // Callers of this iterator beware: The closure application should
 940 // be robust in the face of uninitialized objects and should (always)
 941 // return a correct size so that the next addr + size below gives us a
 942 // valid block boundary. [See for instance,
 943 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 944 // in ConcurrentMarkSweepGeneration.cpp.]
 945 HeapWord*
 946 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 947   ObjectClosureCareful* cl) {
 948   assert_lock_strong(freelistLock());
 949   // Can't use used_region() below because it may not necessarily
 950   // be the same as [bottom(),end()); although we could
 951   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 952   // that appears too cumbersome, so we just do the simpler check
 953   // in the assertion below.
 954   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 955          "mr should be non-empty and within used space");
 956   HeapWord *addr, *end;
 957   size_t size;
 958   for (addr = block_start_careful(mr.start()), end  = mr.end();
 959        addr < end; addr += size) {
 960     FreeChunk* fc = (FreeChunk*)addr;
 961     if (fc->is_free()) {
 962       // Since we hold the free list lock, which protects direct
 963       // allocation in this generation by mutators, a free object
 964       // will remain free throughout this iteration code.
 965       size = fc->size();
 966     } else {
 967       // Note that the object need not necessarily be initialized,
 968       // because (for instance) the free list lock does NOT protect
 969       // object initialization. The closure application below must
 970       // therefore be correct in the face of uninitialized objects.
 971       size = cl->do_object_careful_m(oop(addr), mr);
 972       if (size == 0) {
 973         // An unparsable object found. Signal early termination.
 974         return addr;
 975       }
 976     }
 977   }
 978   return NULL;
 979 }
 980 
 981 
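// Block-start queries are answered by the block offset table (_bt).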
 982 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 983   NOT_PRODUCT(verify_objects_initialized());
 984   return _bt.block_start(p);
 985 }
 986 
 987 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 988   return _bt.block_start_careful(p);
 989 }
 990 
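// Return the size, in heap words, of the block starting at "p". The block
// may be mutated concurrently (a free chunk can be allocated and turned
// into an object under us), so the free-chunk indication is re-checked
// after reading the size, and the loop retries until a consistent view is
// obtained.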
 991 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 992   NOT_PRODUCT(verify_objects_initialized());
 993   // This must be volatile, or else there is a danger that the compiler
 994   // will compile the code below into a sometimes-infinite loop, by keeping
 995   // the value read the first time in a register.
 996   while (true) {
 997     // We must do this until we get a consistent view of the object.
 998     if (FreeChunk::indicatesFreeChunk(p)) {
 999       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1000       size_t res = fc->size();
1001       // If the object is still a free chunk, return the size, else it
1002       // has been allocated so try again.
1003       if (FreeChunk::indicatesFreeChunk(p)) {
1004         assert(res != 0, "Block size should not be 0");
1005         return res;
1006       }
1007     } else {
1008       // must read from what 'p' points to in each loop.
1009       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
1010       if (k != NULL) {
1011         assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
1012         oop o = (oop)p;
1013         assert(o->is_parsable(), "Should be parsable");
1014         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1015         size_t res = o->size_given_klass(k->klass_part());
1016         res = adjustObjectSize(res);
1017         assert(res != 0, "Block size should not be 0");
1018         return res;
1019       }
1020     }
1021   }
1022 }
1023 
1024 // A variant of the above that uses the Printezis bits for
1025 // unparsable but allocated objects. This avoids any possible
1026 // stalls waiting for mutators to initialize objects, and is
1027 // thus potentially faster than the variant above. However,
1028 // this variant may return a zero size for a block that is
1029 // under mutation and for which a consistent size cannot be
1030 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1031 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1032                                                      const CMSCollector* c)
1033 const {
1034   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1035   // This must be volatile, or else there is a danger that the compiler
1036   // will compile the code below into a sometimes-infinite loop, by keeping
1037   // the value read the first time in a register.
1038   DEBUG_ONLY(uint loops = 0;)
1039   while (true) {
1040     // We must do this until we get a consistent view of the object.
1041     if (FreeChunk::indicatesFreeChunk(p)) {
1042       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1043       size_t res = fc->size();
1044       if (FreeChunk::indicatesFreeChunk(p)) {
1045         assert(res != 0, "Block size should not be 0");
1046         assert(loops == 0, "Should be 0");
1047         return res;
1048       }
1049     } else {
1050       // must read from what 'p' points to in each loop.
1051       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
1052       // We trust the size of any object that has a non-NULL
1053       // klass and (for those in the perm gen) is parsable
1054       // -- irrespective of its conc_safe-ty.
1055       if (k != NULL && ((oopDesc*)p)->is_parsable()) {
1056         assert(k->is_oop(), "Should really be klass oop.");
1057         oop o = (oop)p;
1058         assert(o->is_oop(), "Should be an oop");
1059         size_t res = o->size_given_klass(k->klass_part());
1060         res = adjustObjectSize(res);
1061         assert(res != 0, "Block size should not be 0");
1062         return res;
1063       } else {
1064         // May return 0 if P-bits not present.
1065         return c->block_size_if_printezis_bits(p);
1066       }
1067     }
1068     assert(loops == 0, "Can loop at most once");
1069     DEBUG_ONLY(loops++;)
1070   }
1071 }
1072 
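// As block_size(), but for use when there is no concurrent (parallel)
// mutation of the block, so no retry loop is needed.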
1073 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1074   NOT_PRODUCT(verify_objects_initialized());
1075   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1076   FreeChunk* fc = (FreeChunk*)p;
1077   if (fc->is_free()) {
1078     return fc->size();
1079   } else {
1080     // Ignore mark word because this may be a recently promoted
1081     // object whose mark word is used to chain together grey
1082     // objects (the last one would have a null value).
1083     assert(oop(p)->is_oop(true), "Should be an oop");
1084     return adjustObjectSize(oop(p)->size());
1085   }
1086 }
1087 
1088 // This implementation assumes that the property of "being an object" is
1089 // stable.  But being a free chunk may not be (because of parallel
// promotion).
1091 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1092   FreeChunk* fc = (FreeChunk*)p;
1093   assert(is_in_reserved(p), "Should be in space");
1094   // When doing a mark-sweep-compact of the CMS generation, this
1095   // assertion may fail because prepare_for_compaction() uses
1096   // space that is garbage to maintain information on ranges of
1097   // live objects so that these live ranges can be moved as a whole.
  // The assertion below is commented out until that problem can be solved
  // (i.e., the block start calculation may look at objects at addresses
  // below "p" in finding the object that contains "p", and those objects,
  // if garbage, may have been modified to hold live range information).
1103   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1104   //        "Should be a block boundary");
1105   if (FreeChunk::indicatesFreeChunk(p)) return false;
1106   klassOop k = oop(p)->klass_or_null();
1107   if (k != NULL) {
1108     // Ignore mark word because it may have been used to
1109     // chain together promoted objects (the last one
1110     // would have a null value).
1111     assert(oop(p)->is_oop(true), "Should be an oop");
1112     return true;
1113   } else {
1114     return false;  // Was not an object at the start of collection.
1115   }
1116 }
1117 
1118 // Check if the object is alive. This fact is checked either by consulting
1119 // the main marking bitmap in the sweeping phase or, if it's a permanent
1120 // generation and we're not in the sweeping phase, by checking the
1121 // perm_gen_verify_bit_map where we store the "deadness" information if
1122 // we did not sweep the perm gen in the most recent previous GC cycle.
1123 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1124   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1125          "Else races are possible");
1126   assert(block_is_obj(p), "The address should point to an object");
1127 
1128   // If we're sweeping, we use object liveness information from the main bit map
1129   // for both perm gen and old gen.
1130   // We don't need to lock the bitmap (live_map or dead_map below), because
1131   // EITHER we are in the middle of the sweeping phase, and the
1132   // main marking bit map (live_map below) is locked,
1133   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1134   // is stable, because it's mutated only in the sweeping phase.
1135   // NOTE: This method is also used by jmap where, if class unloading is
1136   // off, the results can return "false" for legitimate perm objects,
1137   // when we are not in the midst of a sweeping phase, which can result
1138   // in jmap not reporting certain perm gen objects. This will be moot
1139   // if/when the perm gen goes away in the future.
1140   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1141     CMSBitMap* live_map = _collector->markBitMap();
1142     return live_map->par_isMarked((HeapWord*) p);
1143   } else {
1144     // If we're not currently sweeping and we haven't swept the perm gen in
1145     // the previous concurrent cycle then we may have dead but unswept objects
1146     // in the perm gen. In this case, we use the "deadness" information
1147     // that we had saved in perm_gen_verify_bit_map at the last sweep.
1148     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
1149       if (_collector->verifying()) {
1150         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
1151         // Object is marked in the dead_map bitmap at the previous sweep
1152         // when we know that it's dead; if the bitmap is not allocated then
1153         // the object is alive.
        return (dead_map->sizeInBits() == 0) // i.e., the bit map was never allocated
1155                || !dead_map->par_isMarked((HeapWord*) p);
1156       } else {
1157         return false; // We can't say for sure if it's live, so we say that it's dead.
1158       }
1159     }
1160   }
1161   return true;
1162 }
1163 
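// As block_is_obj(), but for use when there is no concurrent (parallel)
// promotion, so the free bit of the chunk can be consulted directly.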
1164 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1165   FreeChunk* fc = (FreeChunk*)p;
1166   assert(is_in_reserved(p), "Should be in space");
1167   assert(_bt.block_start(p) == p, "Should be a block boundary");
1168   if (!fc->is_free()) {
1169     // Ignore mark word because it may have been used to
1170     // chain together promoted objects (the last one
1171     // would have a null value).
1172     assert(oop(p)->is_oop(true), "Should be an oop");
1173     return true;
1174   }
1175   return false;
1176 }
1177 
1178 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelist lock when you call this.
1180 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1181   size_t size = 0;
1182   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1183     debug_only(
1184       // We may be calling here without the lock in which case we
1185       // won't do this modest sanity check.
1186       if (freelistLock()->owned_by_self()) {
1187         size_t total_list_size = 0;
1188         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1189           fc = fc->next()) {
1190           total_list_size += i;
1191         }
1192         assert(total_list_size == i * _indexedFreeList[i].count(),
1193                "Count in list is incorrect");
1194       }
1195     )
1196     size += i * _indexedFreeList[i].count();
1197   }
1198   return size;
1199 }
1200 
1201 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1202   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1203   return allocate(size);
1204 }
1205 
1206 HeapWord*
1207 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1208   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1209 }
1210 
1211 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1212   assert_lock_strong(freelistLock());
1213   HeapWord* res = NULL;
1214   assert(size == adjustObjectSize(size),
1215          "use adjustObjectSize() before calling into allocate()");
1216 
1217   if (_adaptive_freelists) {
1218     res = allocate_adaptive_freelists(size);
1219   } else {  // non-adaptive free lists
1220     res = allocate_non_adaptive_freelists(size);
1221   }
1222 
1223   if (res != NULL) {
1224     // check that res does lie in this space!
1225     assert(is_in_reserved(res), "Not in this space!");
1226     assert(is_aligned((void*)res), "alignment check");
1227 
1228     FreeChunk* fc = (FreeChunk*)res;
1229     fc->markNotFree();
1230     assert(!fc->is_free(), "shouldn't be marked free");
1231     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1232     // Verify that the block offset table shows this to
1233     // be a single block, but not one which is unallocated.
1234     _bt.verify_single_block(res, size);
1235     _bt.verify_not_unallocated(res, size);
1236     // mangle a just allocated object with a distinct pattern.
1237     debug_only(fc->mangleAllocated(size));
1238   }
1239 
1240   return res;
1241 }
1242 
1243 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1244   HeapWord* res = NULL;
1245   // try and use linear allocation for smaller blocks
1246   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1247     // if successful, the following also adjusts block offset table
1248     res = getChunkFromSmallLinearAllocBlock(size);
1249   }
1250   // Else triage to indexed lists for smaller sizes
1251   if (res == NULL) {
1252     if (size < SmallForDictionary) {
1253       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1254     } else {
1255       // else get it from the big dictionary; if even this doesn't
1256       // work we are out of luck.
1257       res = (HeapWord*)getChunkFromDictionaryExact(size);
1258     }
1259   }
1260 
1261   return res;
1262 }
1263 
1264 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1265   assert_lock_strong(freelistLock());
1266   HeapWord* res = NULL;
1267   assert(size == adjustObjectSize(size),
1268          "use adjustObjectSize() before calling into allocate()");
1269 
1270   // Strategy
1271   //   if small
1272   //     exact size from small object indexed list
1273   //     small or large linear allocation block (linAB) as appropriate
1274   //     take from lists of greater sized chunks
1275   //   else
1276   //     dictionary
1277   //     small or large linear allocation block if it has the space
1278   // Try allocating exact size from indexTable first
1279   if (size < IndexSetSize) {
1280     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1281     if (res != NULL) {
1282       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1283         "Not removed from free list");
1284       // no block offset table adjustment is necessary on blocks in
1285       // the indexed lists.
1286 
1287     // Try allocating from the small LinAB
1288     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1289         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1290         // if successful, the above also adjusts block offset table
1291         // Note that this call will refill the LinAB to
1292         // satisfy the request.  This is different than
1293         // evm.
1294         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1295     } else {
1296       // Raid the exact free lists larger than size, even if they are not
1297       // overpopulated.
1298       res = (HeapWord*) getChunkFromGreater(size);
1299     }
1300   } else {
1301     // Big objects get allocated directly from the dictionary.
1302     res = (HeapWord*) getChunkFromDictionaryExact(size);
1303     if (res == NULL) {
1304       // Try hard not to fail since an allocation failure will likely
1305       // trigger a synchronous GC.  Try to get the space from the
1306       // allocation blocks.
1307       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1308     }
1309   }
1310 
1311   return res;
1312 }
1313 
1314 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1315 // when promoting obj.
1316 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1317   // Depending on the object size, expansion may require refilling either a
1318   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1319   // is added because the dictionary may over-allocate to avoid fragmentation.
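  // A purely illustrative example (hypothetical numbers): with obj_size of
  // 100 words, a linAB refill size of 512 words (non-adaptive free lists),
  // a promotion-info refill size of 64 words and MinChunkSize of 4 words,
  // the estimate below would be max(100, 512) + 64 + 2*4 = 584 words.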
1320   size_t space = obj_size;
1321   if (!_adaptive_freelists) {
1322     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1323   }
1324   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1325   return space;
1326 }
1327 
1328 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1329   FreeChunk* ret;
1330 
1331   assert(numWords >= MinChunkSize, "Size is less than minimum");
1332   assert(linearAllocationWouldFail() || bestFitFirst(),
1333     "Should not be here");
1334 
1335   size_t i;
1336   size_t currSize = numWords + MinChunkSize;
1337   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1338   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1339     FreeList<FreeChunk>* fl = &_indexedFreeList[i];
1340     if (fl->head()) {
1341       ret = getFromListGreater(fl, numWords);
1342       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1343       return ret;
1344     }
1345   }
1346 
1347   currSize = MAX2((size_t)SmallForDictionary,
1348                   (size_t)(numWords + MinChunkSize));
1349 
1350   /* Try to get a chunk that satisfies request, while avoiding
1351      fragmentation that can't be handled. */
1352   {
1353     ret =  dictionary()->get_chunk(currSize);
1354     if (ret != NULL) {
1355       assert(ret->size() - numWords >= MinChunkSize,
1356              "Chunk is too small");
1357       _bt.allocated((HeapWord*)ret, ret->size());
1358       /* Carve returned chunk. */
1359       (void) splitChunkAndReturnRemainder(ret, numWords);
1360       /* Label this as no longer a free chunk. */
1361       assert(ret->is_free(), "This chunk should be free");
1362       ret->link_prev(NULL);
1363     }
1364     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1365     return ret;
1366   }
1367   ShouldNotReachHere();
1368 }
1369 
1370 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1371   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1372   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1373 }
1374 
1375 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1376   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1377          (_smallLinearAllocBlock._word_size == fc->size()),
1378          "Linear allocation block shows incorrect size");
1379   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1380           (_smallLinearAllocBlock._word_size == fc->size()));
1381 }
1382 
1383 // Check if the purported free chunk is present either as a linear
1384 // allocation block, the size-indexed table of (smaller) free blocks,
1385 // or the larger free blocks kept in the binary tree dictionary.
1386 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1387   if (verify_chunk_is_linear_alloc_block(fc)) {
1388     return true;
1389   } else if (fc->size() < IndexSetSize) {
1390     return verifyChunkInIndexedFreeLists(fc);
1391   } else {
1392     return dictionary()->verify_chunk_in_free_list(fc);
1393   }
1394 }
1395 
1396 #ifndef PRODUCT
1397 void CompactibleFreeListSpace::assert_locked() const {
1398   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1399 }
1400 
1401 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1402   CMSLockVerifier::assert_locked(lock);
1403 }
1404 #endif
1405 
1406 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1407   // In the parallel case, the main thread holds the free list lock
1408   // on behalf of the parallel threads.
1409   FreeChunk* fc;
1410   {
1411     // If GC is parallel, this might be called by several threads.
1412     // This should be rare enough that the locking overhead won't affect
1413     // the sequential code.
1414     MutexLockerEx x(parDictionaryAllocLock(),
1415                     Mutex::_no_safepoint_check_flag);
1416     fc = getChunkFromDictionary(size);
1417   }
1418   if (fc != NULL) {
1419     fc->dontCoalesce();
1420     assert(fc->is_free(), "Should be free, but not coalescable");
1421     // Verify that the block offset table shows this to
1422     // be a single block, but not one which is unallocated.
1423     _bt.verify_single_block((HeapWord*)fc, fc->size());
1424     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1425   }
1426   return fc;
1427 }
1428 
1429 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1430   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1431   assert_locked();
1432 
1433   // If we are tracking promotions, then first ensure space for the
1434   // promotion (including spooling space for saving the header if necessary),
1435   // then allocate and copy, and then track the promoted info if needed.
1436   // When tracking (see PromotionInfo::track()), the mark word may
1437   // be displaced and in this case restoration of the mark word
1438   // occurs in the (oop_since_save_marks_)iterate phase.
1439   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1440     return NULL;
1441   }
1442   // Call the allocate(size_t, bool) form directly to avoid the
1443   // additional call through the allocate(size_t) form.  Having
1444   // the compiler inline the call is problematic because allocate(size_t)
1445   // is a virtual method.
1446   HeapWord* res = allocate(adjustObjectSize(obj_size));
1447   if (res != NULL) {
1448     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1449     // if we should be tracking promotions, do so.
1450     if (_promoInfo.tracking()) {
1451         _promoInfo.track((PromotedObject*)res);
1452     }
1453   }
1454   return oop(res);
1455 }
1456 
1457 HeapWord*
1458 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1459   assert_locked();
1460   assert(size >= MinChunkSize, "minimum chunk size");
1461   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1462     "maximum from smallLinearAllocBlock");
1463   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1464 }
1465 
1466 HeapWord*
1467 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1468                                                        size_t size) {
1469   assert_locked();
1470   assert(size >= MinChunkSize, "too small");
1471   HeapWord* res = NULL;
1472   // Try to do linear allocation from blk; bail out if the block is empty.
1473   if (blk->_word_size == 0) {
1474     // We have probably been unable to refill this block, either in the
1475     // prologue or after it was exhausted at the last linear allocation.
1476     // Bail out until next time.
1477     assert(blk->_ptr == NULL, "consistency check");
1478     return NULL;
1479   }
1480   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1481   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1482   if (res != NULL) return res;
1483 
1484   // about to exhaust this linear allocation block
1485   if (blk->_word_size == size) { // exactly satisfied
1486     res = blk->_ptr;
1487     _bt.allocated(res, blk->_word_size);
1488   } else if (size + MinChunkSize <= blk->_refillSize) {
1489     size_t sz = blk->_word_size;
1490     // Update _unallocated_block if the size is such that the chunk would be
1491     // returned to the indexed free list.  All other chunks in the indexed
1492     // free lists are allocated from the dictionary, so _unallocated_block
1493     // has already been adjusted for them.  Do it here so that the cost
1494     // is incurred for all chunks added back to the indexed free lists.
1495     if (sz < SmallForDictionary) {
1496       _bt.allocated(blk->_ptr, sz);
1497     }
1498     // Return the chunk that isn't big enough, and then refill below.
1499     addChunkToFreeLists(blk->_ptr, sz);
1500     split_birth(sz);
1501     // Don't keep statistics on adding back chunk from a LinAB.
1502   } else {
1503     // A refilled block would not satisfy the request.
1504     return NULL;
1505   }
1506 
1507   blk->_ptr = NULL; blk->_word_size = 0;
1508   refillLinearAllocBlock(blk);
1509   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1510          "block was replenished");
1511   if (res != NULL) {
1512     split_birth(size);
1513     repairLinearAllocBlock(blk);
1514   } else if (blk->_ptr != NULL) {
1515     res = blk->_ptr;
1516     size_t blk_size = blk->_word_size;
1517     blk->_word_size -= size;
1518     blk->_ptr  += size;
1519     split_birth(size);
1520     repairLinearAllocBlock(blk);
1521     // Update BOT last so that other (parallel) GC threads see a consistent
1522     // view of the BOT and free blocks.
1523     // Above must occur before BOT is updated below.
1524     OrderAccess::storestore();
1525     _bt.split_block(res, blk_size, size);  // adjust block offset table
1526   }
1527   return res;
1528 }
1529 
1530 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1531                                         LinearAllocBlock* blk,
1532                                         size_t size) {
1533   assert_locked();
1534   assert(size >= MinChunkSize, "too small");
1535 
1536   HeapWord* res = NULL;
1537   // This is the common case.  Keep it simple.
1538   if (blk->_word_size >= size + MinChunkSize) {
1539     assert(blk->_ptr != NULL, "consistency check");
1540     res = blk->_ptr;
1541     // Note that the BOT is up-to-date for the linAB before allocation.  It
1542     // indicates the start of the linAB.  The split_block() updates the
1543     // BOT for the linAB after the allocation (indicates the start of the
1544     // next chunk to be allocated).
1545     size_t blk_size = blk->_word_size;
1546     blk->_word_size -= size;
1547     blk->_ptr  += size;
1548     split_birth(size);
1549     repairLinearAllocBlock(blk);
1550     // Update BOT last so that other (parallel) GC threads see a consistent
1551     // view of the BOT and free blocks.
1552     // Above must occur before BOT is updated below.
1553     OrderAccess::storestore();
1554     _bt.split_block(res, blk_size, size);  // adjust block offset table
1555     _bt.allocated(res, size);
1556   }
1557   return res;
1558 }
1559 
1560 FreeChunk*
1561 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1562   assert_locked();
1563   assert(size < SmallForDictionary, "just checking");
1564   FreeChunk* res;
1565   res = _indexedFreeList[size].get_chunk_at_head();
1566   if (res == NULL) {
1567     res = getChunkFromIndexedFreeListHelper(size);
1568   }
1569   _bt.verify_not_unallocated((HeapWord*) res, size);
1570   assert(res == NULL || res->size() == size, "Incorrect block size");
1571   return res;
1572 }
1573 
1574 FreeChunk*
1575 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1576   bool replenish) {
1577   assert_locked();
1578   FreeChunk* fc = NULL;
1579   if (size < SmallForDictionary) {
1580     assert(_indexedFreeList[size].head() == NULL ||
1581       _indexedFreeList[size].surplus() <= 0,
1582       "List for this size should be empty or under populated");
1583     // Try best fit in exact lists before replenishing the list
1584     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1585       // Replenish list.
1586       //
1587       // Things tried that failed.
1588       //   Tried allocating out of the two LinAB's first before
1589       // replenishing lists.
1590       //   Tried small linAB of size 256 (size in indexed list)
1591       // and replenishing indexed lists from the small linAB.
1592       //
1593       FreeChunk* newFc = NULL;
1594       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1595       if (replenish_size < SmallForDictionary) {
1596         // Do not replenish from an underpopulated size.
1597         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1598             _indexedFreeList[replenish_size].head() != NULL) {
1599           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1600         } else if (bestFitFirst()) {
1601           newFc = bestFitSmall(replenish_size);
1602         }
1603       }
1604       if (newFc == NULL && replenish_size > size) {
1605         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1606         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1607       }
1608       // Note: The split-death stats update for the block obtained above
1609       // is recorded below, precisely when we know we are actually going
1610       // to split it into more than one piece.
1611       if (newFc != NULL) {
1612         if  (replenish || CMSReplenishIntermediate) {
1613           // Replenish this list and return one block to caller.
1614           size_t i;
1615           FreeChunk *curFc, *nextFc;
1616           size_t num_blk = newFc->size() / size;
1617           assert(num_blk >= 1, "Smaller than requested?");
1618           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1619           if (num_blk > 1) {
1620             // we are sure we will be splitting the block just obtained
1621             // into multiple pieces; record the split-death of the original
1622             splitDeath(replenish_size);
1623           }
1624           // carve up and link blocks 0, ..., num_blk - 2
1625           // The last chunk is not added to the lists but is returned as the
1626           // free chunk.
1627           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1628                i = 0;
1629                i < (num_blk - 1);
1630                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1631                i++) {
1632             curFc->set_size(size);
1633             // Don't record this as a return, so that we can still
1634             // determine the "returns" from a GC.
1635             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1636             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1637             _bt.mark_block((HeapWord*)curFc, size);
1638             split_birth(size);
1639             // Don't record the initial population of the indexed list
1640             // as a split birth.
1641           }
1642 
1643           // check that the arithmetic was OK above
1644           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1645             "inconsistency in carving newFc");
1646           curFc->set_size(size);
1647           _bt.mark_block((HeapWord*)curFc, size);
1648           split_birth(size);
1649           fc = curFc;
1650         } else {
1651           // Return entire block to caller
1652           fc = newFc;
1653         }
1654       }
1655     }
1656   } else {
1657     // Get a free chunk from the free chunk dictionary to be returned to
1658     // replenish the indexed free list.
1659     fc = getChunkFromDictionaryExact(size);
1660   }
1661   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1662   return fc;
1663 }
1664 
1665 FreeChunk*
1666 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1667   assert_locked();
1668   FreeChunk* fc = _dictionary->get_chunk(size);
1669   if (fc == NULL) {
1670     return NULL;
1671   }
1672   _bt.allocated((HeapWord*)fc, fc->size());
1673   if (fc->size() >= size + MinChunkSize) {
1674     fc = splitChunkAndReturnRemainder(fc, size);
1675   }
1676   assert(fc->size() >= size, "chunk too small");
1677   assert(fc->size() < size + MinChunkSize, "chunk too big");
1678   _bt.verify_single_block((HeapWord*)fc, fc->size());
1679   return fc;
1680 }
1681 
1682 FreeChunk*
1683 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1684   assert_locked();
1685   FreeChunk* fc = _dictionary->get_chunk(size);
1686   if (fc == NULL) {
1687     return fc;
1688   }
1689   _bt.allocated((HeapWord*)fc, fc->size());
1690   if (fc->size() == size) {
1691     _bt.verify_single_block((HeapWord*)fc, size);
1692     return fc;
1693   }
1694   assert(fc->size() > size, "get_chunk() guarantee");
1695   if (fc->size() < size + MinChunkSize) {
1696     // Return the chunk to the dictionary and go get a bigger one.
1697     returnChunkToDictionary(fc);
1698     fc = _dictionary->get_chunk(size + MinChunkSize);
1699     if (fc == NULL) {
1700       return NULL;
1701     }
1702     _bt.allocated((HeapWord*)fc, fc->size());
1703   }
1704   assert(fc->size() >= size + MinChunkSize, "tautology");
1705   fc = splitChunkAndReturnRemainder(fc, size);
1706   assert(fc->size() == size, "chunk is wrong size");
1707   _bt.verify_single_block((HeapWord*)fc, size);
1708   return fc;
1709 }
1710 
1711 void
1712 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1713   assert_locked();
1714 
1715   size_t size = chunk->size();
1716   _bt.verify_single_block((HeapWord*)chunk, size);
1717   // adjust _unallocated_block downward, as necessary
1718   _bt.freed((HeapWord*)chunk, size);
1719   _dictionary->return_chunk(chunk);
1720 #ifndef PRODUCT
1721   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1722     TreeChunk<FreeChunk>::as_TreeChunk(chunk)->list()->verify_stats();
1723   }
1724 #endif // PRODUCT
1725 }
1726 
1727 void
1728 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1729   assert_locked();
1730   size_t size = fc->size();
1731   _bt.verify_single_block((HeapWord*) fc, size);
1732   _bt.verify_not_unallocated((HeapWord*) fc, size);
1733   if (_adaptive_freelists) {
1734     _indexedFreeList[size].return_chunk_at_tail(fc);
1735   } else {
1736     _indexedFreeList[size].return_chunk_at_head(fc);
1737   }
1738 #ifndef PRODUCT
1739   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1740      _indexedFreeList[size].verify_stats();
1741   }
1742 #endif // PRODUCT
1743 }
1744 
1745 // Add chunk to end of last block -- if it's the largest
1746 // block -- and update BOT and census data. We would
1747 // of course have preferred to coalesce it with the
1748 // last block, but it's currently less expensive to find the
1749 // largest block than it is to find the last.
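// For example (illustrative): if the largest block in the dictionary ends
// exactly where 'chunk' begins, the two are merged and re-added as a single
// free block of the combined size; otherwise 'chunk' is added on its own.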
1750 void
1751 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1752   HeapWord* chunk, size_t     size) {
1753   // check that the chunk does lie in this space!
1754   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1755   // One of the parallel gc task threads may be here
1756   // whilst others are allocating.
1757   Mutex* lock = NULL;
1758   if (ParallelGCThreads != 0) {
1759     lock = &_parDictionaryAllocLock;
1760   }
1761   FreeChunk* ec;
1762   {
1763     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1764     ec = dictionary()->find_largest_dict();  // get largest block
1765     if (ec != NULL && ec->end() == chunk) {
1766       // It's a coterminal block - we can coalesce.
1767       size_t old_size = ec->size();
1768       coalDeath(old_size);
1769       removeChunkFromDictionary(ec);
1770       size += old_size;
1771     } else {
1772       ec = (FreeChunk*)chunk;
1773     }
1774   }
1775   ec->set_size(size);
1776   debug_only(ec->mangleFreed(size));
1777   if (size < SmallForDictionary) {
1778     lock = _indexedFreeListParLocks[size];
1779   }
1780   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1781   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1782   // Record the birth under the lock, since the recording involves
1783   // manipulation of the list on which the chunk lives; if the
1784   // chunk is allocated and is the last one on the list,
1785   // the list can go away.
1786   coalBirth(size);
1787 }
1788 
1789 void
1790 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1791                                               size_t     size) {
1792   // check that the chunk does lie in this space!
1793   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1794   assert_locked();
1795   _bt.verify_single_block(chunk, size);
1796 
1797   FreeChunk* fc = (FreeChunk*) chunk;
1798   fc->set_size(size);
1799   debug_only(fc->mangleFreed(size));
1800   if (size < SmallForDictionary) {
1801     returnChunkToFreeList(fc);
1802   } else {
1803     returnChunkToDictionary(fc);
1804   }
1805 }
1806 
1807 void
1808 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1809   size_t size, bool coalesced) {
1810   assert_locked();
1811   assert(chunk != NULL, "null chunk");
1812   if (coalesced) {
1813     // repair BOT
1814     _bt.single_block(chunk, size);
1815   }
1816   addChunkToFreeLists(chunk, size);
1817 }
1818 
1819 // We _must_ find the purported chunk on our free lists;
1820 // we assert if we don't.
1821 void
1822 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1823   size_t size = fc->size();
1824   assert_locked();
1825   debug_only(verifyFreeLists());
1826   if (size < SmallForDictionary) {
1827     removeChunkFromIndexedFreeList(fc);
1828   } else {
1829     removeChunkFromDictionary(fc);
1830   }
1831   _bt.verify_single_block((HeapWord*)fc, size);
1832   debug_only(verifyFreeLists());
1833 }
1834 
1835 void
1836 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1837   size_t size = fc->size();
1838   assert_locked();
1839   assert(fc != NULL, "null chunk");
1840   _bt.verify_single_block((HeapWord*)fc, size);
1841   _dictionary->remove_chunk(fc);
1842   // adjust _unallocated_block upward, as necessary
1843   _bt.allocated((HeapWord*)fc, size);
1844 }
1845 
1846 void
1847 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1848   assert_locked();
1849   size_t size = fc->size();
1850   _bt.verify_single_block((HeapWord*)fc, size);
1851   NOT_PRODUCT(
1852     if (FLSVerifyIndexTable) {
1853       verifyIndexedFreeList(size);
1854     }
1855   )
1856   _indexedFreeList[size].remove_chunk(fc);
1857   NOT_PRODUCT(
1858     if (FLSVerifyIndexTable) {
1859       verifyIndexedFreeList(size);
1860     }
1861   )
1862 }
1863 
1864 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1865   /* A hint is the next larger size that has a surplus.
1866      Start the search at a size large enough to guarantee that
1867      the excess is >= MinChunkSize. */
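  /* Illustrative example (hypothetical sizes): a 10-word request with
     MinChunkSize == 4 starts the search at size 14; if list 14's hint points
     at size 20 and list 20 has a surplus chunk, a 20-word chunk is split into
     a 10-word piece (returned) and a 10-word remainder, and list 14's hint is
     reset to 20 for subsequent searches. */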
1868   size_t start = align_object_size(numWords + MinChunkSize);
1869   if (start < IndexSetSize) {
1870     FreeList<FreeChunk>* it   = _indexedFreeList;
1871     size_t    hint = _indexedFreeList[start].hint();
1872     while (hint < IndexSetSize) {
1873       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1874       FreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1875       if (fl->surplus() > 0 && fl->head() != NULL) {
1876         // Found a list with surplus, reset original hint
1877         // and split out a free chunk which is returned.
1878         _indexedFreeList[start].set_hint(hint);
1879         FreeChunk* res = getFromListGreater(fl, numWords);
1880         assert(res == NULL || res->is_free(),
1881           "Should be returning a free chunk");
1882         return res;
1883       }
1884       hint = fl->hint(); /* keep looking */
1885     }
1886     /* None found. */
1887     it[start].set_hint(IndexSetSize);
1888   }
1889   return NULL;
1890 }
1891 
1892 /* Requires fl->size >= numWords + MinChunkSize */
1893 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList<FreeChunk>* fl,
1894   size_t numWords) {
1895   FreeChunk *curr = fl->head();
1896   assert(curr != NULL, "List is empty");
1897   size_t oldNumWords = curr->size();
1898   assert(numWords >= MinChunkSize, "Word size is too small");
1899   assert(oldNumWords >= numWords + MinChunkSize,
1900         "Size of chunks in the list is too small");
1901 
1902   fl->remove_chunk(curr);
1903   // recorded indirectly by splitChunkAndReturnRemainder -
1904   // smallSplit(oldNumWords, numWords);
1905   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1906   // Does anything have to be done for the remainder in terms of
1907   // fixing the card table?
1908   assert(new_chunk == NULL || new_chunk->is_free(),
1909     "Should be returning a free chunk");
1910   return new_chunk;
1911 }
1912 
1913 FreeChunk*
1914 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1915   size_t new_size) {
1916   assert_locked();
1917   size_t size = chunk->size();
1918   assert(size > new_size, "Split from a smaller block?");
1919   assert(is_aligned(chunk), "alignment problem");
1920   assert(size == adjustObjectSize(size), "alignment problem");
1921   size_t rem_size = size - new_size;
1922   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1923   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1924   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1925   assert(is_aligned(ffc), "alignment problem");
1926   ffc->set_size(rem_size);
1927   ffc->link_next(NULL);
1928   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1929   // Above must occur before BOT is updated below.
1930   // adjust block offset table
1931   OrderAccess::storestore();
1932   assert(chunk->is_free() && ffc->is_free(), "Error");
1933   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1934   if (rem_size < SmallForDictionary) {
1935     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1936     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1937     assert(!is_par ||
1938            (SharedHeap::heap()->n_par_threads() ==
1939             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1940     returnChunkToFreeList(ffc);
1941     split(size, rem_size);
1942     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1943   } else {
1944     returnChunkToDictionary(ffc);
1945     split(size, rem_size);
1946   }
1947   chunk->set_size(new_size);
1948   return chunk;
1949 }
1950 
1951 void
1952 CompactibleFreeListSpace::sweep_completed() {
1953   // Now that space is probably plentiful, refill linear
1954   // allocation blocks as needed.
1955   refillLinearAllocBlocksIfNeeded();
1956 }
1957 
1958 void
1959 CompactibleFreeListSpace::gc_prologue() {
1960   assert_locked();
1961   if (PrintFLSStatistics != 0) {
1962     gclog_or_tty->print("Before GC:\n");
1963     reportFreeListStatistics();
1964   }
1965   refillLinearAllocBlocksIfNeeded();
1966 }
1967 
1968 void
1969 CompactibleFreeListSpace::gc_epilogue() {
1970   assert_locked();
1971   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1972     if (_smallLinearAllocBlock._word_size == 0)
1973       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1974   }
1975   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1976   _promoInfo.stopTrackingPromotions();
1977   repairLinearAllocationBlocks();
1978   // Print Space's stats
1979   if (PrintFLSStatistics != 0) {
1980     gclog_or_tty->print("After GC:\n");
1981     reportFreeListStatistics();
1982   }
1983 }
1984 
1985 // Iteration support, mostly delegated from a CMS generation
1986 
1987 void CompactibleFreeListSpace::save_marks() {
1988   assert(Thread::current()->is_VM_thread(),
1989          "Global variable should only be set when single-threaded");
1990   // Mark the "end" of the used space at the time of this call;
1991   // note, however, that promoted objects from this point
1992   // on are tracked in the _promoInfo below.
1993   set_saved_mark_word(unallocated_block());
1994 #ifdef ASSERT
1995   // Check the sanity of save_marks() etc.
1996   MemRegion ur    = used_region();
1997   MemRegion urasm = used_region_at_save_marks();
1998   assert(ur.contains(urasm),
1999          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
2000                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
2001                  ur.start(), ur.end(), urasm.start(), urasm.end()));
2002 #endif
2003   // inform allocator that promotions should be tracked.
2004   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2005   _promoInfo.startTrackingPromotions();
2006 }
2007 
2008 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2009   assert(_promoInfo.tracking(), "No preceding save_marks?");
2010   assert(SharedHeap::heap()->n_par_threads() == 0,
2011          "Shouldn't be called if using parallel gc.");
2012   return _promoInfo.noPromotions();
2013 }
2014 
2015 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2016                                                                             \
2017 void CompactibleFreeListSpace::                                             \
2018 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2019   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2020          "Shouldn't be called (yet) during parallel part of gc.");          \
2021   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2022   /*                                                                        \
2023    * This also restores any displaced headers and removes the elements from \
2024    * the iteration set as they are processed, so that we have a clean slate \
2025    * at the end of the iteration. Note, thus, that if new objects are       \
2026    * promoted as a result of the iteration they are iterated over as well.  \
2027    */                                                                       \
2028   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2029 }
2030 
2031 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
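// For illustration: for a (hypothetical) closure type FooClosure whose
// nv_suffix is "_nv", the macro above defines
//   void CompactibleFreeListSpace::oop_since_save_marks_iterate_nv(FooClosure* blk)
// which forwards to _promoInfo.promoted_oops_iterate_nv(blk) and then asserts
// that no promotions remain.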
2032 
2033 
2034 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
2035   // ugghh... how would one do this efficiently for a non-contiguous space?
2036   guarantee(false, "NYI");
2037 }
2038 
2039 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2040   return _smallLinearAllocBlock._word_size == 0;
2041 }
2042 
2043 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2044   // Fix up linear allocation blocks to look like free blocks
2045   repairLinearAllocBlock(&_smallLinearAllocBlock);
2046 }
2047 
2048 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2049   assert_locked();
2050   if (blk->_ptr != NULL) {
2051     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2052            "Minimum block size requirement");
2053     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2054     fc->set_size(blk->_word_size);
2055     fc->link_prev(NULL);   // mark as free
2056     fc->dontCoalesce();
2057     assert(fc->is_free(), "just marked it free");
2058     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2059   }
2060 }
2061 
2062 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2063   assert_locked();
2064   if (_smallLinearAllocBlock._ptr == NULL) {
2065     assert(_smallLinearAllocBlock._word_size == 0,
2066       "Size of linAB should be zero if the ptr is NULL");
2067     // Reset the linAB refill and allocation size limit.
2068     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2069   }
2070   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2071 }
2072 
2073 void
2074 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2075   assert_locked();
2076   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2077          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2078          "blk invariant");
2079   if (blk->_ptr == NULL) {
2080     refillLinearAllocBlock(blk);
2081   }
2082   if (PrintMiscellaneous && Verbose) {
2083     if (blk->_word_size == 0) {
2084       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2085     }
2086   }
2087 }
2088 
2089 void
2090 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2091   assert_locked();
2092   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2093          "linear allocation block should be empty");
2094   FreeChunk* fc;
2095   if (blk->_refillSize < SmallForDictionary &&
2096       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2097     // A linAB's strategy might be to use small sizes to reduce
2098     // fragmentation but still get the benefits of allocation from a
2099     // linAB.
2100   } else {
2101     fc = getChunkFromDictionary(blk->_refillSize);
2102   }
2103   if (fc != NULL) {
2104     blk->_ptr  = (HeapWord*)fc;
2105     blk->_word_size = fc->size();
2106     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2107   }
2108 }
2109 
2110 // Support for concurrent collection policy decisions.
2111 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2112   // In the future we might want to factor fragmentation stats --
2113   // including erosion of the "mountain" -- into this decision as well.
2114   return !adaptive_freelists() && linearAllocationWouldFail();
2115 }
2116 
2117 // Support for compaction
2118 
2119 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2120   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2121   // prepare_for_compaction() uses the space between live objects
2122   // so that later phases can skip dead space quickly.  As a result,
2123   // verification of the free lists doesn't work after this point.
2124 }
2125 
2126 #define obj_size(q) adjustObjectSize(oop(q)->size())
2127 #define adjust_obj_size(s) adjustObjectSize(s)
2128 
2129 void CompactibleFreeListSpace::adjust_pointers() {
2130   // In other versions of adjust_pointers(), a bail out
2131   // based on the amount of live data in the generation
2132   // (i.e., if 0, bail out) may be used.
2133   // Cannot test used() == 0 here because the free lists have already
2134   // been mangled by the compaction.
2135 
2136   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2137   // See note about verification in prepare_for_compaction().
2138 }
2139 
2140 void CompactibleFreeListSpace::compact() {
2141   SCAN_AND_COMPACT(obj_size);
2142 }
2143 
2144 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2145 // where fbs are the free block sizes
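// A worked example (illustrative only): four free blocks of 2 words each give
// 1 - (4 * 2*2) / (8*8) = 1 - 16/64 = 0.75, whereas a single 8-word block
// gives 1 - 64/64 = 0.0; lower values thus indicate less fragmentation.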
2146 double CompactibleFreeListSpace::flsFrag() const {
2147   size_t itabFree = totalSizeInIndexedFreeLists();
2148   double frag = 0.0;
2149   size_t i;
2150 
2151   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2152     double sz  = i;
2153     frag      += _indexedFreeList[i].count() * (sz * sz);
2154   }
2155 
2156   double totFree = itabFree +
2157                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2158   if (totFree > 0) {
2159     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2160             (totFree * totFree));
2161     frag = (double)1.0  - frag;
2162   } else {
2163     assert(frag == 0.0, "Follows from totFree == 0");
2164   }
2165   return frag;
2166 }
2167 
2168 void CompactibleFreeListSpace::beginSweepFLCensus(
2169   float inter_sweep_current,
2170   float inter_sweep_estimate,
2171   float intra_sweep_estimate) {
2172   assert_locked();
2173   size_t i;
2174   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2175     FreeList<FreeChunk>* fl = &_indexedFreeList[i];
2176     if (PrintFLSStatistics > 1) {
2177       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2178     }
2179     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2180     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2181     fl->set_before_sweep(fl->count());
2182     fl->set_bfr_surp(fl->surplus());
2183   }
2184   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2185                                     inter_sweep_current,
2186                                     inter_sweep_estimate,
2187                                     intra_sweep_estimate);
2188 }
2189 
2190 void CompactibleFreeListSpace::setFLSurplus() {
2191   assert_locked();
2192   size_t i;
2193   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2194     FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2195     fl->set_surplus(fl->count() -
2196                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2197   }
2198 }
2199 
2200 void CompactibleFreeListSpace::setFLHints() {
2201   assert_locked();
2202   size_t i;
2203   size_t h = IndexSetSize;
2204   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2205     FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2206     fl->set_hint(h);
2207     if (fl->surplus() > 0) {
2208       h = i;
2209     }
2210   }
2211 }
2212 
2213 void CompactibleFreeListSpace::clearFLCensus() {
2214   assert_locked();
2215   size_t i;
2216   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2217     FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2218     fl->set_prev_sweep(fl->count());
2219     fl->set_coal_births(0);
2220     fl->set_coal_deaths(0);
2221     fl->set_split_births(0);
2222     fl->set_split_deaths(0);
2223   }
2224 }
2225 
2226 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2227   if (PrintFLSStatistics > 0) {
2228     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2229     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2230                            largestAddr);
2231   }
2232   setFLSurplus();
2233   setFLHints();
2234   if (PrintGC && PrintFLSCensus > 0) {
2235     printFLCensus(sweep_count);
2236   }
2237   clearFLCensus();
2238   assert_locked();
2239   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2240 }
2241 
2242 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2243   if (size < SmallForDictionary) {
2244     FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2245     return (fl->coal_desired() < 0) ||
2246            ((int)fl->count() > fl->coal_desired());
2247   } else {
2248     return dictionary()->coal_dict_over_populated(size);
2249   }
2250 }
2251 
2252 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2253   assert(size < SmallForDictionary, "Size too large for indexed list");
2254   FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2255   fl->increment_coal_births();
2256   fl->increment_surplus();
2257 }
2258 
2259 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2260   assert(size < SmallForDictionary, "Size too large for indexed list");
2261   FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2262   fl->increment_coal_deaths();
2263   fl->decrement_surplus();
2264 }
2265 
2266 void CompactibleFreeListSpace::coalBirth(size_t size) {
2267   if (size  < SmallForDictionary) {
2268     smallCoalBirth(size);
2269   } else {
2270     dictionary()->dict_census_udpate(size,
2271                                    false /* split */,
2272                                    true /* birth */);
2273   }
2274 }
2275 
2276 void CompactibleFreeListSpace::coalDeath(size_t size) {
2277   if (size < SmallForDictionary) {
2278     smallCoalDeath(size);
2279   } else {
2280     dictionary()->dict_census_udpate(size,
2281                                    false /* split */,
2282                                    false /* birth */);
2283   }
2284 }
2285 
2286 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2287   assert(size < SmallForDictionary, "Size too large for indexed list");
2288   FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2289   fl->increment_split_births();
2290   fl->increment_surplus();
2291 }
2292 
2293 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2294   assert(size < SmallForDictionary, "Size too large for indexed list");
2295   FreeList<FreeChunk> *fl = &_indexedFreeList[size];
2296   fl->increment_split_deaths();
2297   fl->decrement_surplus();
2298 }
2299 
2300 void CompactibleFreeListSpace::split_birth(size_t size) {
2301   if (size  < SmallForDictionary) {
2302     smallSplitBirth(size);
2303   } else {
2304     dictionary()->dict_census_udpate(size,
2305                                    true /* split */,
2306                                    true /* birth */);
2307   }
2308 }
2309 
2310 void CompactibleFreeListSpace::splitDeath(size_t size) {
2311   if (size  < SmallForDictionary) {
2312     smallSplitDeath(size);
2313   } else {
2314     dictionary()->dict_census_udpate(size,
2315                                    true /* split */,
2316                                    false /* birth */);
2317   }
2318 }
2319 
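// Record the census effect of splitting a chunk of size 'from' into pieces of
// sizes 'to1' and 'from - to1'.  For example, splitting a 100-word chunk to
// carve out 40 words records a split death at size 100 and split births at
// sizes 40 and 60.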
2320 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2321   size_t to2 = from - to1;
2322   splitDeath(from);
2323   split_birth(to1);
2324   split_birth(to2);
2325 }
2326 
2327 void CompactibleFreeListSpace::print() const {
2328   print_on(tty);
2329 }
2330 
2331 void CompactibleFreeListSpace::prepare_for_verify() {
2332   assert_locked();
2333   repairLinearAllocationBlocks();
2334   // Verify that the SpoolBlocks look like free blocks of
2335   // appropriate sizes... To be done ...
2336 }
2337 
2338 class VerifyAllBlksClosure: public BlkClosure {
2339  private:
2340   const CompactibleFreeListSpace* _sp;
2341   const MemRegion                 _span;
2342   HeapWord*                       _last_addr;
2343   size_t                          _last_size;
2344   bool                            _last_was_obj;
2345   bool                            _last_was_live;
2346 
2347  public:
2348   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2349     MemRegion span) :  _sp(sp), _span(span),
2350                        _last_addr(NULL), _last_size(0),
2351                        _last_was_obj(false), _last_was_live(false) { }
2352 
2353   virtual size_t do_blk(HeapWord* addr) {
2354     size_t res;
2355     bool   was_obj  = false;
2356     bool   was_live = false;
2357     if (_sp->block_is_obj(addr)) {
2358       was_obj = true;
2359       oop p = oop(addr);
2360       guarantee(p->is_oop(), "Should be an oop");
2361       res = _sp->adjustObjectSize(p->size());
2362       if (_sp->obj_is_alive(addr)) {
2363         was_live = true;
2364         p->verify();
2365       }
2366     } else {
2367       FreeChunk* fc = (FreeChunk*)addr;
2368       res = fc->size();
2369       if (FLSVerifyLists && !fc->cantCoalesce()) {
2370         guarantee(_sp->verify_chunk_in_free_list(fc),
2371                   "Chunk should be on a free list");
2372       }
2373     }
2374     if (res == 0) {
2375       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2376       gclog_or_tty->print_cr(
2377         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2378         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2379         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2380         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2381       _sp->print_on(gclog_or_tty);
2382       guarantee(false, "Seppuku!");
2383     }
2384     _last_addr = addr;
2385     _last_size = res;
2386     _last_was_obj  = was_obj;
2387     _last_was_live = was_live;
2388     return res;
2389   }
2390 };
2391 
2392 class VerifyAllOopsClosure: public OopClosure {
2393  private:
2394   const CMSCollector*             _collector;
2395   const CompactibleFreeListSpace* _sp;
2396   const MemRegion                 _span;
2397   const bool                      _past_remark;
2398   const CMSBitMap*                _bit_map;
2399 
2400  protected:
2401   void do_oop(void* p, oop obj) {
2402     if (_span.contains(obj)) { // the interior oop points into CMS heap
2403       if (!_span.contains(p)) { // reference from outside CMS heap
2404         // Should be a valid object; the first disjunct below allows
2405         // us to sidestep an assertion in block_is_obj() that insists
2406         // that p be in _sp. Note that several generations (and spaces)
2407         // are spanned by _span (CMS heap) above.
2408         guarantee(!_sp->is_in_reserved(obj) ||
2409                   _sp->block_is_obj((HeapWord*)obj),
2410                   "Should be an object");
2411         guarantee(obj->is_oop(), "Should be an oop");
2412         obj->verify();
2413         if (_past_remark) {
2414           // Remark has been completed, the object should be marked
2415           _bit_map->isMarked((HeapWord*)obj);
2416         }
2417       } else { // reference within CMS heap
2418         if (_past_remark) {
2419           // Remark has been completed -- so the referent should have
2420           // been marked, if the referring object is.
2421           if (_bit_map->isMarked(_collector->block_start(p))) {
2422             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2423           }
2424         }
2425       }
2426     } else if (_sp->is_in_reserved(p)) {
2427       // the reference is from FLS, and points out of FLS
2428       guarantee(obj->is_oop(), "Should be an oop");
2429       obj->verify();
2430     }
2431   }
2432 
2433   template <class T> void do_oop_work(T* p) {
2434     T heap_oop = oopDesc::load_heap_oop(p);
2435     if (!oopDesc::is_null(heap_oop)) {
2436       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2437       do_oop(p, obj);
2438     }
2439   }
2440 
2441  public:
2442   VerifyAllOopsClosure(const CMSCollector* collector,
2443     const CompactibleFreeListSpace* sp, MemRegion span,
2444     bool past_remark, CMSBitMap* bit_map) :
2445     OopClosure(), _collector(collector), _sp(sp), _span(span),
2446     _past_remark(past_remark), _bit_map(bit_map) { }
2447 
2448   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2449   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2450 };
2451 
2452 void CompactibleFreeListSpace::verify() const {
2453   assert_lock_strong(&_freelistLock);
2454   verify_objects_initialized();
2455   MemRegion span = _collector->_span;
2456   bool past_remark = (_collector->abstract_state() ==
2457                       CMSCollector::Sweeping);
2458 
2459   ResourceMark rm;
2460   HandleMark  hm;
2461 
2462   // Check integrity of CFL data structures
2463   _promoInfo.verify();
2464   _dictionary->verify();
2465   if (FLSVerifyIndexTable) {
2466     verifyIndexedFreeLists();
2467   }
2468   // Check integrity of all objects and free blocks in space
2469   {
2470     VerifyAllBlksClosure cl(this, span);
2471     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2472   }
2473   // Check that all references in the heap to FLS
2474   // are to valid objects in FLS or that references in
2475   // FLS are to valid objects elsewhere in the heap
2476   if (FLSVerifyAllHeapReferences)
2477   {
2478     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2479       _collector->markBitMap());
2480     CollectedHeap* ch = Universe::heap();
2481     ch->oop_iterate(&cl);              // all oops in generations
2482     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
2483   }
2484 
2485   if (VerifyObjectStartArray) {
2486     // Verify the block offset table
2487     _bt.verify();
2488   }
2489 }
2490 
2491 #ifndef PRODUCT
2492 void CompactibleFreeListSpace::verifyFreeLists() const {
2493   if (FLSVerifyLists) {
2494     _dictionary->verify();
2495     verifyIndexedFreeLists();
2496   } else {
2497     if (FLSVerifyDictionary) {
2498       _dictionary->verify();
2499     }
2500     if (FLSVerifyIndexTable) {
2501       verifyIndexedFreeLists();
2502     }
2503   }
2504 }
2505 #endif
2506 
2507 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2508   size_t i = 0;
2509   for (; i < IndexSetStart; i++) {
2510     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2511   }
2512   for (; i < IndexSetSize; i++) {
2513     verifyIndexedFreeList(i);
2514   }
2515 }
2516 
2517 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2518   FreeChunk* fc   =  _indexedFreeList[size].head();
2519   FreeChunk* tail =  _indexedFreeList[size].tail();
2520   size_t    num = _indexedFreeList[size].count();
2521   size_t      n = 0;
2522   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2523             "Slot should have been empty");
2524   for (; fc != NULL; fc = fc->next(), n++) {
2525     guarantee(fc->size() == size, "Size inconsistency");
2526     guarantee(fc->is_free(), "!free?");
2527     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2528     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2529   }
2530   guarantee(n == num, "Incorrect count");
2531 }
2532 
2533 #ifndef PRODUCT
2534 void CompactibleFreeListSpace::check_free_list_consistency() const {
2535   assert(_dictionary->min_size() <= IndexSetSize,
2536     "Some sizes can't be allocated without recourse to"
2537     " linear allocation buffers");
2538   assert(BinaryTreeDictionary<FreeChunk>::min_tree_chunk_size*HeapWordSize == sizeof(TreeChunk<FreeChunk>),
2539     "else MIN_TREE_CHUNK_SIZE is wrong");
2540   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2541   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2542 }
2543 #endif
2544 
2545 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2546   assert_lock_strong(&_freelistLock);
2547   FreeList<FreeChunk> total;
2548   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2549   FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2550   size_t total_free = 0;
2551   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2552     const FreeList<FreeChunk> *fl = &_indexedFreeList[i];
2553     total_free += fl->count() * fl->size();
2554     if (i % (40*IndexSetStride) == 0) {
2555       FreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2556     }
2557     fl->print_on(gclog_or_tty);
2558     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2559     total.set_surplus(    total.surplus()     + fl->surplus()    );
2560     total.set_desired(    total.desired()     + fl->desired()    );
2561     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2562     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2563     total.set_count(      total.count()       + fl->count()      );
2564     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2565     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2566     total.set_split_births(total.split_births() + fl->split_births());
2567     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2568   }
2569   total.print_on(gclog_or_tty, "TOTAL");
2570   gclog_or_tty->print_cr("Total free in indexed lists "
2571                          SIZE_FORMAT " words", total_free);
2572   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2573     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2574             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2575     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2576   _dictionary->print_dict_census();
2577 }
2578 
2579 ///////////////////////////////////////////////////////////////////////////
2580 // CFLS_LAB
2581 ///////////////////////////////////////////////////////////////////////////
2582 
2583 #define VECTOR_257(x)                                                                                  \
2584   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2585   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2586      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2587      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2588      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2589      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2590      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2591      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2592      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2593      x }
2594 
2595 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2596 // OldPLABSize, whose static default is different; if overridden at the
2597 // command-line, this will get reinitialized via a call to
2598 // modify_initialization() below.
2599 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2600   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2601 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2602 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2603 
2604 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2605   _cfls(cfls)
2606 {
2607   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2608   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2609        i < CompactibleFreeListSpace::IndexSetSize;
2610        i += CompactibleFreeListSpace::IndexSetStride) {
2611     _indexedFreeList[i].set_size(i);
2612     _num_blocks[i] = 0;
2613   }
2614 }
2615 
2616 static bool _CFLS_LAB_modified = false;
2617 
2618 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2619   assert(!_CFLS_LAB_modified, "Call only once");
2620   _CFLS_LAB_modified = true;
2621   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2622        i < CompactibleFreeListSpace::IndexSetSize;
2623        i += CompactibleFreeListSpace::IndexSetStride) {
2624     _blocks_to_claim[i].modify(n, wt, true /* force */);
2625   }
2626 }
2627 
2628 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2629   FreeChunk* res;
2630   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2631   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2632     // This locking manages sync with other large object allocations.
2633     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2634                     Mutex::_no_safepoint_check_flag);
2635     res = _cfls->getChunkFromDictionaryExact(word_sz);
2636     if (res == NULL) return NULL;
2637   } else {
2638     FreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2639     if (fl->count() == 0) {
2640       // Attempt to refill this local free list.
2641       get_from_global_pool(word_sz, fl);
2642       // If it didn't work, give up.
2643       if (fl->count() == 0) return NULL;
2644     }
2645     res = fl->get_chunk_at_head();
2646     assert(res != NULL, "Why was count non-zero?");
2647   }
2648   res->markNotFree();
2649   assert(!res->is_free(), "shouldn't be marked free");
2650   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2651   // Mangle the just-allocated object with a distinct pattern.
2652   debug_only(res->mangleAllocated(word_sz));
2653   return (HeapWord*)res;
2654 }
2655 
2656 // Get a chunk of blocks of the right size and update related
2657 // book-keeping stats
2658 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList<FreeChunk>* fl) {
2659   // Get the #blocks we want to claim
2660   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2661   assert(n_blks > 0, "Error");
2662   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
2663   // In some cases, when the application has a phase change,
2664   // there may be a sudden and sharp shift in the object survival
2665   // profile, and updating the counts at the end of a scavenge
2666   // may not be quick enough, giving rise to large scavenge pauses
2667   // during these phase changes. It is beneficial to detect such
2668   // changes on-the-fly during a scavenge and avoid such a phase-change
2669   // pothole. The following code is a heuristic attempt to do that.
2670   // It is protected by a product flag until we have gained
2671   // enough experience with this heuristic and fine-tuned its behaviour.
2672   // WARNING: This might increase fragmentation if we overreact to
2673   // small spikes, so some kind of historical smoothing based on
2674   // previous experience with the greater reactivity might be useful.
2675   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2676   // default.
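       // Illustrative example (hypothetical flag values, not defaults): with
       // n_blks == 50, CMSOldPLABNumRefills == 4, CMSOldPLABToleranceFactor == 2
       // and _num_blocks[word_sz] == 1200, 'multiple' below is 1200/(2*4*50) == 3,
       // so n_blks grows by CMSOldPLABReactivityFactor*3*50 blocks before being
       // clamped to CMSOldPLABMax.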
2677   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2678     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2679     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2680     n_blks = MIN2(n_blks, CMSOldPLABMax);
2681   }
2682   assert(n_blks > 0, "Error");
2683   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2684   // Update stats table entry for this block size
2685   _num_blocks[word_sz] += fl->count();
2686 }
2687 
2688 void CFLS_LAB::compute_desired_plab_size() {
2689   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2690        i < CompactibleFreeListSpace::IndexSetSize;
2691        i += CompactibleFreeListSpace::IndexSetStride) {
2692     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2693            "Counter inconsistency");
2694     if (_global_num_workers[i] > 0) {
2695       // Need to smooth wrt historical average
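           // The value sampled below is the per-worker, per-refill demand seen
           // in the last scavenge: _global_num_blocks[i] divided by
           // (_global_num_workers[i] * CMSOldPLABNumRefills), clamped to
           // [CMSOldPLABMin, CMSOldPLABMax]. Hypothetical example: 8000 blocks
           // used by 8 workers doing 4 refills each samples 8000/(8*4) == 250,
           // subject to that clamping.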
2696       if (ResizeOldPLAB) {
2697         _blocks_to_claim[i].sample(
2698           MAX2((size_t)CMSOldPLABMin,
2699           MIN2((size_t)CMSOldPLABMax,
2700                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2701       }
2702       // Reset counters for next round
2703       _global_num_workers[i] = 0;
2704       _global_num_blocks[i] = 0;
2705       if (PrintOldPLAB) {
2706         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2707       }
2708     }
2709   }
2710 }
2711 
2712 // If this is changed in the future to allow parallel
2713 // access, one would need to take the FL locks and,
2714 // depending on how it is used, stagger access from
2715 // parallel threads to reduce contention.
2716 void CFLS_LAB::retire(int tid) {
2717   // We run this single-threaded, with the world stopped,
2718   // so there is no need for locks and such.
2719   NOT_PRODUCT(Thread* t = Thread::current();)
2720   assert(Thread::current()->is_VM_thread(), "Error");
2721   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2722        i < CompactibleFreeListSpace::IndexSetSize;
2723        i += CompactibleFreeListSpace::IndexSetStride) {
2724     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2725            "Can't retire more than what we obtained");
2726     if (_num_blocks[i] > 0) {
2727       size_t num_retire =  _indexedFreeList[i].count();
2728       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2729       {
2730         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2731         //                Mutex::_no_safepoint_check_flag);
2732 
2733         // Update globals stats for num_blocks used
2734         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2735         _global_num_workers[i]++;
2736         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2737         if (num_retire > 0) {
2738           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2739           // Reset this list.
2740           _indexedFreeList[i] = FreeList<FreeChunk>();
2741           _indexedFreeList[i].set_size(i);
2742         }
2743       }
2744       if (PrintOldPLAB) {
2745         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2746                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2747       }
2748       // Reset stats for next round
2749       _num_blocks[i]         = 0;
2750     }
2751   }
2752 }
2753 
2754 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList<FreeChunk>* fl) {
2755   assert(fl->count() == 0, "Precondition.");
2756   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2757          "Precondition");
2758 
2759   // We'll try all multiples of word_sz in the indexed set, starting with
2760   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2761   // then try getting a big chunk and splitting it.
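       // For instance (hypothetical sizes): for word_sz == 8 we first look at
       // the 8-word list; if CMSSplitIndexedFreeListBlocks, we also consider
       // the 16-, 24-, ... word lists, splitting each chunk taken from them
       // k ways; only if all of those fail do we fall through and carve the
       // blocks out of a single dictionary chunk below.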
2762   {
2763     bool found;
2764     int  k;
2765     size_t cur_sz;
2766     for (k = 1, cur_sz = k * word_sz, found = false;
2767          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2768          (CMSSplitIndexedFreeListBlocks || k <= 1);
2769          k++, cur_sz = k * word_sz) {
2770       FreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2771       fl_for_cur_sz.set_size(cur_sz);
2772       {
2773         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2774                         Mutex::_no_safepoint_check_flag);
2775         FreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2776         if (gfl->count() != 0) {
2777           // nn is the number of chunks of size cur_sz that
2778           // we'd need to split k-ways each, in order to create
2779           // "n" chunks of size word_sz each.
2780           const size_t nn = MAX2(n/k, (size_t)1);
2781           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2782           found = true;
2783           if (k > 1) {
2784             // Update split death stats for the cur_sz-size blocks list:
2785             // we increment the split death count by the number of blocks
2786             // we just took from the cur_sz-size blocks list and which
2787             // we will be splitting below.
2788             ssize_t deaths = gfl->split_deaths() +
2789                              fl_for_cur_sz.count();
2790             gfl->set_split_deaths(deaths);
2791           }
2792         }
2793       }
2794       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2795       if (found) {
2796         if (k == 1) {
2797           fl->prepend(&fl_for_cur_sz);
2798         } else {
2799           // Divide each block on fl_for_cur_sz up k ways.
2800           FreeChunk* fc;
2801           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2802             // Must do this in reverse order, so that anybody attempting to
2803             // access the main chunk sees it as a single free block until we
2804             // change it.
2805             size_t fc_size = fc->size();
2806             assert(fc->is_free(), "Error");
2807             for (int i = k-1; i >= 0; i--) {
2808               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2809               assert((i != 0) ||
2810                         ((fc == ffc) && ffc->is_free() &&
2811                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2812                         "Counting error");
2813               ffc->set_size(word_sz);
2814               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2815               ffc->link_next(NULL);
2816               // Above must occur before BOT is updated below.
2817               OrderAccess::storestore();
2818               // Splitting from the right: at this point fc_size == (i + 1) * word_sz.
2819               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2820               fc_size -= word_sz;
2821               assert(fc_size == i*word_sz, "Error");
2822               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2823               _bt.verify_single_block((HeapWord*)fc, fc_size);
2824               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2825               // Push this on "fl".
2826               fl->return_chunk_at_head(ffc);
2827             }
2828             // TRAP
2829             assert(fl->tail()->next() == NULL, "List invariant.");
2830           }
2831         }
2832         // Update birth stats for this block size.
2833         size_t num = fl->count();
2834         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2835                         Mutex::_no_safepoint_check_flag);
2836         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2837         _indexedFreeList[word_sz].set_split_births(births);
2838         return;
2839       }
2840     }
2841   }
2842   // Otherwise, we'll split a block from the dictionary.
2843   FreeChunk* fc = NULL;
2844   FreeChunk* rem_fc = NULL;
2845   size_t rem;
2846   {
2847     MutexLockerEx x(parDictionaryAllocLock(),
2848                     Mutex::_no_safepoint_check_flag);
2849     while (n > 0) {
2850       fc = dictionary()->get_chunk(MAX2(n * word_sz,
2851                                   _dictionary->min_size()),
2852                                   FreeBlockDictionary<FreeChunk>::atLeast);
2853       if (fc != NULL) {
2854         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2855         dictionary()->dict_census_udpate(fc->size(),
2856                                        true /*split*/,
2857                                        false /*birth*/);
2858         break;
2859       } else {
2860         n--;
2861       }
2862     }
2863     if (fc == NULL) return;
2864     // Otherwise, split up that block.
2865     assert((ssize_t)n >= 1, "Control point invariant");
2866     assert(fc->is_free(), "Error: should be a free block");
2867     _bt.verify_single_block((HeapWord*)fc, fc->size());
2868     const size_t nn = fc->size() / word_sz;
2869     n = MIN2(nn, n);
2870     assert((ssize_t)n >= 1, "Control point invariant");
2871     rem = fc->size() - n * word_sz;
2872     // If there is a remainder, and it's too small, allocate one fewer.
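         // (Hypothetical example: with word_sz == 4, fc->size() == 41 and
         // n == 10, rem is 1; assuming MinChunkSize > 1, we give back one
         // block, leaving n == 9 and rem == 5.)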
2873     if (rem > 0 && rem < MinChunkSize) {
2874       n--; rem += word_sz;
2875     }
2876     // Note that at this point we may have n == 0.
2877     assert((ssize_t)n >= 0, "Control point invariant");
2878 
2879     // If n is 0, the chunk fc that was found is not large
2880     // enough to leave a viable remainder.  We are unable to
2881     // allocate even one block.  Return fc to the
2882     // dictionary and return, leaving "fl" empty.
2883     if (n == 0) {
2884       returnChunkToDictionary(fc);
2885       assert(fl->count() == 0, "We never allocated any blocks");
2886       return;
2887     }
2888 
2889     // First return the remainder, if any.
2890     // Note that we hold the lock until we decide if we're going to give
2891     // back the remainder to the dictionary, since a concurrent allocation
2892     // may otherwise see the heap as empty.  (We're willing to take that
2893     // hit if the block is a small block.)
2894     if (rem > 0) {
2895       size_t prefix_size = n * word_sz;
2896       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2897       rem_fc->set_size(rem);
2898       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2899       rem_fc->link_next(NULL);
2900       // Above must occur before BOT is updated below.
2901       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2902       OrderAccess::storestore();
2903       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2904       assert(fc->is_free(), "Error");
2905       fc->set_size(prefix_size);
2906       if (rem >= IndexSetSize) {
2907         returnChunkToDictionary(rem_fc);
2908         dictionary()->dict_census_udpate(rem, true /*split*/, true /*birth*/);
2909         rem_fc = NULL;
2910       }
2911       // Otherwise, return it to the small list below.
2912     }
2913   }
2914   if (rem_fc != NULL) {
2915     MutexLockerEx x(_indexedFreeListParLocks[rem],
2916                     Mutex::_no_safepoint_check_flag);
2917     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2918     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2919     smallSplitBirth(rem);
2920   }
2921   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2922   // Now do the splitting up.
2923   // Must do this in reverse order, so that anybody attempting to
2924   // access the main chunk sees it as a single free block until we
2925   // change it.
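       // (For example, with n == 3 we first carve out and size the chunks at
       // offsets 2*word_sz and word_sz, and only then shrink fc itself, so a
       // concurrent reader still sees one free block spanning the region
       // being split until the final set_size below.)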
2926   size_t fc_size = n * word_sz;
2927   // All but first chunk in this loop
2928   for (ssize_t i = n-1; i > 0; i--) {
2929     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2930     ffc->set_size(word_sz);
2931     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2932     ffc->link_next(NULL);
2933     // Above must occur before BOT is updated below.
2934     OrderAccess::storestore();
2935     // Splitting from the right: at this point fc_size == (i + 1) * word_sz.
2936     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2937     fc_size -= word_sz;
2938     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2939     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2940     _bt.verify_single_block((HeapWord*)fc, fc_size);
2941     // Push this on "fl".
2942     fl->return_chunk_at_head(ffc);
2943   }
2944   // First chunk
2945   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2946   // The blocks above should show their new sizes before the first block below
2947   fc->set_size(word_sz);
2948   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2949   fc->link_next(NULL);
2950   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2951   _bt.verify_single_block((HeapWord*)fc, fc->size());
2952   fl->return_chunk_at_head(fc);
2953 
2954   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2955   {
2956     // Update the stats for this block size.
2957     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2958                     Mutex::_no_safepoint_check_flag);
2959     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2960     _indexedFreeList[word_sz].set_split_births(births);
2961     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2962     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2963   }
2964 
2965   // TRAP
2966   assert(fl->tail()->next() == NULL, "List invariant.");
2967 }
2968 
2969 // Set up the space's par_seq_tasks structure for work claiming
2970 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2971 // XXX Need to suitably abstract and generalize this and the next
2972 // method into one.
2973 void
2974 CompactibleFreeListSpace::
2975 initialize_sequential_subtasks_for_rescan(int n_threads) {
2976   // The "size" of each task is fixed according to rescan_task_size.
2977   assert(n_threads > 0, "Unexpected n_threads argument");
2978   const size_t task_size = rescan_task_size();
2979   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
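       // Ceiling division: any tail smaller than task_size still gets its own
       // task (e.g., hypothetically, 10*task_size + 1 words yields 11 tasks).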
2980   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2981   assert(n_tasks == 0 ||
2982          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2983           (used_region().start() + n_tasks*task_size >= used_region().end())),
2984          "n_tasks calculation incorrect");
2985   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2986   assert(!pst->valid(), "Clobbering existing data?");
2987   // Sets the condition for completion of the subtask (how many threads
2988   // need to finish in order to be done).
2989   pst->set_n_threads(n_threads);
2990   pst->set_n_tasks((int)n_tasks);
2991 }
2992 
2993 // Set up the space's par_seq_tasks structure for work claiming
2994 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2995 void
2996 CompactibleFreeListSpace::
2997 initialize_sequential_subtasks_for_marking(int n_threads,
2998                                            HeapWord* low) {
2999   // The "size" of each task is fixed according to rescan_task_size.
3000   assert(n_threads > 0, "Unexpected n_threads argument");
3001   const size_t task_size = marking_task_size();
3002   assert(task_size > CardTableModRefBS::card_size_in_words &&
3003          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3004          "Otherwise arithmetic below would be incorrect");
3005   MemRegion span = _gen->reserved();
3006   if (low != NULL) {
3007     if (span.contains(low)) {
3008       // Align low down to a card boundary so that
3009       // we can use block_offset_careful() on span boundaries.
3010       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3011                                  CardTableModRefBS::card_size);
3012       // Clip span prefix at aligned_low
3013       span = span.intersection(MemRegion(aligned_low, span.end()));
3014     } else if (low > span.end()) {
3015       span = MemRegion(low, low);  // Null region
3016     } // else use entire span
3017   }
3018   assert(span.is_empty() ||
3019          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3020         "span should start at a card boundary");
3021   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3022   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3023   assert(n_tasks == 0 ||
3024          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3025           (span.start() + n_tasks*task_size >= span.end())),
3026          "n_tasks calculation incorrect");
3027   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3028   assert(!pst->valid(), "Clobbering existing data?");
3029   // Sets the condition for completion of the subtask (how many threads
3030   // need to finish in order to be done).
3031   pst->set_n_threads(n_threads);
3032   pst->set_n_tasks((int)n_tasks);
3033 }