1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/space.inline.hpp"
  37 #include "memory/universe.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 #include "runtime/vmThread.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 /////////////////////////////////////////////////////////////////////////
  48 //// CompactibleFreeListSpace
  49 /////////////////////////////////////////////////////////////////////////
  50 
   51 // Highest rank used by the free list locks
  52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  53 
  54 // Defaults are 0 so things will break badly if incorrectly initialized.
  55 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  57 
  58 size_t MinChunkSize = 0;
  59 
  60 void CompactibleFreeListSpace::set_cms_values() {
  61   // Set CMS global values
  62   assert(MinChunkSize == 0, "already set");
  63 
  64   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  65   // for chunks to contain a FreeChunk.
  66   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  67   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  68 
  69   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  70   IndexSetStart  = MinChunkSize;
  71   IndexSetStride = MinObjAlignment;
  72 }
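
// For illustration only (the numbers are platform-dependent assumptions, not
// guarantees): if sizeof(FreeChunk) were 24 bytes and MinObjAlignmentInBytes
// were 8, then min_chunk_size_in_bytes would round up to 24 and MinChunkSize
// would be 24 / BytesPerWord = 3 words; IndexSetStart would then be 3 and
// IndexSetStride would be MinObjAlignment (1 word under these assumptions).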
  73 
  74 // Constructor
  75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  76   MemRegion mr, bool use_adaptive_freelists,
  77   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  78   _dictionaryChoice(dictionaryChoice),
  79   _adaptive_freelists(use_adaptive_freelists),
  80   _bt(bs, mr),
   81   // Free list lock ranks are in the range of values taken by _lockRank.
   82   // This range currently is [_leaf+2, _leaf+3].
   83   // Note: this requires that CFLspace c'tors
   84   // are called serially in the order in which the locks are
   85   // acquired in the program text. This is true today.
  86   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  87   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  88                           "CompactibleFreeListSpace._dict_par_lock", true),
  89   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSRescanMultiple),
  91   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  92                     CMSConcMarkMultiple),
  93   _collector(NULL)
  94 {
  95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  96          "FreeChunk is larger than expected");
  97   _bt.set_space(this);
  98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  99   // We have all of "mr", all of which we place in the dictionary
 100   // as one big chunk. We'll need to decide here which of several
 101   // possible alternative dictionary implementations to use. For
 102   // now the choice is easy, since we have only one working
 103   // implementation, namely, the simple binary tree (splaying
 104   // temporarily disabled).
 105   switch (dictionaryChoice) {
 106     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 107       _dictionary = new AFLBinaryTreeDictionary(mr);
 108       break;
 109     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 110     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 111     default:
 112       warning("dictionaryChoice: selected option not understood; using"
 113               " default BinaryTreeDictionary implementation instead.");
 114   }
 115   assert(_dictionary != NULL, "CMS dictionary initialization");
 116   // The indexed free lists are initially all empty and are lazily
 117   // filled in on demand. Initialize the array elements to NULL.
 118   initializeIndexedFreeListArray();
 119 
 120   // Not using adaptive free lists assumes that allocation is first
 121   // from the linAB's.  Also a cms perm gen which can be compacted
 122   // has to have the klass's klassKlass allocated at a lower
 123   // address in the heap than the klass so that the klassKlass is
 124   // moved to its new location before the klass is moved.
 125   // Set the _refillSize for the linear allocation blocks
 126   if (!use_adaptive_freelists) {
 127     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 128                                            FreeBlockDictionary<FreeChunk>::atLeast);
 129     // The small linAB initially has all the space and will allocate
 130     // a chunk of any size.
 131     HeapWord* addr = (HeapWord*) fc;
  132     _smallLinearAllocBlock.set(addr, fc->size(),
 133       1024*SmallForLinearAlloc, fc->size());
 134     // Note that _unallocated_block is not updated here.
 135     // Allocations from the linear allocation block should
 136     // update it.
 137   } else {
 138     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 139                                SmallForLinearAlloc);
 140   }
 141   // CMSIndexedFreeListReplenish should be at least 1
 142   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 143   _promoInfo.setSpace(this);
 144   if (UseCMSBestFit) {
 145     _fitStrategy = FreeBlockBestFitFirst;
 146   } else {
 147     _fitStrategy = FreeBlockStrategyNone;
 148   }
 149   check_free_list_consistency();
 150 
 151   // Initialize locks for parallel case.
 152 
 153   if (CollectedHeap::use_parallel_gc_threads()) {
 154     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
  155       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 156                                               "a freelist par lock",
 157                                               true);
 158       DEBUG_ONLY(
 159         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 160       )
 161     }
 162     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 163   }
 164 }
 165 
 166 // Like CompactibleSpace forward() but always calls cross_threshold() to
 167 // update the block offset table.  Removed initialize_threshold call because
 168 // CFLS does not use a block offset array for contiguous spaces.
 169 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 170                                     CompactPoint* cp, HeapWord* compact_top) {
 171   // q is alive
 172   // First check if we should switch compaction space
 173   assert(this == cp->space, "'this' should be current compaction space.");
 174   size_t compaction_max_size = pointer_delta(end(), compact_top);
 175   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 176     "virtual adjustObjectSize_v() method is not correct");
 177   size_t adjusted_size = adjustObjectSize(size);
 178   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 179          "no small fragments allowed");
 180   assert(minimum_free_block_size() == MinChunkSize,
 181          "for de-virtualized reference below");
  182   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 183   if (adjusted_size + MinChunkSize > compaction_max_size &&
 184       adjusted_size != compaction_max_size) {
 185     do {
 186       // switch to next compaction space
 187       cp->space->set_compaction_top(compact_top);
 188       cp->space = cp->space->next_compaction_space();
 189       if (cp->space == NULL) {
 190         cp->gen = GenCollectedHeap::heap()->young_gen();
 191         assert(cp->gen != NULL, "compaction must succeed");
 192         cp->space = cp->gen->first_compaction_space();
 193         assert(cp->space != NULL, "generation must have a first compaction space");
 194       }
 195       compact_top = cp->space->bottom();
 196       cp->space->set_compaction_top(compact_top);
 197       // The correct adjusted_size may not be the same as that for this method
 198       // (i.e., cp->space may no longer be "this" so adjust the size again.
 199       // Use the virtual method which is not used above to save the virtual
 200       // dispatch.
 201       adjusted_size = cp->space->adjust_object_size_v(size);
 202       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 203       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 204     } while (adjusted_size > compaction_max_size);
 205   }
 206 
 207   // store the forwarding pointer into the mark word
 208   if ((HeapWord*)q != compact_top) {
 209     q->forward_to(oop(compact_top));
 210     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 211   } else {
 212     // if the object isn't moving we can just set the mark to the default
 213     // mark and handle it specially later on.
 214     q->init_mark();
 215     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 216   }
 217 
 218   compact_top += adjusted_size;
 219 
 220   // we need to update the offset table so that the beginnings of objects can be
 221   // found during scavenge.  Note that we are updating the offset table based on
 222   // where the object will be once the compaction phase finishes.
 223 
  224   // Always call cross_threshold().  A contiguous space would call it only
  225   // when the compaction_top exceeds the current threshold, but that condition
  226   // does not hold for a non-contiguous space.
 227   cp->threshold =
 228     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 229   return compact_top;
 230 }
 231 
 232 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 233 // and use of single_block instead of alloc_block.  The name here is not really
 234 // appropriate - maybe a more general name could be invented for both the
 235 // contiguous and noncontiguous spaces.
 236 
 237 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 238   _bt.single_block(start, the_end);
 239   return end();
 240 }
 241 
 242 // Initialize them to NULL.
 243 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 244   for (size_t i = 0; i < IndexSetSize; i++) {
 245     // Note that on platforms where objects are double word aligned,
 246     // the odd array elements are not used.  It is convenient, however,
 247     // to map directly from the object size to the array element.
 248     _indexedFreeList[i].reset(IndexSetSize);
 249     _indexedFreeList[i].set_size(i);
 250     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 251     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 253     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 254   }
 255 }
 256 
 257 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 258   for (size_t i = 1; i < IndexSetSize; i++) {
 259     assert(_indexedFreeList[i].size() == (size_t) i,
 260       "Indexed free list sizes are incorrect");
 261     _indexedFreeList[i].reset(IndexSetSize);
 262     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 263     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 265     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 266   }
 267 }
 268 
 269 void CompactibleFreeListSpace::reset(MemRegion mr) {
 270   resetIndexedFreeListArray();
 271   dictionary()->reset();
 272   if (BlockOffsetArrayUseUnallocatedBlock) {
 273     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 274     // Everything's allocated until proven otherwise.
 275     _bt.set_unallocated_block(end());
 276   }
 277   if (!mr.is_empty()) {
 278     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 279     _bt.single_block(mr.start(), mr.word_size());
 280     FreeChunk* fc = (FreeChunk*) mr.start();
 281     fc->set_size(mr.word_size());
  282     if (mr.word_size() >= IndexSetSize) {
 283       returnChunkToDictionary(fc);
 284     } else {
 285       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 286       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 287     }
 288     coalBirth(mr.word_size());
 289   }
 290   _promoInfo.reset();
 291   _smallLinearAllocBlock._ptr = NULL;
 292   _smallLinearAllocBlock._word_size = 0;
 293 }
 294 
 295 void CompactibleFreeListSpace::reset_after_compaction() {
 296   // Reset the space to the new reality - one free chunk.
 297   MemRegion mr(compaction_top(), end());
 298   reset(mr);
 299   // Now refill the linear allocation block(s) if possible.
 300   if (_adaptive_freelists) {
 301     refillLinearAllocBlocksIfNeeded();
 302   } else {
 303     // Place as much of mr in the linAB as we can get,
 304     // provided it was big enough to go into the dictionary.
 305     FreeChunk* fc = dictionary()->find_largest_dict();
 306     if (fc != NULL) {
 307       assert(fc->size() == mr.word_size(),
 308              "Why was the chunk broken up?");
 309       removeChunkFromDictionary(fc);
 310       HeapWord* addr = (HeapWord*) fc;
  311       _smallLinearAllocBlock.set(addr, fc->size(),
 312         1024*SmallForLinearAlloc, fc->size());
 313       // Note that _unallocated_block is not updated here.
 314     }
 315   }
 316 }
 317 
 318 // Walks the entire dictionary, returning a coterminal
 319 // chunk, if it exists. Use with caution since it involves
 320 // a potentially complete walk of a potentially large tree.
 321 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 322 
 323   assert_lock_strong(&_freelistLock);
 324 
 325   return dictionary()->find_chunk_ends_at(end());
 326 }
 327 
 328 
 329 #ifndef PRODUCT
 330 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 331   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 332     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 333   }
 334 }
 335 
 336 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 337   size_t sum = 0;
 338   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 339     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 340   }
 341   return sum;
 342 }
 343 
 344 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 345   size_t count = 0;
 346   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 347     debug_only(
 348       ssize_t total_list_count = 0;
 349       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 350          fc = fc->next()) {
 351         total_list_count++;
 352       }
 353       assert(total_list_count ==  _indexedFreeList[i].count(),
 354         "Count in list is incorrect");
 355     )
 356     count += _indexedFreeList[i].count();
 357   }
 358   return count;
 359 }
 360 
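// Total chunk count for the space: chunks on the indexed free lists, plus
// chunks in the dictionary, plus one for the small linear allocation block
// if it currently holds any space.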
 361 size_t CompactibleFreeListSpace::totalCount() {
 362   size_t num = totalCountInIndexedFreeLists();
 363   num +=  dictionary()->total_count();
 364   if (_smallLinearAllocBlock._word_size != 0) {
 365     num++;
 366   }
 367   return num;
 368 }
 369 #endif
 370 
 371 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 372   FreeChunk* fc = (FreeChunk*) p;
 373   return fc->is_free();
 374 }
 375 
 376 size_t CompactibleFreeListSpace::used() const {
 377   return capacity() - free();
 378 }
 379 
 380 size_t CompactibleFreeListSpace::free() const {
 381   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 382   // if you do this while the structures are in flux you
 383   // may get an approximate answer only; for instance
 384   // because there is concurrent allocation either
 385   // directly by mutators or for promotion during a GC.
 386   // It's "MT-safe", however, in the sense that you are guaranteed
 387   // not to crash and burn, for instance, because of walking
 388   // pointers that could disappear as you were walking them.
  389   // The approximation is because the various components
  390   // that are read below are not read atomically (and,
  391   // further, the computation of totalSizeInIndexedFreeLists()
  392   // is itself a non-atomic computation). The normal use of
 393   // this is during a resize operation at the end of GC
 394   // and at that time you are guaranteed to get the
 395   // correct actual value. However, for instance, this is
 396   // also read completely asynchronously by the "perf-sampler"
 397   // that supports jvmstat, and you are apt to see the values
 398   // flicker in such cases.
 399   assert(_dictionary != NULL, "No _dictionary?");
 400   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 401           totalSizeInIndexedFreeLists() +
 402           _smallLinearAllocBlock._word_size) * HeapWordSize;
 403 }
 404 
 405 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 406   assert(_dictionary != NULL, "No _dictionary?");
 407   assert_locked();
 408   size_t res = _dictionary->max_chunk_size();
 409   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 410                        (size_t) SmallForLinearAlloc - 1));
 411   // XXX the following could potentially be pretty slow;
 412   // should one, pessimistically for the rare cases when res
 413   // calculated above is less than IndexSetSize,
 414   // just return res calculated above? My reasoning was that
 415   // those cases will be so rare that the extra time spent doesn't
 416   // really matter....
 417   // Note: do not change the loop test i >= res + IndexSetStride
 418   // to i > res below, because i is unsigned and res may be zero.
 419   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 420        i -= IndexSetStride) {
 421     if (_indexedFreeList[i].head() != NULL) {
 422       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 423       return i;
 424     }
 425   }
 426   return res;
 427 }
 428 
 429 void LinearAllocBlock::print_on(outputStream* st) const {
 430   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 431             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 432             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 433 }
 434 
 435 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 436   st->print_cr("COMPACTIBLE FREELIST SPACE");
 437   st->print_cr(" Space:");
 438   Space::print_on(st);
 439 
 440   st->print_cr("promoInfo:");
 441   _promoInfo.print_on(st);
 442 
 443   st->print_cr("_smallLinearAllocBlock");
 444   _smallLinearAllocBlock.print_on(st);
 445 
 446   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 447 
 448   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 449                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 450 }
 451 
 452 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 453 const {
 454   reportIndexedFreeListStatistics();
 455   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 456   gclog_or_tty->print_cr("---------------------------");
 457   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 458   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 459     _indexedFreeList[i].print_on(gclog_or_tty);
 460     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 461          fc = fc->next()) {
 462       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 463                           p2i(fc), p2i((HeapWord*)fc + i),
 464                           fc->cantCoalesce() ? "\t CC" : "");
 465     }
 466   }
 467 }
 468 
 469 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 470 const {
 471   _promoInfo.print_on(st);
 472 }
 473 
 474 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 475 const {
 476   _dictionary->report_statistics();
 477   st->print_cr("Layout of Freelists in Tree");
 478   st->print_cr("---------------------------");
 479   _dictionary->print_free_lists(st);
 480 }
 481 
 482 class BlkPrintingClosure: public BlkClosure {
 483   const CMSCollector*             _collector;
 484   const CompactibleFreeListSpace* _sp;
 485   const CMSBitMap*                _live_bit_map;
 486   const bool                      _post_remark;
 487   outputStream*                   _st;
 488 public:
 489   BlkPrintingClosure(const CMSCollector* collector,
 490                      const CompactibleFreeListSpace* sp,
 491                      const CMSBitMap* live_bit_map,
 492                      outputStream* st):
 493     _collector(collector),
 494     _sp(sp),
 495     _live_bit_map(live_bit_map),
 496     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 497     _st(st) { }
 498   size_t do_blk(HeapWord* addr);
 499 };
 500 
 501 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 502   size_t sz = _sp->block_size_no_stall(addr, _collector);
 503   assert(sz != 0, "Should always be able to compute a size");
 504   if (_sp->block_is_obj(addr)) {
 505     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 506     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 507       p2i(addr),
 508       dead ? "dead" : "live",
 509       sz,
 510       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 511     if (CMSPrintObjectsInDump && !dead) {
 512       oop(addr)->print_on(_st);
 513       _st->print_cr("--------------------------------------");
 514     }
 515   } else { // free block
 516     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 517       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 518     if (CMSPrintChunksInDump) {
 519       ((FreeChunk*)addr)->print_on(_st);
 520       _st->print_cr("--------------------------------------");
 521     }
 522   }
 523   return sz;
 524 }
 525 
 526 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 527   outputStream* st) {
 528   st->print_cr("\n=========================");
 529   st->print_cr("Block layout in CMS Heap:");
 530   st->print_cr("=========================");
 531   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 532   blk_iterate(&bpcl);
 533 
 534   st->print_cr("\n=======================================");
 535   st->print_cr("Order & Layout of Promotion Info Blocks");
 536   st->print_cr("=======================================");
 537   print_promo_info_blocks(st);
 538 
 539   st->print_cr("\n===========================");
 540   st->print_cr("Order of Indexed Free Lists");
 541   st->print_cr("=========================");
 542   print_indexed_free_lists(st);
 543 
 544   st->print_cr("\n=================================");
 545   st->print_cr("Order of Free Lists in Dictionary");
 546   st->print_cr("=================================");
 547   print_dictionary_free_lists(st);
 548 }
 549 
 550 
 551 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 552   assert_lock_strong(&_freelistLock);
 553   assert(PrintFLSStatistics != 0, "Reporting error");
 554   _dictionary->report_statistics();
 555   if (PrintFLSStatistics > 1) {
 556     reportIndexedFreeListStatistics();
 557     size_t total_size = totalSizeInIndexedFreeLists() +
 558                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 559     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 560   }
 561 }
 562 
 563 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 564   assert_lock_strong(&_freelistLock);
 565   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 566                       "--------------------------------\n");
 567   size_t total_size = totalSizeInIndexedFreeLists();
 568   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 569   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 570   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 571   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 572   if (free_blocks != 0) {
 573     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 574   }
 575 }
 576 
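// Count the free chunks currently held across all the indexed free lists;
// in debug builds each list's cached count is re-verified by walking the list.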
 577 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 578   size_t res = 0;
 579   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 580     debug_only(
 581       ssize_t recount = 0;
 582       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 583          fc = fc->next()) {
 584         recount += 1;
 585       }
 586       assert(recount == _indexedFreeList[i].count(),
 587         "Incorrect count in list");
 588     )
 589     res += _indexedFreeList[i].count();
 590   }
 591   return res;
 592 }
 593 
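// Return the size, in words, of the largest indexed free list that currently
// holds at least one chunk; return 0 if all the indexed lists are empty.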
 594 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 595   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 596     if (_indexedFreeList[i].head() != NULL) {
 597       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 598       return (size_t)i;
 599     }
 600   }
 601   return 0;
 602 }
 603 
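// Grow or shrink the space to the new end value.  On expansion, the newly
// covered range [prevEnd, value) is either placed in the small linear
// allocation block (non-adaptive free lists with an empty linAB) or added to
// the free lists, coalescing with a preceding free block where possible.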
 604 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 605   HeapWord* prevEnd = end();
 606   assert(prevEnd != value, "unnecessary set_end call");
 607   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 608         "New end is below unallocated block");
 609   _end = value;
 610   if (prevEnd != NULL) {
 611     // Resize the underlying block offset table.
 612     _bt.resize(pointer_delta(value, bottom()));
 613     if (value <= prevEnd) {
 614       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 615              "New end is below unallocated block");
 616     } else {
 617       // Now, take this new chunk and add it to the free blocks.
 618       // Note that the BOT has not yet been updated for this block.
 619       size_t newFcSize = pointer_delta(value, prevEnd);
 620       // XXX This is REALLY UGLY and should be fixed up. XXX
 621       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 622         // Mark the boundary of the new block in BOT
 623         _bt.mark_block(prevEnd, value);
 624         // put it all in the linAB
 625         if (ParallelGCThreads == 0) {
 626           _smallLinearAllocBlock._ptr = prevEnd;
 627           _smallLinearAllocBlock._word_size = newFcSize;
 628           repairLinearAllocBlock(&_smallLinearAllocBlock);
 629         } else { // ParallelGCThreads > 0
 630           MutexLockerEx x(parDictionaryAllocLock(),
 631                           Mutex::_no_safepoint_check_flag);
 632           _smallLinearAllocBlock._ptr = prevEnd;
 633           _smallLinearAllocBlock._word_size = newFcSize;
 634           repairLinearAllocBlock(&_smallLinearAllocBlock);
 635         }
 636         // Births of chunks put into a LinAB are not recorded.  Births
 637         // of chunks as they are allocated out of a LinAB are.
 638       } else {
 639         // Add the block to the free lists, if possible coalescing it
 640         // with the last free block, and update the BOT and census data.
 641         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 642       }
 643     }
 644   }
 645 }
 646 
 647 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 648   CompactibleFreeListSpace* _cfls;
 649   CMSCollector* _collector;
 650 protected:
 651   // Override.
 652 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 653   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 654                                        HeapWord* bottom, HeapWord* top, \
 655                                        ClosureType* cl);                \
 656       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 657                                        HeapWord* bottom, HeapWord* top, \
 658                                        ClosureType* cl);                \
 659     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 660                                        HeapWord* bottom, HeapWord* top, \
 661                                        ClosureType* cl)
 662   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 663   walk_mem_region_with_cl_DECL(FilteringClosure);
 664 
 665 public:
 666   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 667                       CMSCollector* collector,
 668                       ExtendedOopClosure* cl,
 669                       CardTableModRefBS::PrecisionStyle precision,
 670                       HeapWord* boundary) :
 671     Filtering_DCTOC(sp, cl, precision, boundary),
 672     _cfls(sp), _collector(collector) {}
 673 };
 674 
 675 // We de-virtualize the block-related calls below, since we know that our
 676 // space is a CompactibleFreeListSpace.
 677 
 678 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 679 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 680                                                  HeapWord* bottom,              \
 681                                                  HeapWord* top,                 \
 682                                                  ClosureType* cl) {             \
 683    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 684    if (is_par) {                                                                \
 685      assert(SharedHeap::heap()->n_par_threads() ==                              \
 686             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 687      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 688    } else {                                                                     \
 689      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 690    }                                                                            \
 691 }                                                                               \
 692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 693                                                       HeapWord* bottom,         \
 694                                                       HeapWord* top,            \
 695                                                       ClosureType* cl) {        \
 696   /* Skip parts that are before "mr", in case "block_start" sent us             \
 697      back too far. */                                                           \
 698   HeapWord* mr_start = mr.start();                                              \
 699   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 700   HeapWord* next = bottom + bot_size;                                           \
 701   while (next < mr_start) {                                                     \
 702     bottom = next;                                                              \
 703     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 704     next = bottom + bot_size;                                                   \
 705   }                                                                             \
 706                                                                                 \
 707   while (bottom < top) {                                                        \
 708     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 709         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 710                     oop(bottom)) &&                                             \
 711         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 712       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 713       bottom += _cfls->adjustObjectSize(word_sz);                               \
 714     } else {                                                                    \
 715       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 716     }                                                                           \
 717   }                                                                             \
 718 }                                                                               \
 719 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 720                                                         HeapWord* bottom,       \
 721                                                         HeapWord* top,          \
 722                                                         ClosureType* cl) {      \
 723   /* Skip parts that are before "mr", in case "block_start" sent us             \
 724      back too far. */                                                           \
 725   HeapWord* mr_start = mr.start();                                              \
 726   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 727   HeapWord* next = bottom + bot_size;                                           \
 728   while (next < mr_start) {                                                     \
 729     bottom = next;                                                              \
 730     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 731     next = bottom + bot_size;                                                   \
 732   }                                                                             \
 733                                                                                 \
 734   while (bottom < top) {                                                        \
 735     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 736         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 737                     oop(bottom)) &&                                             \
 738         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 739       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 740       bottom += _cfls->adjustObjectSize(word_sz);                               \
 741     } else {                                                                    \
 742       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 743     }                                                                           \
 744   }                                                                             \
 745 }
 746 
 747 // (There are only two of these, rather than N, because the split is due
 748 // only to the introduction of the FilteringClosure, a local part of the
 749 // impl of this abstraction.)
 750 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 751 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 752 
 753 DirtyCardToOopClosure*
 754 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 755                                       CardTableModRefBS::PrecisionStyle precision,
 756                                       HeapWord* boundary) {
 757   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 758 }
 759 
 760 
 761 // Note on locking for the space iteration functions:
 762 // since the collector's iteration activities are concurrent with
 763 // allocation activities by mutators, absent a suitable mutual exclusion
 764 // mechanism the iterators may go awry. For instance a block being iterated
 765 // may suddenly be allocated or divided up and part of it allocated and
 766 // so on.
 767 
 768 // Apply the given closure to each block in the space.
 769 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 770   assert_lock_strong(freelistLock());
 771   HeapWord *cur, *limit;
 772   for (cur = bottom(), limit = end(); cur < limit;
 773        cur += cl->do_blk_careful(cur));
 774 }
 775 
 776 // Apply the given closure to each block in the space.
 777 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 778   assert_lock_strong(freelistLock());
 779   HeapWord *cur, *limit;
 780   for (cur = bottom(), limit = end(); cur < limit;
 781        cur += cl->do_blk(cur));
 782 }
 783 
 784 // Apply the given closure to each oop in the space.
 785 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 786   assert_lock_strong(freelistLock());
 787   HeapWord *cur, *limit;
 788   size_t curSize;
 789   for (cur = bottom(), limit = end(); cur < limit;
 790        cur += curSize) {
 791     curSize = block_size(cur);
 792     if (block_is_obj(cur)) {
 793       oop(cur)->oop_iterate(cl);
 794     }
 795   }
 796 }
 797 
 798 // NOTE: In the following methods, in order to safely be able to
 799 // apply the closure to an object, we need to be sure that the
 800 // object has been initialized. We are guaranteed that an object
 801 // is initialized if we are holding the Heap_lock with the
 802 // world stopped.
 803 void CompactibleFreeListSpace::verify_objects_initialized() const {
 804   if (is_init_completed()) {
 805     assert_locked_or_safepoint(Heap_lock);
 806     if (Universe::is_fully_initialized()) {
 807       guarantee(SafepointSynchronize::is_at_safepoint(),
 808                 "Required for objects to be initialized");
 809     }
 810   } // else make a concession at vm start-up
 811 }
 812 
 813 // Apply the given closure to each object in the space
 814 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 815   assert_lock_strong(freelistLock());
 816   NOT_PRODUCT(verify_objects_initialized());
 817   HeapWord *cur, *limit;
 818   size_t curSize;
 819   for (cur = bottom(), limit = end(); cur < limit;
 820        cur += curSize) {
 821     curSize = block_size(cur);
 822     if (block_is_obj(cur)) {
 823       blk->do_object(oop(cur));
 824     }
 825   }
 826 }
 827 
 828 // Apply the given closure to each live object in the space
  829 //   The use of CompactibleFreeListSpace
  830 // by ConcurrentMarkSweepGeneration for concurrent GCs allows the space
  831 // to contain objects with references to objects that are no longer
  832 // valid.  For example, an object may reference another object
  833 // that has already been swept up (collected).  This method uses
 834 // obj_is_alive() to determine whether it is safe to apply the closure to
 835 // an object.  See obj_is_alive() for details on how liveness of an
 836 // object is decided.
 837 
 838 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 839   assert_lock_strong(freelistLock());
 840   NOT_PRODUCT(verify_objects_initialized());
 841   HeapWord *cur, *limit;
 842   size_t curSize;
 843   for (cur = bottom(), limit = end(); cur < limit;
 844        cur += curSize) {
 845     curSize = block_size(cur);
 846     if (block_is_obj(cur) && obj_is_alive(cur)) {
 847       blk->do_object(oop(cur));
 848     }
 849   }
 850 }
 851 
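// Apply cl to each object in mr, resuming from the closure's previously
// recorded high-water mark ("previous") and recording a new one afterwards,
// unless the last block processed was an object array.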
 852 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 853                                                   UpwardsObjectClosure* cl) {
 854   assert_locked(freelistLock());
 855   NOT_PRODUCT(verify_objects_initialized());
 856   assert(!mr.is_empty(), "Should be non-empty");
 857   // We use MemRegion(bottom(), end()) rather than used_region() below
 858   // because the two are not necessarily equal for some kinds of
 859   // spaces, in particular, certain kinds of free list spaces.
 860   // We could use the more complicated but more precise:
 861   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 862   // but the slight imprecision seems acceptable in the assertion check.
 863   assert(MemRegion(bottom(), end()).contains(mr),
 864          "Should be within used space");
 865   HeapWord* prev = cl->previous();   // max address from last time
 866   if (prev >= mr.end()) { // nothing to do
 867     return;
 868   }
 869   // This assert will not work when we go from cms space to perm
  870   // space, and use the same closure. Easy fix deferred for later. XXX YSR
 871   // assert(prev == NULL || contains(prev), "Should be within space");
 872 
 873   bool last_was_obj_array = false;
 874   HeapWord *blk_start_addr, *region_start_addr;
 875   if (prev > mr.start()) {
 876     region_start_addr = prev;
 877     blk_start_addr    = prev;
 878     // The previous invocation may have pushed "prev" beyond the
  879     // last allocated block, yet there may still be blocks
 880     // in this region due to a particular coalescing policy.
 881     // Relax the assertion so that the case where the unallocated
 882     // block is maintained and "prev" is beyond the unallocated
 883     // block does not cause the assertion to fire.
 884     assert((BlockOffsetArrayUseUnallocatedBlock &&
 885             (!is_in(prev))) ||
 886            (blk_start_addr == block_start(region_start_addr)), "invariant");
 887   } else {
 888     region_start_addr = mr.start();
 889     blk_start_addr    = block_start(region_start_addr);
 890   }
 891   HeapWord* region_end_addr = mr.end();
 892   MemRegion derived_mr(region_start_addr, region_end_addr);
 893   while (blk_start_addr < region_end_addr) {
 894     const size_t size = block_size(blk_start_addr);
 895     if (block_is_obj(blk_start_addr)) {
 896       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 897     } else {
 898       last_was_obj_array = false;
 899     }
 900     blk_start_addr += size;
 901   }
 902   if (!last_was_obj_array) {
 903     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 904            "Should be within (closed) used space");
 905     assert(blk_start_addr > prev, "Invariant");
 906     cl->set_previous(blk_start_addr); // min address for next time
 907   }
 908 }
 909 
 910 // Callers of this iterator beware: The closure application should
 911 // be robust in the face of uninitialized objects and should (always)
 912 // return a correct size so that the next addr + size below gives us a
 913 // valid block boundary. [See for instance,
 914 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 915 // in ConcurrentMarkSweepGeneration.cpp.]
 916 HeapWord*
 917 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 918   ObjectClosureCareful* cl) {
 919   assert_lock_strong(freelistLock());
 920   // Can't use used_region() below because it may not necessarily
 921   // be the same as [bottom(),end()); although we could
 922   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 923   // that appears too cumbersome, so we just do the simpler check
 924   // in the assertion below.
 925   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 926          "mr should be non-empty and within used space");
 927   HeapWord *addr, *end;
 928   size_t size;
 929   for (addr = block_start_careful(mr.start()), end  = mr.end();
 930        addr < end; addr += size) {
 931     FreeChunk* fc = (FreeChunk*)addr;
 932     if (fc->is_free()) {
 933       // Since we hold the free list lock, which protects direct
 934       // allocation in this generation by mutators, a free object
 935       // will remain free throughout this iteration code.
 936       size = fc->size();
 937     } else {
 938       // Note that the object need not necessarily be initialized,
 939       // because (for instance) the free list lock does NOT protect
 940       // object initialization. The closure application below must
 941       // therefore be correct in the face of uninitialized objects.
 942       size = cl->do_object_careful_m(oop(addr), mr);
 943       if (size == 0) {
 944         // An unparsable object found. Signal early termination.
 945         return addr;
 946       }
 947     }
 948   }
 949   return NULL;
 950 }
 951 
 952 
 953 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 954   NOT_PRODUCT(verify_objects_initialized());
 955   return _bt.block_start(p);
 956 }
 957 
 958 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 959   return _bt.block_start_careful(p);
 960 }
 961 
 962 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 963   NOT_PRODUCT(verify_objects_initialized());
 964   // This must be volatile, or else there is a danger that the compiler
 965   // will compile the code below into a sometimes-infinite loop, by keeping
 966   // the value read the first time in a register.
 967   while (true) {
 968     // We must do this until we get a consistent view of the object.
 969     if (FreeChunk::indicatesFreeChunk(p)) {
 970       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 971       size_t res = fc->size();
 972 
 973       // Bugfix for systems with weak memory model (PPC64/IA64). The
 974       // block's free bit was set and we have read the size of the
 975       // block. Acquire and check the free bit again. If the block is
 976       // still free, the read size is correct.
 977       OrderAccess::acquire();
 978 
 979       // If the object is still a free chunk, return the size, else it
 980       // has been allocated so try again.
 981       if (FreeChunk::indicatesFreeChunk(p)) {
 982         assert(res != 0, "Block size should not be 0");
 983         return res;
 984       }
 985     } else {
 986       // must read from what 'p' points to in each loop.
 987       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 988       if (k != NULL) {
 989         assert(k->is_klass(), "Should really be klass oop.");
 990         oop o = (oop)p;
 991         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 992 
 993         // Bugfix for systems with weak memory model (PPC64/IA64).
 994         // The object o may be an array. Acquire to make sure that the array
 995         // size (third word) is consistent.
 996         OrderAccess::acquire();
 997 
 998         size_t res = o->size_given_klass(k);
 999         res = adjustObjectSize(res);
1000         assert(res != 0, "Block size should not be 0");
1001         return res;
1002       }
1003     }
1004   }
1005 }
1006 
1007 // TODO: Now that is_parsable is gone, we should combine these two functions.
1008 // A variant of the above that uses the Printezis bits for
1009 // unparsable but allocated objects. This avoids any possible
1010 // stalls waiting for mutators to initialize objects, and is
1011 // thus potentially faster than the variant above. However,
1012 // this variant may return a zero size for a block that is
1013 // under mutation and for which a consistent size cannot be
1014 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1015 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1016                                                      const CMSCollector* c)
1017 const {
1018   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1019   // This must be volatile, or else there is a danger that the compiler
1020   // will compile the code below into a sometimes-infinite loop, by keeping
1021   // the value read the first time in a register.
1022   DEBUG_ONLY(uint loops = 0;)
1023   while (true) {
1024     // We must do this until we get a consistent view of the object.
1025     if (FreeChunk::indicatesFreeChunk(p)) {
1026       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1027       size_t res = fc->size();
1028 
1029       // Bugfix for systems with weak memory model (PPC64/IA64). The
1030       // free bit of the block was set and we have read the size of
1031       // the block. Acquire and check the free bit again. If the
1032       // block is still free, the read size is correct.
1033       OrderAccess::acquire();
1034 
1035       if (FreeChunk::indicatesFreeChunk(p)) {
1036         assert(res != 0, "Block size should not be 0");
1037         assert(loops == 0, "Should be 0");
1038         return res;
1039       }
1040     } else {
1041       // must read from what 'p' points to in each loop.
1042       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1043       // We trust the size of any object that has a non-NULL
1044       // klass and (for those in the perm gen) is parsable
1045       // -- irrespective of its conc_safe-ty.
1046       if (k != NULL) {
1047         assert(k->is_klass(), "Should really be klass oop.");
1048         oop o = (oop)p;
1049         assert(o->is_oop(), "Should be an oop");
1050 
1051         // Bugfix for systems with weak memory model (PPC64/IA64).
1052         // The object o may be an array. Acquire to make sure that the array
1053         // size (third word) is consistent.
1054         OrderAccess::acquire();
1055 
1056         size_t res = o->size_given_klass(k);
1057         res = adjustObjectSize(res);
1058         assert(res != 0, "Block size should not be 0");
1059         return res;
1060       } else {
1061         // May return 0 if P-bits not present.
1062         return c->block_size_if_printezis_bits(p);
1063       }
1064     }
1065     assert(loops == 0, "Can loop at most once");
1066     DEBUG_ONLY(loops++;)
1067   }
1068 }
1069 
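// Variant of block_size() used where no parallel activity is expected on the
// space; a single read of the free bit suffices, so there is no
// acquire/re-check loop here.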
1070 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1071   NOT_PRODUCT(verify_objects_initialized());
1072   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1073   FreeChunk* fc = (FreeChunk*)p;
1074   if (fc->is_free()) {
1075     return fc->size();
1076   } else {
1077     // Ignore mark word because this may be a recently promoted
1078     // object whose mark word is used to chain together grey
1079     // objects (the last one would have a null value).
1080     assert(oop(p)->is_oop(true), "Should be an oop");
1081     return adjustObjectSize(oop(p)->size());
1082   }
1083 }
1084 
1085 // This implementation assumes that the property of "being an object" is
1086 // stable.  But being a free chunk may not be (because of parallel
 1087 // promotion).
1088 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1089   FreeChunk* fc = (FreeChunk*)p;
1090   assert(is_in_reserved(p), "Should be in space");
1091   // When doing a mark-sweep-compact of the CMS generation, this
1092   // assertion may fail because prepare_for_compaction() uses
1093   // space that is garbage to maintain information on ranges of
1094   // live objects so that these live ranges can be moved as a whole.
1095   // Comment out this assertion until that problem can be solved
 1096   // (i.e., the block start calculation may look at objects
 1097   // at addresses below "p" in finding the object that contains "p",
 1098   // and those objects (if garbage) may have been modified to hold
 1099   // live range information).
1100   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1101   //        "Should be a block boundary");
1102   if (FreeChunk::indicatesFreeChunk(p)) return false;
1103   Klass* k = oop(p)->klass_or_null();
1104   if (k != NULL) {
1105     // Ignore mark word because it may have been used to
1106     // chain together promoted objects (the last one
1107     // would have a null value).
1108     assert(oop(p)->is_oop(true), "Should be an oop");
1109     return true;
1110   } else {
1111     return false;  // Was not an object at the start of collection.
1112   }
1113 }
1114 
1115 // Check if the object is alive. This fact is checked either by consulting
1116 // the main marking bitmap in the sweeping phase or, if it's a permanent
1117 // generation and we're not in the sweeping phase, by checking the
1118 // perm_gen_verify_bit_map where we store the "deadness" information if
1119 // we did not sweep the perm gen in the most recent previous GC cycle.
1120 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1121   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1122          "Else races are possible");
1123   assert(block_is_obj(p), "The address should point to an object");
1124 
1125   // If we're sweeping, we use object liveness information from the main bit map
1126   // for both perm gen and old gen.
1127   // We don't need to lock the bitmap (live_map or dead_map below), because
1128   // EITHER we are in the middle of the sweeping phase, and the
1129   // main marking bit map (live_map below) is locked,
1130   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1131   // is stable, because it's mutated only in the sweeping phase.
1132   // NOTE: This method is also used by jmap where, if class unloading is
1133   // off, the results can return "false" for legitimate perm objects,
1134   // when we are not in the midst of a sweeping phase, which can result
1135   // in jmap not reporting certain perm gen objects. This will be moot
1136   // if/when the perm gen goes away in the future.
1137   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1138     CMSBitMap* live_map = _collector->markBitMap();
1139     return live_map->par_isMarked((HeapWord*) p);
1140   }
1141   return true;
1142 }
1143 
1144 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1145   FreeChunk* fc = (FreeChunk*)p;
1146   assert(is_in_reserved(p), "Should be in space");
1147   assert(_bt.block_start(p) == p, "Should be a block boundary");
1148   if (!fc->is_free()) {
1149     // Ignore mark word because it may have been used to
1150     // chain together promoted objects (the last one
1151     // would have a null value).
1152     assert(oop(p)->is_oop(true), "Should be an oop");
1153     return true;
1154   }
1155   return false;
1156 }
1157 
1158 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1159 // approximate answer if you don't hold the freelistlock when you call this.
1160 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1161   size_t size = 0;
1162   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1163     debug_only(
 1164       // We may be calling here without the lock, in which case we
1165       // won't do this modest sanity check.
1166       if (freelistLock()->owned_by_self()) {
1167         size_t total_list_size = 0;
1168         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1169           fc = fc->next()) {
1170           total_list_size += i;
1171         }
1172         assert(total_list_size == i * _indexedFreeList[i].count(),
1173                "Count in list is incorrect");
1174       }
1175     )
1176     size += i * _indexedFreeList[i].count();
1177   }
1178   return size;
1179 }
1180 
1181 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1182   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1183   return allocate(size);
1184 }
1185 
1186 HeapWord*
1187 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1188   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1189 }
1190 
1191 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1192   assert_lock_strong(freelistLock());
1193   HeapWord* res = NULL;
1194   assert(size == adjustObjectSize(size),
1195          "use adjustObjectSize() before calling into allocate()");
1196 
1197   if (_adaptive_freelists) {
1198     res = allocate_adaptive_freelists(size);
1199   } else {  // non-adaptive free lists
1200     res = allocate_non_adaptive_freelists(size);
1201   }
1202 
1203   if (res != NULL) {
1204     // check that res does lie in this space!
1205     assert(is_in_reserved(res), "Not in this space!");
1206     assert(is_aligned((void*)res), "alignment check");
1207 
1208     FreeChunk* fc = (FreeChunk*)res;
1209     fc->markNotFree();
1210     assert(!fc->is_free(), "shouldn't be marked free");
1211     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1212     // Verify that the block offset table shows this to
1213     // be a single block, but not one which is unallocated.
1214     _bt.verify_single_block(res, size);
1215     _bt.verify_not_unallocated(res, size);
1216     // mangle a just allocated object with a distinct pattern.
1217     debug_only(fc->mangleAllocated(size));
1218   }
1219 
1220   return res;
1221 }
1222 
1223 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1224   HeapWord* res = NULL;
1225   // try and use linear allocation for smaller blocks
1226   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1227     // if successful, the following also adjusts block offset table
1228     res = getChunkFromSmallLinearAllocBlock(size);
1229   }
1230   // Else triage to the indexed lists or the dictionary, by size
1231   if (res == NULL) {
1232     if (size < SmallForDictionary) {
1233       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1234     } else {
1235       // else get it from the big dictionary; if even this doesn't
1236       // work we are out of luck.
1237       res = (HeapWord*)getChunkFromDictionaryExact(size);
1238     }
1239   }
1240 
1241   return res;
1242 }
1243 
1244 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1245   assert_lock_strong(freelistLock());
1246   HeapWord* res = NULL;
1247   assert(size == adjustObjectSize(size),
1248          "use adjustObjectSize() before calling into allocate()");
1249 
1250   // Strategy
1251   //   if small
1252   //     exact size from small object indexed list if small
1253   //     small or large linear allocation block (linAB) as appropriate
1254   //     take from lists of greater sized chunks
1255   //   else
1256   //     dictionary
1257   //     small or large linear allocation block if it has the space
1258   // Try allocating exact size from indexTable first
1259   if (size < IndexSetSize) {
1260     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1261     if (res != NULL) {
1262       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1263         "Not removed from free list");
1264       // no block offset table adjustment is necessary on blocks in
1265       // the indexed lists.
1266 
1267     // Try allocating from the small LinAB
1268     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1269         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1270         // if successful, the above also adjusts block offset table
1271         // Note that this call will refill the LinAB to
1272         // satisfy the request.  This differs from the
1273         // evm behavior.
1274         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1275     } else {
1276       // Raid the exact free lists larger than size, even if they are not
1277       // overpopulated.
1278       res = (HeapWord*) getChunkFromGreater(size);
1279     }
1280   } else {
1281     // Big objects get allocated directly from the dictionary.
1282     res = (HeapWord*) getChunkFromDictionaryExact(size);
1283     if (res == NULL) {
1284       // Try hard not to fail since an allocation failure will likely
1285       // trigger a synchronous GC.  Try to get the space from the
1286       // allocation blocks.
1287       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1288     }
1289   }
1290 
1291   return res;
1292 }
1293 
1294 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1295 // when promoting obj.
1296 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1297   // Depending on the object size, expansion may require refilling either a
1298   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2 * MinChunkSize
1299   // is added because the dictionary may over-allocate to avoid fragmentation.
1300   size_t space = obj_size;
1301   if (!_adaptive_freelists) {
1302     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1303   }
1304   space += _promoInfo.refillSize() + 2 * MinChunkSize;
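       // With adaptive free lists the estimate is therefore
       //   obj_size + _promoInfo.refillSize() + 2 * MinChunkSize;
       // with non-adaptive free lists, obj_size is first raised to at least
       // _smallLinearAllocBlock._refillSize.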
1305   return space;
1306 }
1307 
1308 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1309   FreeChunk* ret;
1310 
1311   assert(numWords >= MinChunkSize, "Size is less than minimum");
1312   assert(linearAllocationWouldFail() || bestFitFirst(),
1313     "Should not be here");
1314 
1315   size_t i;
1316   size_t currSize = numWords + MinChunkSize;
1317   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
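       // MinChunkSize is added to the request so that, once numWords have been
       // carved off, the remainder is itself at least a minimum-sized free
       // chunk that can go back onto a free list (see the size check on the
       // dictionary path below).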
1318   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1319     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1320     if (fl->head()) {
1321       ret = getFromListGreater(fl, numWords);
1322       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1323       return ret;
1324     }
1325   }
1326 
1327   currSize = MAX2((size_t)SmallForDictionary,
1328                   (size_t)(numWords + MinChunkSize));
1329 
1330   /* Try to get a chunk that satisfies request, while avoiding
1331      fragmentation that can't be handled. */
1332   {
1333     ret =  dictionary()->get_chunk(currSize);
1334     if (ret != NULL) {
1335       assert(ret->size() - numWords >= MinChunkSize,
1336              "Chunk is too small");
1337       _bt.allocated((HeapWord*)ret, ret->size());
1338       /* Carve returned chunk. */
1339       (void) splitChunkAndReturnRemainder(ret, numWords);
1340       /* Label this as no longer a free chunk. */
1341       assert(ret->is_free(), "This chunk should be free");
1342       ret->link_prev(NULL);
1343     }
1344     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1345     return ret;
1346   }
1347   ShouldNotReachHere();
1348 }
1349 
1350 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1351   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1352   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1353 }
1354 
1355 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1356   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1357          (_smallLinearAllocBlock._word_size == fc->size()),
1358          "Linear allocation block shows incorrect size");
1359   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1360           (_smallLinearAllocBlock._word_size == fc->size()));
1361 }
1362 
1363 // Check if the purported free chunk is present either as a linear
1364 // allocation block, the size-indexed table of (smaller) free blocks,
1365 // or the larger free blocks kept in the binary tree dictionary.
1366 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1367   if (verify_chunk_is_linear_alloc_block(fc)) {
1368     return true;
1369   } else if (fc->size() < IndexSetSize) {
1370     return verifyChunkInIndexedFreeLists(fc);
1371   } else {
1372     return dictionary()->verify_chunk_in_free_list(fc);
1373   }
1374 }
1375 
1376 #ifndef PRODUCT
1377 void CompactibleFreeListSpace::assert_locked() const {
1378   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1379 }
1380 
1381 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1382   CMSLockVerifier::assert_locked(lock);
1383 }
1384 #endif
1385 
1386 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1387   // In the parallel case, the main thread holds the free list lock
1388   // on behalf of the parallel threads.
1389   FreeChunk* fc;
1390   {
1391     // If GC is parallel, this might be called by several threads.
1392     // This should be rare enough that the locking overhead won't affect
1393     // the sequential code.
1394     MutexLockerEx x(parDictionaryAllocLock(),
1395                     Mutex::_no_safepoint_check_flag);
1396     fc = getChunkFromDictionary(size);
1397   }
1398   if (fc != NULL) {
1399     fc->dontCoalesce();
1400     assert(fc->is_free(), "Should be free, but not coalescable");
1401     // Verify that the block offset table shows this to
1402     // be a single block, but not one which is unallocated.
1403     _bt.verify_single_block((HeapWord*)fc, fc->size());
1404     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1405   }
1406   return fc;
1407 }
1408 
1409 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1410   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1411   assert_locked();
1412 
1413   // if we are tracking promotions, then first ensure space for
1414   // promotion (including spooling space for saving header if necessary).
1415   // then allocate and copy, then track promoted info if needed.
1416   // When tracking (see PromotionInfo::track()), the mark word may
1417   // be displaced and in this case restoration of the mark word
1418   // occurs in the (oop_since_save_marks_)iterate phase.
1419   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1420     return NULL;
1421   }
1422   // Call the allocate(size_t, bool) form directly to avoid the
1423   // additional call through the allocate(size_t) form.  Having
1424   // the compiler inline the call is problematic because allocate(size_t)
1425   // is a virtual method.
1426   HeapWord* res = allocate(adjustObjectSize(obj_size));
1427   if (res != NULL) {
1428     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1429     // if we should be tracking promotions, do so.
1430     if (_promoInfo.tracking()) {
1431         _promoInfo.track((PromotedObject*)res);
1432     }
1433   }
1434   return oop(res);
1435 }
1436 
1437 HeapWord*
1438 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1439   assert_locked();
1440   assert(size >= MinChunkSize, "minimum chunk size");
1441   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1442     "maximum from smallLinearAllocBlock");
1443   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1444 }
1445 
1446 HeapWord*
1447 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1448                                                        size_t size) {
1449   assert_locked();
1450   assert(size >= MinChunkSize, "too small");
1451   HeapWord* res = NULL;
1452   // Try to do linear allocation from blk, making sure that it has not been exhausted.
1453   if (blk->_word_size == 0) {
1454     // We have probably been unable to fill this either in the prologue or
1455     // when it was exhausted at the last linear allocation. Bail out until
1456     // next time.
1457     assert(blk->_ptr == NULL, "consistency check");
1458     return NULL;
1459   }
1460   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1461   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1462   if (res != NULL) return res;
1463 
1464   // about to exhaust this linear allocation block
1465   if (blk->_word_size == size) { // exactly satisfied
1466     res = blk->_ptr;
1467     _bt.allocated(res, blk->_word_size);
1468   } else if (size + MinChunkSize <= blk->_refillSize) {
1469     size_t sz = blk->_word_size;
1470     // Update _unallocated_block if the size is such that chunk would be
1471     // returned to the indexed free list.  All other chunks in the indexed
1472     // free lists are allocated from the dictionary so that _unallocated_block
1473     // has already been adjusted for them.  Do it here so that the cost
1474     // is incurred for all chunks added back to the indexed free lists.
1475     if (sz < SmallForDictionary) {
1476       _bt.allocated(blk->_ptr, sz);
1477     }
1478     // Return the chunk that isn't big enough, and then refill below.
1479     addChunkToFreeLists(blk->_ptr, sz);
1480     split_birth(sz);
1481     // Don't keep statistics on adding back chunk from a LinAB.
1482   } else {
1483     // A refilled block would not satisfy the request.
1484     return NULL;
1485   }
1486 
1487   blk->_ptr = NULL; blk->_word_size = 0;
1488   refillLinearAllocBlock(blk);
1489   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1490          "block was replenished");
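       // res is non-NULL here only in the exact-fit case above; otherwise the
       // request is carved out of the freshly refilled block below.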
1491   if (res != NULL) {
1492     split_birth(size);
1493     repairLinearAllocBlock(blk);
1494   } else if (blk->_ptr != NULL) {
1495     res = blk->_ptr;
1496     size_t blk_size = blk->_word_size;
1497     blk->_word_size -= size;
1498     blk->_ptr  += size;
1499     split_birth(size);
1500     repairLinearAllocBlock(blk);
1501     // Update BOT last so that other (parallel) GC threads see a consistent
1502     // view of the BOT and free blocks.
1503     // Above must occur before BOT is updated below.
1504     OrderAccess::storestore();
1505     _bt.split_block(res, blk_size, size);  // adjust block offset table
1506   }
1507   return res;
1508 }
1509 
1510 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1511                                         LinearAllocBlock* blk,
1512                                         size_t size) {
1513   assert_locked();
1514   assert(size >= MinChunkSize, "too small");
1515 
1516   HeapWord* res = NULL;
1517   // This is the common case.  Keep it simple.
1518   if (blk->_word_size >= size + MinChunkSize) {
1519     assert(blk->_ptr != NULL, "consistency check");
1520     res = blk->_ptr;
1521     // Note that the BOT is up-to-date for the linAB before allocation.  It
1522     // indicates the start of the linAB.  The split_block() updates the
1523     // BOT for the linAB after the allocation (indicates the start of the
1524     // next chunk to be allocated).
1525     size_t blk_size = blk->_word_size;
1526     blk->_word_size -= size;
1527     blk->_ptr  += size;
1528     split_birth(size);
1529     repairLinearAllocBlock(blk);
1530     // Update BOT last so that other (parallel) GC threads see a consistent
1531     // view of the BOT and free blocks.
1532     // Above must occur before BOT is updated below.
1533     OrderAccess::storestore();
1534     _bt.split_block(res, blk_size, size);  // adjust block offset table
1535     _bt.allocated(res, size);
1536   }
1537   return res;
1538 }
1539 
1540 FreeChunk*
1541 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1542   assert_locked();
1543   assert(size < SmallForDictionary, "just checking");
1544   FreeChunk* res;
1545   res = _indexedFreeList[size].get_chunk_at_head();
1546   if (res == NULL) {
1547     res = getChunkFromIndexedFreeListHelper(size);
1548   }
1549   _bt.verify_not_unallocated((HeapWord*) res, size);
1550   assert(res == NULL || res->size() == size, "Incorrect block size");
1551   return res;
1552 }
1553 
1554 FreeChunk*
1555 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1556   bool replenish) {
1557   assert_locked();
1558   FreeChunk* fc = NULL;
1559   if (size < SmallForDictionary) {
1560     assert(_indexedFreeList[size].head() == NULL ||
1561       _indexedFreeList[size].surplus() <= 0,
1562       "List for this size should be empty or under populated");
1563     // Try best fit in exact lists before replenishing the list
1564     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1565       // Replenish list.
1566       //
1567       // Things tried that failed.
1568       //   Tried allocating out of the two LinAB's first before
1569       // replenishing lists.
1570       //   Tried small linAB of size 256 (size in indexed list)
1571       // and replenishing indexed lists from the small linAB.
1572       //
1573       FreeChunk* newFc = NULL;
1574       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
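           // For example, with CMSIndexedFreeListReplenish == 4 and a 6-word
           // request, replenish_size is 24 words; if a 24-word chunk is
           // obtained below, it is carved into four 6-word blocks, three of
           // which are linked onto the 6-word indexed list while the last is
           // returned to the caller.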
1575       if (replenish_size < SmallForDictionary) {
1576         // Do not replenish from an underpopulated size.
1577         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1578             _indexedFreeList[replenish_size].head() != NULL) {
1579           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1580         } else if (bestFitFirst()) {
1581           newFc = bestFitSmall(replenish_size);
1582         }
1583       }
1584       if (newFc == NULL && replenish_size > size) {
1585         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1586         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1587       }
1588       // Note: The split-death stats update for the block obtained above
1589       // is recorded below, precisely when we know we are actually going
1590       // to split it into more than one piece.
1591       if (newFc != NULL) {
1592         if  (replenish || CMSReplenishIntermediate) {
1593           // Replenish this list and return one block to caller.
1594           size_t i;
1595           FreeChunk *curFc, *nextFc;
1596           size_t num_blk = newFc->size() / size;
1597           assert(num_blk >= 1, "Smaller than requested?");
1598           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1599           if (num_blk > 1) {
1600             // we are sure we will be splitting the block just obtained
1601             // into multiple pieces; record the split-death of the original
1602             splitDeath(replenish_size);
1603           }
1604           // carve up and link blocks 0, ..., num_blk - 2
1605           // The last chunk is not added to the lists but is returned as the
1606           // free chunk.
1607           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1608                i = 0;
1609                i < (num_blk - 1);
1610                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1611                i++) {
1612             curFc->set_size(size);
1613             // Don't record this as a return in order to try and
1614             // determine the "returns" from a GC.
1615             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1616             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1617             _bt.mark_block((HeapWord*)curFc, size);
1618             split_birth(size);
1619             // Don't record the initial population of the indexed list
1620             // as a split birth.
1621           }
1622 
1623           // check that the arithmetic was OK above
1624           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1625             "inconsistency in carving newFc");
1626           curFc->set_size(size);
1627           _bt.mark_block((HeapWord*)curFc, size);
1628           split_birth(size);
1629           fc = curFc;
1630         } else {
1631           // Return entire block to caller
1632           fc = newFc;
1633         }
1634       }
1635     }
1636   } else {
1637     // Get a free chunk from the free chunk dictionary to be returned to
1638     // replenish the indexed free list.
1639     fc = getChunkFromDictionaryExact(size);
1640   }
1641   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1642   return fc;
1643 }
1644 
1645 FreeChunk*
1646 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1647   assert_locked();
1648   FreeChunk* fc = _dictionary->get_chunk(size,
1649                                          FreeBlockDictionary<FreeChunk>::atLeast);
1650   if (fc == NULL) {
1651     return NULL;
1652   }
1653   _bt.allocated((HeapWord*)fc, fc->size());
1654   if (fc->size() >= size + MinChunkSize) {
1655     fc = splitChunkAndReturnRemainder(fc, size);
1656   }
1657   assert(fc->size() >= size, "chunk too small");
1658   assert(fc->size() < size + MinChunkSize, "chunk too big");
1659   _bt.verify_single_block((HeapWord*)fc, fc->size());
1660   return fc;
1661 }
1662 
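     // Unlike getChunkFromDictionary() above, which trims its result only to a
     // size in [size, size + MinChunkSize), the variant below returns a chunk
     // of exactly the requested size, re-fetching a larger chunk from the
     // dictionary if the first one cannot be split without leaving a
     // too-small remainder.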
1663 FreeChunk*
1664 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1665   assert_locked();
1666   FreeChunk* fc = _dictionary->get_chunk(size,
1667                                          FreeBlockDictionary<FreeChunk>::atLeast);
1668   if (fc == NULL) {
1669     return fc;
1670   }
1671   _bt.allocated((HeapWord*)fc, fc->size());
1672   if (fc->size() == size) {
1673     _bt.verify_single_block((HeapWord*)fc, size);
1674     return fc;
1675   }
1676   assert(fc->size() > size, "get_chunk() guarantee");
1677   if (fc->size() < size + MinChunkSize) {
1678     // Return the chunk to the dictionary and go get a bigger one.
1679     returnChunkToDictionary(fc);
1680     fc = _dictionary->get_chunk(size + MinChunkSize,
1681                                 FreeBlockDictionary<FreeChunk>::atLeast);
1682     if (fc == NULL) {
1683       return NULL;
1684     }
1685     _bt.allocated((HeapWord*)fc, fc->size());
1686   }
1687   assert(fc->size() >= size + MinChunkSize, "tautology");
1688   fc = splitChunkAndReturnRemainder(fc, size);
1689   assert(fc->size() == size, "chunk is wrong size");
1690   _bt.verify_single_block((HeapWord*)fc, size);
1691   return fc;
1692 }
1693 
1694 void
1695 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1696   assert_locked();
1697 
1698   size_t size = chunk->size();
1699   _bt.verify_single_block((HeapWord*)chunk, size);
1700   // adjust _unallocated_block downward, as necessary
1701   _bt.freed((HeapWord*)chunk, size);
1702   _dictionary->return_chunk(chunk);
1703 #ifndef PRODUCT
1704   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1705     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1706     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1707     tl->verify_stats();
1708   }
1709 #endif // PRODUCT
1710 }
1711 
1712 void
1713 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1714   assert_locked();
1715   size_t size = fc->size();
1716   _bt.verify_single_block((HeapWord*) fc, size);
1717   _bt.verify_not_unallocated((HeapWord*) fc, size);
1718   if (_adaptive_freelists) {
1719     _indexedFreeList[size].return_chunk_at_tail(fc);
1720   } else {
1721     _indexedFreeList[size].return_chunk_at_head(fc);
1722   }
1723 #ifndef PRODUCT
1724   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1725      _indexedFreeList[size].verify_stats();
1726   }
1727 #endif // PRODUCT
1728 }
1729 
1730 // Coalesce the chunk with the block that precedes it -- but only if that
1731 // block happens to be the largest block -- and update BOT and census data.
1732 // We would of course have preferred to coalesce it with the preceding
1733 // (last) block unconditionally, but it's currently less expensive to find
1734 // the largest block than it is to find the last.
1735 void
1736 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1737   HeapWord* chunk, size_t     size) {
1738   // check that the chunk does lie in this space!
1739   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1740   // One of the parallel gc task threads may be here
1741   // whilst others are allocating.
1742   Mutex* lock = NULL;
1743   if (ParallelGCThreads != 0) {
1744     lock = &_parDictionaryAllocLock;
1745   }
1746   FreeChunk* ec;
1747   {
1748     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1749     ec = dictionary()->find_largest_dict();  // get largest block
1750     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1751       // It's a coterminal block - we can coalesce.
1752       size_t old_size = ec->size();
1753       coalDeath(old_size);
1754       removeChunkFromDictionary(ec);
1755       size += old_size;
1756     } else {
1757       ec = (FreeChunk*)chunk;
1758     }
1759   }
1760   ec->set_size(size);
1761   debug_only(ec->mangleFreed(size));
1762   if (size < SmallForDictionary && ParallelGCThreads != 0) {
1763     lock = _indexedFreeListParLocks[size];
1764   }
1765   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1766   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1767   // record the birth under the lock since the recording involves
1768   // manipulation of the list on which the chunk lives and
1769   // if the chunk is allocated and is the last on the list,
1770   // the list can go away.
1771   coalBirth(size);
1772 }
1773 
1774 void
1775 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1776                                               size_t     size) {
1777   // check that the chunk does lie in this space!
1778   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1779   assert_locked();
1780   _bt.verify_single_block(chunk, size);
1781 
1782   FreeChunk* fc = (FreeChunk*) chunk;
1783   fc->set_size(size);
1784   debug_only(fc->mangleFreed(size));
1785   if (size < SmallForDictionary) {
1786     returnChunkToFreeList(fc);
1787   } else {
1788     returnChunkToDictionary(fc);
1789   }
1790 }
1791 
1792 void
1793 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1794   size_t size, bool coalesced) {
1795   assert_locked();
1796   assert(chunk != NULL, "null chunk");
1797   if (coalesced) {
1798     // repair BOT
1799     _bt.single_block(chunk, size);
1800   }
1801   addChunkToFreeLists(chunk, size);
1802 }
1803 
1804 // We _must_ find the purported chunk on our free lists;
1805 // we assert if we don't.
1806 void
1807 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1808   size_t size = fc->size();
1809   assert_locked();
1810   debug_only(verifyFreeLists());
1811   if (size < SmallForDictionary) {
1812     removeChunkFromIndexedFreeList(fc);
1813   } else {
1814     removeChunkFromDictionary(fc);
1815   }
1816   _bt.verify_single_block((HeapWord*)fc, size);
1817   debug_only(verifyFreeLists());
1818 }
1819 
1820 void
1821 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1822   size_t size = fc->size();
1823   assert_locked();
1824   assert(fc != NULL, "null chunk");
1825   _bt.verify_single_block((HeapWord*)fc, size);
1826   _dictionary->remove_chunk(fc);
1827   // adjust _unallocated_block upward, as necessary
1828   _bt.allocated((HeapWord*)fc, size);
1829 }
1830 
1831 void
1832 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1833   assert_locked();
1834   size_t size = fc->size();
1835   _bt.verify_single_block((HeapWord*)fc, size);
1836   NOT_PRODUCT(
1837     if (FLSVerifyIndexTable) {
1838       verifyIndexedFreeList(size);
1839     }
1840   )
1841   _indexedFreeList[size].remove_chunk(fc);
1842   NOT_PRODUCT(
1843     if (FLSVerifyIndexTable) {
1844       verifyIndexedFreeList(size);
1845     }
1846   )
1847 }
1848 
1849 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1850   /* A hint is the next larger size that has a surplus.
1851      Start search at a size large enough to guarantee that
1852      the excess is >= MinChunkSize. */
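       /* Hints are refreshed by setFLHints() at the end of each sweep census:
          a list's hint names the smallest larger size that then had a surplus,
          so the walk below can skip sizes known to have nothing to spare. */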
1853   size_t start = align_object_size(numWords + MinChunkSize);
1854   if (start < IndexSetSize) {
1855     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1856     size_t    hint = _indexedFreeList[start].hint();
1857     while (hint < IndexSetSize) {
1858       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1859       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1860       if (fl->surplus() > 0 && fl->head() != NULL) {
1861         // Found a list with surplus, reset original hint
1862         // and split out a free chunk which is returned.
1863         _indexedFreeList[start].set_hint(hint);
1864         FreeChunk* res = getFromListGreater(fl, numWords);
1865         assert(res == NULL || res->is_free(),
1866           "Should be returning a free chunk");
1867         return res;
1868       }
1869       hint = fl->hint(); /* keep looking */
1870     }
1871     /* None found. */
1872     it[start].set_hint(IndexSetSize);
1873   }
1874   return NULL;
1875 }
1876 
1877 /* Requires fl->size >= numWords + MinChunkSize */
1878 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1879   size_t numWords) {
1880   FreeChunk *curr = fl->head();
1881   assert(curr != NULL, "List is empty");
1882   size_t oldNumWords = curr->size();
1883   assert(numWords >= MinChunkSize, "Word size is too small");
1884   assert(oldNumWords >= numWords + MinChunkSize,
1885         "Size of chunks in the list is too small");
1886 
1887   fl->remove_chunk(curr);
1888   // recorded indirectly by splitChunkAndReturnRemainder -
1889   // smallSplit(oldNumWords, numWords);
1890   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1891   // Does anything have to be done for the remainder in terms of
1892   // fixing the card table?
1893   assert(new_chunk == NULL || new_chunk->is_free(),
1894     "Should be returning a free chunk");
1895   return new_chunk;
1896 }
1897 
1898 FreeChunk*
1899 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1900   size_t new_size) {
1901   assert_locked();
1902   size_t size = chunk->size();
1903   assert(size > new_size, "Split from a smaller block?");
1904   assert(is_aligned(chunk), "alignment problem");
1905   assert(size == adjustObjectSize(size), "alignment problem");
1906   size_t rem_sz = size - new_size;
1907   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1908   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1909   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1910   assert(is_aligned(ffc), "alignment problem");
1911   ffc->set_size(rem_sz);
1912   ffc->link_next(NULL);
1913   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1914   // Above must occur before BOT is updated below.
1915   // adjust block offset table
1916   OrderAccess::storestore();
1917   assert(chunk->is_free() && ffc->is_free(), "Error");
1918   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1919   if (rem_sz < SmallForDictionary) {
1920     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1921     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1922     assert(!is_par ||
1923            (SharedHeap::heap()->n_par_threads() ==
1924             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1925     returnChunkToFreeList(ffc);
1926     split(size, rem_sz);
1927     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1928   } else {
1929     returnChunkToDictionary(ffc);
1930     split(size, rem_sz);
1931   }
1932   chunk->set_size(new_size);
1933   return chunk;
1934 }
1935 
1936 void
1937 CompactibleFreeListSpace::sweep_completed() {
1938   // Now that space is probably plentiful, refill linear
1939   // allocation blocks as needed.
1940   refillLinearAllocBlocksIfNeeded();
1941 }
1942 
1943 void
1944 CompactibleFreeListSpace::gc_prologue() {
1945   assert_locked();
1946   if (PrintFLSStatistics != 0) {
1947     gclog_or_tty->print("Before GC:\n");
1948     reportFreeListStatistics();
1949   }
1950   refillLinearAllocBlocksIfNeeded();
1951 }
1952 
1953 void
1954 CompactibleFreeListSpace::gc_epilogue() {
1955   assert_locked();
1956   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1957     if (_smallLinearAllocBlock._word_size == 0)
1958       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1959   }
1960   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1961   _promoInfo.stopTrackingPromotions();
1962   repairLinearAllocationBlocks();
1963   // Print Space's stats
1964   if (PrintFLSStatistics != 0) {
1965     gclog_or_tty->print("After GC:\n");
1966     reportFreeListStatistics();
1967   }
1968 }
1969 
1970 // Iteration support, mostly delegated from a CMS generation
1971 
1972 void CompactibleFreeListSpace::save_marks() {
1973   assert(Thread::current()->is_VM_thread(),
1974          "Global variable should only be set when single-threaded");
1975   // Mark the "end" of the used space at the time of this call;
1976   // note, however, that promoted objects from this point
1977   // on are tracked in the _promoInfo below.
1978   set_saved_mark_word(unallocated_block());
1979 #ifdef ASSERT
1980   // Check the sanity of save_marks() etc.
1981   MemRegion ur    = used_region();
1982   MemRegion urasm = used_region_at_save_marks();
1983   assert(ur.contains(urasm),
1984          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1985                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1986                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1987 #endif
1988   // inform allocator that promotions should be tracked.
1989   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1990   _promoInfo.startTrackingPromotions();
1991 }
1992 
1993 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1994   assert(_promoInfo.tracking(), "No preceding save_marks?");
1995   assert(SharedHeap::heap()->n_par_threads() == 0,
1996          "Shouldn't be called if using parallel gc.");
1997   return _promoInfo.noPromotions();
1998 }
1999 
2000 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2001                                                                             \
2002 void CompactibleFreeListSpace::                                             \
2003 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2004   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2005          "Shouldn't be called (yet) during parallel part of gc.");          \
2006   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2007   /*                                                                        \
2008    * This also restores any displaced headers and removes the elements from \
2009    * the iteration set as they are processed, so that we have a clean slate \
2010    * at the end of the iteration. Note, thus, that if new objects are       \
2011    * promoted as a result of the iteration they are iterated over as well.  \
2012    */                                                                       \
2013   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2014 }
2015 
2016 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2017 
2018 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2019   return _smallLinearAllocBlock._word_size == 0;
2020 }
2021 
2022 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2023   // Fix up linear allocation blocks to look like free blocks
2024   repairLinearAllocBlock(&_smallLinearAllocBlock);
2025 }
2026 
2027 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2028   assert_locked();
2029   if (blk->_ptr != NULL) {
2030     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2031            "Minimum block size requirement");
2032     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2033     fc->set_size(blk->_word_size);
2034     fc->link_prev(NULL);   // mark as free
2035     fc->dontCoalesce();
2036     assert(fc->is_free(), "just marked it free");
2037     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2038   }
2039 }
2040 
2041 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2042   assert_locked();
2043   if (_smallLinearAllocBlock._ptr == NULL) {
2044     assert(_smallLinearAllocBlock._word_size == 0,
2045       "Size of linAB should be zero if the ptr is NULL");
2046     // Reset the linAB refill and allocation size limit.
2047     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2048   }
2049   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2050 }
2051 
2052 void
2053 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2054   assert_locked();
2055   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2056          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2057          "blk invariant");
2058   if (blk->_ptr == NULL) {
2059     refillLinearAllocBlock(blk);
2060   }
2061   if (PrintMiscellaneous && Verbose) {
2062     if (blk->_word_size == 0) {
2063       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2064     }
2065   }
2066 }
2067 
2068 void
2069 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2070   assert_locked();
2071   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2072          "linear allocation block should be empty");
2073   FreeChunk* fc;
2074   if (blk->_refillSize < SmallForDictionary &&
2075       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2076     // A linAB's strategy might be to use small sizes to reduce
2077     // fragmentation but still get the benefits of allocation from a
2078     // linAB.
2079   } else {
2080     fc = getChunkFromDictionary(blk->_refillSize);
2081   }
2082   if (fc != NULL) {
2083     blk->_ptr  = (HeapWord*)fc;
2084     blk->_word_size = fc->size();
2085     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2086   }
2087 }
2088 
2089 // Support for concurrent collection policy decisions.
2090 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2091   // In the future we might want to add in fragmentation stats --
2092   // including erosion of the "mountain" into this decision as well.
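       // For now, with non-adaptive free lists, an exhausted small linAB that
       // could not be refilled is taken as the signal to start a concurrent
       // collection.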
2093   return !adaptive_freelists() && linearAllocationWouldFail();
2094 }
2095 
2096 // Support for compaction
2097 
2098 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2099   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2100   // prepare_for_compaction() uses the space between live objects
2101   // so that later phases can skip dead space quickly.  As a result,
2102   // verification of the free lists does not work afterwards.
2103 }
2104 
2105 #define obj_size(q) adjustObjectSize(oop(q)->size())
2106 #define adjust_obj_size(s) adjustObjectSize(s)
2107 
2108 void CompactibleFreeListSpace::adjust_pointers() {
2109   // In other versions of adjust_pointers(), a bail out
2110   // based on the amount of live data in the generation
2111   // (i.e., if 0, bail out) may be used.
2112   // Cannot test used() == 0 here because the free lists have already
2113   // been mangled by the compaction.
2114 
2115   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2116   // See note about verification in prepare_for_compaction().
2117 }
2118 
2119 void CompactibleFreeListSpace::compact() {
2120   SCAN_AND_COMPACT(obj_size);
2121 }
2122 
2123 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2124 // where fbs are the free block sizes.
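     // For example, free blocks of sizes {2, 2, 4} words give
     //   1 - (4 + 4 + 16)/(2 + 2 + 4)^2 = 1 - 24/64 = 0.625,
     // while a single 8-word block gives 1 - 64/64 = 0; the metric is 0 when
     // all free space is in one block and approaches 1 as the free space is
     // shattered into many small blocks.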
2125 double CompactibleFreeListSpace::flsFrag() const {
2126   size_t itabFree = totalSizeInIndexedFreeLists();
2127   double frag = 0.0;
2128   size_t i;
2129 
2130   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2131     double sz  = i;
2132     frag      += _indexedFreeList[i].count() * (sz * sz);
2133   }
2134 
2135   double totFree = itabFree +
2136                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2137   if (totFree > 0) {
2138     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2139             (totFree * totFree));
2140     frag = (double)1.0  - frag;
2141   } else {
2142     assert(frag == 0.0, "Follows from totFree == 0");
2143   }
2144   return frag;
2145 }
2146 
2147 void CompactibleFreeListSpace::beginSweepFLCensus(
2148   float inter_sweep_current,
2149   float inter_sweep_estimate,
2150   float intra_sweep_estimate) {
2151   assert_locked();
2152   size_t i;
2153   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2154     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2155     if (PrintFLSStatistics > 1) {
2156       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2157     }
2158     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2159     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2160     fl->set_before_sweep(fl->count());
2161     fl->set_bfr_surp(fl->surplus());
2162   }
2163   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2164                                     inter_sweep_current,
2165                                     inter_sweep_estimate,
2166                                     intra_sweep_estimate);
2167 }
2168 
2169 void CompactibleFreeListSpace::setFLSurplus() {
2170   assert_locked();
2171   size_t i;
2172   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2173     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2174     fl->set_surplus(fl->count() -
2175                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2176   }
2177 }
2178 
2179 void CompactibleFreeListSpace::setFLHints() {
2180   assert_locked();
2181   size_t i;
2182   size_t h = IndexSetSize;
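       // Walking from the largest size down, each list's hint becomes the
       // smallest larger size that currently has a surplus (IndexSetSize if
       // none does).  For example, if only the 32- and 48-word lists have a
       // surplus, hint(i) == 32 for i < 32, hint(i) == 48 for 32 <= i < 48,
       // and hint(i) == IndexSetSize for i >= 48.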
2183   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2184     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2185     fl->set_hint(h);
2186     if (fl->surplus() > 0) {
2187       h = i;
2188     }
2189   }
2190 }
2191 
2192 void CompactibleFreeListSpace::clearFLCensus() {
2193   assert_locked();
2194   size_t i;
2195   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2196     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2197     fl->set_prev_sweep(fl->count());
2198     fl->set_coal_births(0);
2199     fl->set_coal_deaths(0);
2200     fl->set_split_births(0);
2201     fl->set_split_deaths(0);
2202   }
2203 }
2204 
2205 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2206   if (PrintFLSStatistics > 0) {
2207     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2208     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2209                            p2i(largestAddr));
2210   }
2211   setFLSurplus();
2212   setFLHints();
2213   if (PrintGC && PrintFLSCensus > 0) {
2214     printFLCensus(sweep_count);
2215   }
2216   clearFLCensus();
2217   assert_locked();
2218   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2219 }
2220 
2221 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2222   if (size < SmallForDictionary) {
2223     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2224     return (fl->coal_desired() < 0) ||
2225            ((int)fl->count() > fl->coal_desired());
2226   } else {
2227     return dictionary()->coal_dict_over_populated(size);
2228   }
2229 }
2230 
2231 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2232   assert(size < SmallForDictionary, "Size too large for indexed list");
2233   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2234   fl->increment_coal_births();
2235   fl->increment_surplus();
2236 }
2237 
2238 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2239   assert(size < SmallForDictionary, "Size too large for indexed list");
2240   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2241   fl->increment_coal_deaths();
2242   fl->decrement_surplus();
2243 }
2244 
2245 void CompactibleFreeListSpace::coalBirth(size_t size) {
2246   if (size  < SmallForDictionary) {
2247     smallCoalBirth(size);
2248   } else {
2249     dictionary()->dict_census_update(size,
2250                                    false /* split */,
2251                                    true /* birth */);
2252   }
2253 }
2254 
2255 void CompactibleFreeListSpace::coalDeath(size_t size) {
2256   if (size < SmallForDictionary) {
2257     smallCoalDeath(size);
2258   } else {
2259     dictionary()->dict_census_update(size,
2260                                    false /* split */,
2261                                    false /* birth */);
2262   }
2263 }
2264 
2265 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2266   assert(size < SmallForDictionary, "Size too large for indexed list");
2267   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2268   fl->increment_split_births();
2269   fl->increment_surplus();
2270 }
2271 
2272 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2273   assert(size < SmallForDictionary, "Size too large for indexed list");
2274   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2275   fl->increment_split_deaths();
2276   fl->decrement_surplus();
2277 }
2278 
2279 void CompactibleFreeListSpace::split_birth(size_t size) {
2280   if (size  < SmallForDictionary) {
2281     smallSplitBirth(size);
2282   } else {
2283     dictionary()->dict_census_update(size,
2284                                    true /* split */,
2285                                    true /* birth */);
2286   }
2287 }
2288 
2289 void CompactibleFreeListSpace::splitDeath(size_t size) {
2290   if (size  < SmallForDictionary) {
2291     smallSplitDeath(size);
2292   } else {
2293     dictionary()->dict_census_update(size,
2294                                    true /* split */,
2295                                    false /* birth */);
2296   }
2297 }
2298 
2299 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2300   size_t to2 = from - to1;
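       // For example, split(40, 24) records a split-death for the original
       // 40-word chunk and split-births for the resulting 24- and 16-word
       // pieces.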
2301   splitDeath(from);
2302   split_birth(to1);
2303   split_birth(to2);
2304 }
2305 
2306 void CompactibleFreeListSpace::print() const {
2307   print_on(tty);
2308 }
2309 
2310 void CompactibleFreeListSpace::prepare_for_verify() {
2311   assert_locked();
2312   repairLinearAllocationBlocks();
2313   // Verify that the SpoolBlocks look like free blocks of
2314   // appropriate sizes... To be done ...
2315 }
2316 
2317 class VerifyAllBlksClosure: public BlkClosure {
2318  private:
2319   const CompactibleFreeListSpace* _sp;
2320   const MemRegion                 _span;
2321   HeapWord*                       _last_addr;
2322   size_t                          _last_size;
2323   bool                            _last_was_obj;
2324   bool                            _last_was_live;
2325 
2326  public:
2327   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2328     MemRegion span) :  _sp(sp), _span(span),
2329                        _last_addr(NULL), _last_size(0),
2330                        _last_was_obj(false), _last_was_live(false) { }
2331 
2332   virtual size_t do_blk(HeapWord* addr) {
2333     size_t res;
2334     bool   was_obj  = false;
2335     bool   was_live = false;
2336     if (_sp->block_is_obj(addr)) {
2337       was_obj = true;
2338       oop p = oop(addr);
2339       guarantee(p->is_oop(), "Should be an oop");
2340       res = _sp->adjustObjectSize(p->size());
2341       if (_sp->obj_is_alive(addr)) {
2342         was_live = true;
2343         p->verify();
2344       }
2345     } else {
2346       FreeChunk* fc = (FreeChunk*)addr;
2347       res = fc->size();
2348       if (FLSVerifyLists && !fc->cantCoalesce()) {
2349         guarantee(_sp->verify_chunk_in_free_list(fc),
2350                   "Chunk should be on a free list");
2351       }
2352     }
2353     if (res == 0) {
2354       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2355       gclog_or_tty->print_cr(
2356         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2357         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2358         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2359         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2360       _sp->print_on(gclog_or_tty);
2361       guarantee(false, "Seppuku!");
2362     }
2363     _last_addr = addr;
2364     _last_size = res;
2365     _last_was_obj  = was_obj;
2366     _last_was_live = was_live;
2367     return res;
2368   }
2369 };
2370 
2371 class VerifyAllOopsClosure: public OopClosure {
2372  private:
2373   const CMSCollector*             _collector;
2374   const CompactibleFreeListSpace* _sp;
2375   const MemRegion                 _span;
2376   const bool                      _past_remark;
2377   const CMSBitMap*                _bit_map;
2378 
2379  protected:
2380   void do_oop(void* p, oop obj) {
2381     if (_span.contains(obj)) { // the interior oop points into CMS heap
2382       if (!_span.contains(p)) { // reference from outside CMS heap
2383         // Should be a valid object; the first disjunct below allows
2384         // us to sidestep an assertion in block_is_obj() that insists
2385         // that p be in _sp. Note that several generations (and spaces)
2386         // are spanned by _span (CMS heap) above.
2387         guarantee(!_sp->is_in_reserved(obj) ||
2388                   _sp->block_is_obj((HeapWord*)obj),
2389                   "Should be an object");
2390         guarantee(obj->is_oop(), "Should be an oop");
2391         obj->verify();
2392         if (_past_remark) {
2393           // Remark has been completed, the object should be marked
2394           _bit_map->isMarked((HeapWord*)obj);
2395         }
2396       } else { // reference within CMS heap
2397         if (_past_remark) {
2398           // Remark has been completed -- so the referent should have
2399           // been marked, if referring object is.
2400           if (_bit_map->isMarked(_collector->block_start(p))) {
2401             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2402           }
2403         }
2404       }
2405     } else if (_sp->is_in_reserved(p)) {
2406       // the reference is from FLS, and points out of FLS
2407       guarantee(obj->is_oop(), "Should be an oop");
2408       obj->verify();
2409     }
2410   }
2411 
2412   template <class T> void do_oop_work(T* p) {
2413     T heap_oop = oopDesc::load_heap_oop(p);
2414     if (!oopDesc::is_null(heap_oop)) {
2415       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2416       do_oop(p, obj);
2417     }
2418   }
2419 
2420  public:
2421   VerifyAllOopsClosure(const CMSCollector* collector,
2422     const CompactibleFreeListSpace* sp, MemRegion span,
2423     bool past_remark, CMSBitMap* bit_map) :
2424     _collector(collector), _sp(sp), _span(span),
2425     _past_remark(past_remark), _bit_map(bit_map) { }
2426 
2427   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2428   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2429 };
2430 
2431 void CompactibleFreeListSpace::verify() const {
2432   assert_lock_strong(&_freelistLock);
2433   verify_objects_initialized();
2434   MemRegion span = _collector->_span;
2435   bool past_remark = (_collector->abstract_state() ==
2436                       CMSCollector::Sweeping);
2437 
2438   ResourceMark rm;
2439   HandleMark  hm;
2440 
2441   // Check integrity of CFL data structures
2442   _promoInfo.verify();
2443   _dictionary->verify();
2444   if (FLSVerifyIndexTable) {
2445     verifyIndexedFreeLists();
2446   }
2447   // Check integrity of all objects and free blocks in space
2448   {
2449     VerifyAllBlksClosure cl(this, span);
2450     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2451   }
2452   // Check that all references in the heap to FLS
2453   // are to valid objects in FLS or that references in
2454   // FLS are to valid objects elsewhere in the heap
2455   if (FLSVerifyAllHeapReferences)
2456   {
2457     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2458       _collector->markBitMap());
2459     CollectedHeap* ch = Universe::heap();
2460 
2461     // Iterate over all oops in the heap. Uses the _no_header version
2462     // since we are not interested in following the klass pointers.
2463     ch->oop_iterate_no_header(&cl);
2464   }
2465 
2466   if (VerifyObjectStartArray) {
2467     // Verify the block offset table
2468     _bt.verify();
2469   }
2470 }
2471 
2472 #ifndef PRODUCT
2473 void CompactibleFreeListSpace::verifyFreeLists() const {
2474   if (FLSVerifyLists) {
2475     _dictionary->verify();
2476     verifyIndexedFreeLists();
2477   } else {
2478     if (FLSVerifyDictionary) {
2479       _dictionary->verify();
2480     }
2481     if (FLSVerifyIndexTable) {
2482       verifyIndexedFreeLists();
2483     }
2484   }
2485 }
2486 #endif
2487 
2488 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2489   size_t i = 0;
2490   for (; i < IndexSetStart; i++) {
2491     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2492   }
2493   for (; i < IndexSetSize; i++) {
2494     verifyIndexedFreeList(i);
2495   }
2496 }
2497 
2498 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2499   FreeChunk* fc   =  _indexedFreeList[size].head();
2500   FreeChunk* tail =  _indexedFreeList[size].tail();
2501   size_t    num = _indexedFreeList[size].count();
2502   size_t      n = 0;
2503   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2504             "Slot should have been empty");
2505   for (; fc != NULL; fc = fc->next(), n++) {
2506     guarantee(fc->size() == size, "Size inconsistency");
2507     guarantee(fc->is_free(), "!free?");
2508     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2509     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2510   }
2511   guarantee(n == num, "Incorrect count");
2512 }
2513 
2514 #ifndef PRODUCT
2515 void CompactibleFreeListSpace::check_free_list_consistency() const {
2516   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2517     "Some sizes can't be allocated without recourse to"
2518     " linear allocation buffers");
2519   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2520     "else MIN_TREE_CHUNK_SIZE is wrong");
2521   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2522   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2523 }
2524 #endif
2525 
2526 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2527   assert_lock_strong(&_freelistLock);
2528   AdaptiveFreeList<FreeChunk> total;
2529   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2530   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2531   size_t total_free = 0;
2532   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2533     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2534     total_free += fl->count() * fl->size();
2535     if (i % (40*IndexSetStride) == 0) {
2536       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2537     }
2538     fl->print_on(gclog_or_tty);
2539     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2540     total.set_surplus(    total.surplus()     + fl->surplus()    );
2541     total.set_desired(    total.desired()     + fl->desired()    );
2542     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2543     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2544     total.set_count(      total.count()       + fl->count()      );
2545     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2546     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2547     total.set_split_births(total.split_births() + fl->split_births());
2548     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2549   }
2550   total.print_on(gclog_or_tty, "TOTAL");
2551   gclog_or_tty->print_cr("Total free in indexed lists "
2552                          SIZE_FORMAT " words", total_free);
2553   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2554     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2555             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2556     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2557   _dictionary->print_dict_census();
2558 }
2559 
2560 ///////////////////////////////////////////////////////////////////////////
2561 // CFLS_LAB
2562 ///////////////////////////////////////////////////////////////////////////
2563 
2564 #define VECTOR_257(x)                                                                                  \
2565   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2566   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2567      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2568      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2569      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2570      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2571      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2572      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2573      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2574      x }
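// The macro above expands to 257 initializers, one per indexed free list
// size (0 through IndexSetSize - 1); see the IndexSetSize == 257 assert in
// the CFLS_LAB constructor below.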
2575 
2576 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2577 // OldPLABSize, whose static default is different; if overridden on the
2578 // command line, this will get reinitialized via a call to
2579 // modify_initialization() below.
2580 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2581   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2582 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2583 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2584 
2585 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2586   _cfls(cfls)
2587 {
2588   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2589   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2590        i < CompactibleFreeListSpace::IndexSetSize;
2591        i += CompactibleFreeListSpace::IndexSetStride) {
2592     _indexedFreeList[i].set_size(i);
2593     _num_blocks[i] = 0;
2594   }
2595 }
2596 
2597 static bool _CFLS_LAB_modified = false;
2598 
2599 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2600   assert(!_CFLS_LAB_modified, "Call only once");
2601   _CFLS_LAB_modified = true;
2602   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2603        i < CompactibleFreeListSpace::IndexSetSize;
2604        i += CompactibleFreeListSpace::IndexSetStride) {
2605     _blocks_to_claim[i].modify(n, wt, true /* force */);
2606   }
2607 }
2608 
2609 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2610   FreeChunk* res;
2611   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2612   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2613     // This locking manages sync with other large object allocations.
2614     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2615                     Mutex::_no_safepoint_check_flag);
2616     res = _cfls->getChunkFromDictionaryExact(word_sz);
2617     if (res == NULL) return NULL;
2618   } else {
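    // Small requests are served from this thread-local indexed free list;
    // when it runs dry we refill it in bulk from the shared space, so the
    // common allocation path here takes no lock.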
2619     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2620     if (fl->count() == 0) {
2621       // Attempt to refill this local free list.
2622       get_from_global_pool(word_sz, fl);
2623       // If it didn't work, give up.
2624       if (fl->count() == 0) return NULL;
2625     }
2626     res = fl->get_chunk_at_head();
2627     assert(res != NULL, "Why was count non-zero?");
2628   }
2629   res->markNotFree();
2630   assert(!res->is_free(), "shouldn't be marked free");
2631   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2632   // mangle a just allocated object with a distinct pattern.
2633   debug_only(res->mangleAllocated(word_sz));
2634   return (HeapWord*)res;
2635 }
2636 
2637 // Get a chunk of blocks of the right size and update related
2638 // book-keeping stats
2639 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2640   // Get the #blocks we want to claim
2641   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2642   assert(n_blks > 0, "Error");
2643   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2644   // In some cases, when the application has a phase change,
2645   // there may be a sudden and sharp shift in the object survival
2646   // profile, and updating the counts at the end of a scavenge
2647   // may not be quick enough, giving rise to large scavenge pauses
2648   // during these phase changes. It is beneficial to detect such
2649   // changes on-the-fly during a scavenge and avoid such a phase-change
2650   // pothole. The following code is a heuristic attempt to do that.
2651   // It is protected by a product flag until we have gained
2652   // enough experience with this heuristic and fine-tuned its behavior.
2653   // WARNING: This might increase fragmentation if we overreact to
2654   // small spikes, so some kind of historical smoothing based on
2655   // previous experience with the greater reactivity might be useful.
2656   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2657   // default.
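  // Illustrative example (hypothetical values, not the flag defaults): with
  // n_blks == 50, CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4
  // and _num_blocks[word_sz] == 3200, "multiple" below is 3200/(4*4*50) == 4,
  // so with CMSOldPLABReactivityFactor == 2 the claim grows by 2*4*50 == 400
  // blocks, subject to the CMSOldPLABMax clamp.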
2658   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2659     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2660     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2661     n_blks = MIN2(n_blks, CMSOldPLABMax);
2662   }
2663   assert(n_blks > 0, "Error");
2664   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2665   // Update stats table entry for this block size
2666   _num_blocks[word_sz] += fl->count();
2667 }
2668 
2669 void CFLS_LAB::compute_desired_plab_size() {
2670   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2671        i < CompactibleFreeListSpace::IndexSetSize;
2672        i += CompactibleFreeListSpace::IndexSetStride) {
2673     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2674            "Counter inconsistency");
2675     if (_global_num_workers[i] > 0) {
2676       // Need to smooth wrt historical average
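      // The sampled value is the number of blocks consumed per worker per
      // refill during the last scavenge, clamped to [CMSOldPLABMin,
      // CMSOldPLABMax]; e.g. 8000 blocks over 4 workers with
      // CMSOldPLABNumRefills == 4 gives a sample of 500.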
2677       if (ResizeOldPLAB) {
2678         _blocks_to_claim[i].sample(
2679           MAX2((size_t)CMSOldPLABMin,
2680           MIN2((size_t)CMSOldPLABMax,
2681                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2682       }
2683       // Reset counters for next round
2684       _global_num_workers[i] = 0;
2685       _global_num_blocks[i] = 0;
2686       if (PrintOldPLAB) {
2687         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2688                                i, (size_t)_blocks_to_claim[i].average());
2689       }
2690     }
2691   }
2692 }
2693 
2694 // If this is changed in the future to allow parallel
2695 // access, one would need to take the FL locks and,
2696 // depending on how it is used, stagger access from
2697 // parallel threads to reduce contention.
2698 void CFLS_LAB::retire(int tid) {
2699   // We run this single threaded with the world stopped;
2700   // so no need for locks and such.
2701   NOT_PRODUCT(Thread* t = Thread::current();)
2702   assert(Thread::current()->is_VM_thread(), "Error");
2703   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2704        i < CompactibleFreeListSpace::IndexSetSize;
2705        i += CompactibleFreeListSpace::IndexSetStride) {
2706     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2707            "Can't retire more than what we obtained");
2708     if (_num_blocks[i] > 0) {
2709       size_t num_retire =  _indexedFreeList[i].count();
2710       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2711       {
2712         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2713         //                Mutex::_no_safepoint_check_flag);
2714 
2715         // Update global stats with the number of blocks this thread actually used
2716         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2717         _global_num_workers[i]++;
2718         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2719         if (num_retire > 0) {
2720           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2721           // Reset this list.
2722           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2723           _indexedFreeList[i].set_size(i);
2724         }
2725       }
2726       if (PrintOldPLAB) {
2727         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2728                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2729       }
2730       // Reset stats for next round
2731       _num_blocks[i]         = 0;
2732     }
2733   }
2734 }
2735 
2736 // Used by par_get_chunk_of_blocks() for the chunks from the
2737 // indexed_free_lists.  Looks for a chunk with size that is a multiple
2738 // of "word_sz" and, if found, splits it into "word_sz" chunks and adds
2739 // them to the free list "fl".  "n" is the maximum number of chunks to
2740 // be added to "fl".
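// For example (illustrative sizes only): with word_sz == 4 and n == 8, we
// first try to take up to 8 chunks straight off the size-4 list (k == 1);
// if that list is empty and CMSSplitIndexedFreeListBlocks is set, we take up
// to 4 chunks off the size-8 list and split each in two (k == 2), and so on
// while k * word_sz stays below IndexSetSize.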
2741 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2742 
2743   // We'll try all multiples of word_sz in the indexed set, starting with
2744   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples.
2745   // If that fails, the caller splits a big chunk from the dictionary instead.
2746   {
2747     bool found;
2748     int  k;
2749     size_t cur_sz;
2750     for (k = 1, cur_sz = k * word_sz, found = false;
2751          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2752          (CMSSplitIndexedFreeListBlocks || k <= 1);
2753          k++, cur_sz = k * word_sz) {
2754       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2755       fl_for_cur_sz.set_size(cur_sz);
2756       {
2757         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2758                         Mutex::_no_safepoint_check_flag);
2759         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2760         if (gfl->count() != 0) {
2761           // nn is the number of chunks of size cur_sz that
2762           // we'd need to split k-ways each, in order to create
2763           // "n" chunks of size word_sz each.
2764           const size_t nn = MAX2(n/k, (size_t)1);
2765           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2766           found = true;
2767           if (k > 1) {
2768             // Update split death stats for the cur_sz-size blocks list:
2769             // we increment the split death count by the number of blocks
2770             // we just took from the cur_sz-size blocks list and which
2771             // we will be splitting below.
2772             ssize_t deaths = gfl->split_deaths() +
2773                              fl_for_cur_sz.count();
2774             gfl->set_split_deaths(deaths);
2775           }
2776         }
2777       }
2778       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2779       if (found) {
2780         if (k == 1) {
2781           fl->prepend(&fl_for_cur_sz);
2782         } else {
2783           // Divide each block on fl_for_cur_sz up k ways.
2784           FreeChunk* fc;
2785           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2786             // Must do this in reverse order, so that anybody attempting to
2787             // access the main chunk sees it as a single free block until we
2788             // change it.
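            // Carving right-to-left and publishing each sub-block's header
            // before its BOT entry (note the storestore below) ensures a
            // concurrent reader sees either the original large block or a
            // fully initialized sub-block, never a half-built one.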
2789             size_t fc_size = fc->size();
2790             assert(fc->is_free(), "Error");
2791             for (int i = k-1; i >= 0; i--) {
2792               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2793               assert((i != 0) ||
2794                         ((fc == ffc) && ffc->is_free() &&
2795                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2796                         "Counting error");
2797               ffc->set_size(word_sz);
2798               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2799               ffc->link_next(NULL);
2800               // Above must occur before BOT is updated below.
2801               OrderAccess::storestore();
2802               // splitting from the right, fc_size == i * word_sz
2803               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2804               fc_size -= word_sz;
2805               assert(fc_size == i*word_sz, "Error");
2806               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2807               _bt.verify_single_block((HeapWord*)fc, fc_size);
2808               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2809               // Push this on "fl".
2810               fl->return_chunk_at_head(ffc);
2811             }
2812             // TRAP
2813             assert(fl->tail()->next() == NULL, "List invariant.");
2814           }
2815         }
2816         // Update birth stats for this block size.
2817         size_t num = fl->count();
2818         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2819                         Mutex::_no_safepoint_check_flag);
2820         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2821         _indexedFreeList[word_sz].set_split_births(births);
2822         return true;
2823       }
2824     }
2825     return found;
2826   }
2827 }
2828 
2829 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2830 
2831   FreeChunk* fc = NULL;
2832   FreeChunk* rem_fc = NULL;
2833   size_t rem;
2834   {
2835     MutexLockerEx x(parDictionaryAllocLock(),
2836                     Mutex::_no_safepoint_check_flag);
2837     while (n > 0) {
2838       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2839                                   FreeBlockDictionary<FreeChunk>::atLeast);
2840       if (fc != NULL) {
2841         break;
2842       } else {
2843         n--;
2844       }
2845     }
2846     if (fc == NULL) return NULL;
2847     // Otherwise, split up that block.
2848     assert((ssize_t)n >= 1, "Control point invariant");
2849     assert(fc->is_free(), "Error: should be a free block");
2850     _bt.verify_single_block((HeapWord*)fc, fc->size());
2851     const size_t nn = fc->size() / word_sz;
2852     n = MIN2(nn, n);
2853     assert((ssize_t)n >= 1, "Control point invariant");
2854     rem = fc->size() - n * word_sz;
2855     // If there is a remainder, and it's too small, allocate one fewer.
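    // (Hypothetical example: with word_sz == 10, a 45-word chunk and
    // MinChunkSize == 7, n == 4 would leave rem == 5 < MinChunkSize, so we
    // drop to n == 3 and fold the freed 10 words into rem == 15.)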
2856     if (rem > 0 && rem < MinChunkSize) {
2857       n--; rem += word_sz;
2858     }
2859     // Note that at this point we may have n == 0.
2860     assert((ssize_t)n >= 0, "Control point invariant");
2861 
2862     // If n is 0, the chunk fc that was found is not large
2863     // enough to leave a viable remainder.  We are unable to
2864     // allocate even one block.  Return fc to the
2865     // dictionary and return, leaving "fl" empty.
2866     if (n == 0) {
2867       returnChunkToDictionary(fc);
2868       return NULL;
2869     }
2870 
2871     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2872     dictionary()->dict_census_update(fc->size(),
2873                                      true /*split*/,
2874                                      false /*birth*/);
2875 
2876     // First return the remainder, if any.
2877     // Note that we hold the lock until we decide if we're going to give
2878     // back the remainder to the dictionary, since a concurrent allocation
2879     // may otherwise see the heap as empty.  (We're willing to take that
2880     // hit if the block is a small block.)
2881     if (rem > 0) {
2882       size_t prefix_size = n * word_sz;
2883       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2884       rem_fc->set_size(rem);
2885       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2886       rem_fc->link_next(NULL);
2887       // Above must occur before BOT is updated below.
2888       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2889       OrderAccess::storestore();
2890       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2891       assert(fc->is_free(), "Error");
2892       fc->set_size(prefix_size);
2893       if (rem >= IndexSetSize) {
2894         returnChunkToDictionary(rem_fc);
2895         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2896         rem_fc = NULL;
2897       }
2898       // Otherwise, return it to the small list below.
2899     }
2900   }
2901   if (rem_fc != NULL) {
2902     MutexLockerEx x(_indexedFreeListParLocks[rem],
2903                     Mutex::_no_safepoint_check_flag);
2904     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2905     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2906     smallSplitBirth(rem);
2907   }
2908   assert(n * word_sz == fc->size(),
2909     err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable into "
2910     SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2911     fc->size(), n, word_sz));
2912   return fc;
2913 }
2914 
2915 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2916 
2917   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2918 
2919   if (fc == NULL) {
2920     return;
2921   }
2922 
2923   size_t n = fc->size() / word_sz;
2924 
2925   assert((ssize_t)n > 0, "Consistency");
2926   // Now do the splitting up.
2927   // Must do this in reverse order, so that anybody attempting to
2928   // access the main chunk sees it as a single free block until we
2929   // change it.
2930   size_t fc_size = n * word_sz;
2931   // All but first chunk in this loop
2932   for (ssize_t i = n-1; i > 0; i--) {
2933     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2934     ffc->set_size(word_sz);
2935     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2936     ffc->link_next(NULL);
2937     // Above must occur before BOT is updated below.
2938     OrderAccess::storestore();
2939     // Splitting from the right: after the decrement below, fc_size == i * word_sz.
2940     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2941     fc_size -= word_sz;
2942     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2943     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2944     _bt.verify_single_block((HeapWord*)fc, fc_size);
2945     // Push this on "fl".
2946     fl->return_chunk_at_head(ffc);
2947   }
2948   // First chunk
2949   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2950   // The blocks above should show their new sizes before the first block below
2951   fc->set_size(word_sz);
2952   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2953   fc->link_next(NULL);
2954   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2955   _bt.verify_single_block((HeapWord*)fc, fc->size());
2956   fl->return_chunk_at_head(fc);
2957 
2958   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2959   {
2960     // Update the stats for this block size.
2961     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2962                     Mutex::_no_safepoint_check_flag);
2963     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2964     _indexedFreeList[word_sz].set_split_births(births);
2965     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2966     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2967   }
2968 
2969   // TRAP
2970   assert(fl->tail()->next() == NULL, "List invariant.");
2971 }
2972 
2973 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2974   assert(fl->count() == 0, "Precondition.");
2975   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2976          "Precondition");
2977 
2978   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2979     // Got it
2980     return;
2981   }
2982 
2983   // Otherwise, we'll split a block from the dictionary.
2984   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2985 }
2986 
2987 // Set up the space's par_seq_tasks structure for work claiming
2988 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2989 // XXX Need to suitably abstract and generalize this and the next
2990 // method into one.
2991 void
2992 CompactibleFreeListSpace::
2993 initialize_sequential_subtasks_for_rescan(int n_threads) {
2994   // The "size" of each task is fixed according to rescan_task_size.
2995   assert(n_threads > 0, "Unexpected n_threads argument");
2996   const size_t task_size = rescan_task_size();
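  // n_tasks below is the ceiling of the used-region word size over task_size,
  // so the last task may be short; e.g. (illustrative numbers) 10000 words at
  // a task_size of 4096 yields 3 tasks.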
2997   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2998   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2999   assert(n_tasks == 0 ||
3000          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
3001           (used_region().start() + n_tasks*task_size >= used_region().end())),
3002          "n_tasks calculation incorrect");
3003   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3004   assert(!pst->valid(), "Clobbering existing data?");
3005   // Sets the condition for completion of the subtask (how many threads
3006   // need to finish in order to be done).
3007   pst->set_n_threads(n_threads);
3008   pst->set_n_tasks((int)n_tasks);
3009 }
3010 
3011 // Set up the space's par_seq_tasks structure for work claiming
3012 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
3013 void
3014 CompactibleFreeListSpace::
3015 initialize_sequential_subtasks_for_marking(int n_threads,
3016                                            HeapWord* low) {
3017   // The "size" of each task is fixed according to marking_task_size.
3018   assert(n_threads > 0, "Unexpected n_threads argument");
3019   const size_t task_size = marking_task_size();
3020   assert(task_size > CardTableModRefBS::card_size_in_words &&
3021          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3022          "Otherwise arithmetic below would be incorrect");
3023   MemRegion span = _gen->reserved();
3024   if (low != NULL) {
3025     if (span.contains(low)) {
3026       // Align low down to a card boundary so that
3027       // we can use block_offset_careful() on span boundaries.
3028       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3029                                  CardTableModRefBS::card_size);
3030       // Clip span prefix at aligned_low
3031       span = span.intersection(MemRegion(aligned_low, span.end()));
3032     } else if (low > span.end()) {
3033       span = MemRegion(low, low);  // Null region
3034     } // else use entire span
3035   }
3036   assert(span.is_empty() ||
3037          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3038         "span should start at a card boundary");
3039   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3040   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3041   assert(n_tasks == 0 ||
3042          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3043           (span.start() + n_tasks*task_size >= span.end())),
3044          "n_tasks calculation incorrect");
3045   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3046   assert(!pst->valid(), "Clobbering existing data?");
3047   // Sets the condition for completion of the subtask (how many threads
3048   // need to finish in order to be done).
3049   pst->set_n_threads(n_threads);
3050   pst->set_n_tasks((int)n_tasks);
3051 }