1 /*
   2  * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/globals.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/vmThread.hpp"
  43 #include "utilities/copy.hpp"
  44 
  45 /////////////////////////////////////////////////////////////////////////
  46 //// CompactibleFreeListSpace
  47 /////////////////////////////////////////////////////////////////////////
  48 
// Highest rank used for free list locks (decremented as each space's lock is constructed).
  50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  51 
  52 // Defaults are 0 so things will break badly if incorrectly initialized.
  53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  55 
  56 size_t MinChunkSize = 0;
  57 
  58 void CompactibleFreeListSpace::set_cms_values() {
  59   // Set CMS global values
  60   assert(MinChunkSize == 0, "already set");
  61 
  62   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  63   // for chunks to contain a FreeChunk.
  64   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  65   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  66 
  67   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  68   IndexSetStart  = MinChunkSize;
  69   IndexSetStride = MinObjAlignment;
  70 }
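
// Worked example (an informal sketch, not asserted anywhere): on a 64-bit VM
// with the default 8-byte object alignment, MinObjAlignment is 1 word; if
// sizeof(FreeChunk) were 24 bytes, MinChunkSize would come out as 3 words,
// IndexSetStart as 3 and IndexSetStride as 1, so the indexed free lists
// would cover every word size in [MinChunkSize, IndexSetSize).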
  71 
  72 // Constructor
  73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  74   MemRegion mr, bool use_adaptive_freelists,
  75   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  76   _dictionaryChoice(dictionaryChoice),
  77   _adaptive_freelists(use_adaptive_freelists),
  78   _bt(bs, mr),
  // Free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true),
  87   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  88                     CMSRescanMultiple),
  89   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSConcMarkMultiple),
  91   _collector(NULL)
  92 {
  93   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  94          "FreeChunk is larger than expected");
  95   _bt.set_space(this);
  96   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  97   // We have all of "mr", all of which we place in the dictionary
  98   // as one big chunk. We'll need to decide here which of several
  99   // possible alternative dictionary implementations to use. For
 100   // now the choice is easy, since we have only one working
 101   // implementation, namely, the simple binary tree (splaying
 102   // temporarily disabled).
 103   switch (dictionaryChoice) {
 104     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 105       _dictionary = new AFLBinaryTreeDictionary(mr);
 106       break;
 107     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 108     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 109     default:
 110       warning("dictionaryChoice: selected option not understood; using"
 111               " default BinaryTreeDictionary implementation instead.");
 112   }
 113   assert(_dictionary != NULL, "CMS dictionary initialization");
 114   // The indexed free lists are initially all empty and are lazily
 115   // filled in on demand. Initialize the array elements to NULL.
 116   initializeIndexedFreeListArray();
 117 
 118   // Not using adaptive free lists assumes that allocation is first
 119   // from the linAB's.  Also a cms perm gen which can be compacted
 120   // has to have the klass's klassKlass allocated at a lower
 121   // address in the heap than the klass so that the klassKlass is
 122   // moved to its new location before the klass is moved.
 123   // Set the _refillSize for the linear allocation blocks
 124   if (!use_adaptive_freelists) {
 125     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 126                                            FreeBlockDictionary<FreeChunk>::atLeast);
 127     // The small linAB initially has all the space and will allocate
 128     // a chunk of any size.
 129     HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
 131       1024*SmallForLinearAlloc, fc->size());
 132     // Note that _unallocated_block is not updated here.
 133     // Allocations from the linear allocation block should
 134     // update it.
 135   } else {
 136     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 137                                SmallForLinearAlloc);
 138   }
 139   // CMSIndexedFreeListReplenish should be at least 1
 140   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 141   _promoInfo.setSpace(this);
 142   if (UseCMSBestFit) {
 143     _fitStrategy = FreeBlockBestFitFirst;
 144   } else {
 145     _fitStrategy = FreeBlockStrategyNone;
 146   }
 147   check_free_list_consistency();
 148 
 149   // Initialize locks for parallel case.
 150 
 151   if (CollectedHeap::use_parallel_gc_threads()) {
 152     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 153       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 154                                               "a freelist par lock",
 155                                               true);
 156       DEBUG_ONLY(
 157         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 158       )
 159     }
 160     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 161   }
 162 }
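
// Note on lock ranks (an informal sketch of the scheme above): because
// _lockRank is read with a post-decrement in the initializer list, the first
// CompactibleFreeListSpace constructed gets a free list lock of rank
// Mutex::leaf + 3 and the next one Mutex::leaf + 2, which is the
// [_leaf+2, _leaf+3] range mentioned in the constructor comment.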
 163 
 164 // Like CompactibleSpace forward() but always calls cross_threshold() to
 165 // update the block offset table.  Removed initialize_threshold call because
 166 // CFLS does not use a block offset array for contiguous spaces.
 167 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 168                                     CompactPoint* cp, HeapWord* compact_top) {
 169   // q is alive
 170   // First check if we should switch compaction space
 171   assert(this == cp->space, "'this' should be current compaction space.");
 172   size_t compaction_max_size = pointer_delta(end(), compact_top);
 173   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjust_object_size_v() method is not correct");
 175   size_t adjusted_size = adjustObjectSize(size);
 176   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 177          "no small fragments allowed");
 178   assert(minimum_free_block_size() == MinChunkSize,
 179          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 181   if (adjusted_size + MinChunkSize > compaction_max_size &&
 182       adjusted_size != compaction_max_size) {
 183     do {
 184       // switch to next compaction space
 185       cp->space->set_compaction_top(compact_top);
 186       cp->space = cp->space->next_compaction_space();
 187       if (cp->space == NULL) {
 188         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 189         assert(cp->gen != NULL, "compaction must succeed");
 190         cp->space = cp->gen->first_compaction_space();
 191         assert(cp->space != NULL, "generation must have a first compaction space");
 192       }
 193       compact_top = cp->space->bottom();
 194       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; the non-virtual version was used above
      // to save the virtual dispatch.
 199       adjusted_size = cp->space->adjust_object_size_v(size);
 200       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 201       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 202     } while (adjusted_size > compaction_max_size);
 203   }
 204 
 205   // store the forwarding pointer into the mark word
 206   if ((HeapWord*)q != compact_top) {
 207     q->forward_to(oop(compact_top));
 208     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 209   } else {
 210     // if the object isn't moving we can just set the mark to the default
 211     // mark and handle it specially later on.
 212     q->init_mark();
 213     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 214   }
 215 
 216   compact_top += adjusted_size;
 217 
 218   // we need to update the offset table so that the beginnings of objects can be
 219   // found during scavenge.  Note that we are updating the offset table based on
 220   // where the object will be once the compaction phase finishes.
 221 
  // Always call cross_threshold().  A contiguous space calls it only when
  // compaction_top exceeds the current threshold; that shortcut does not
  // apply to a non-contiguous space.
 225   cp->threshold =
 226     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 227   return compact_top;
 228 }
 229 
 230 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 231 // and use of single_block instead of alloc_block.  The name here is not really
 232 // appropriate - maybe a more general name could be invented for both the
 233 // contiguous and noncontiguous spaces.
 234 
 235 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 236   _bt.single_block(start, the_end);
 237   return end();
 238 }
 239 
// Initialize the indexed free lists; heads and tails start out NULL.
 241 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 242   for (size_t i = 0; i < IndexSetSize; i++) {
 243     // Note that on platforms where objects are double word aligned,
 244     // the odd array elements are not used.  It is convenient, however,
 245     // to map directly from the object size to the array element.
 246     _indexedFreeList[i].reset(IndexSetSize);
 247     _indexedFreeList[i].set_size(i);
 248     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 249     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 250     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 251     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 252   }
 253 }
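
// For example, on a platform where IndexSetStride is 2 (objects are
// double-word aligned), only the even-indexed elements of _indexedFreeList
// ever hold chunks; the odd-indexed elements are initialized above but
// remain empty.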
 254 
 255 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 256   for (size_t i = 1; i < IndexSetSize; i++) {
 257     assert(_indexedFreeList[i].size() == (size_t) i,
 258       "Indexed free list sizes are incorrect");
 259     _indexedFreeList[i].reset(IndexSetSize);
 260     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 261     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 262     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 263     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 264   }
 265 }
 266 
 267 void CompactibleFreeListSpace::reset(MemRegion mr) {
 268   resetIndexedFreeListArray();
 269   dictionary()->reset();
 270   if (BlockOffsetArrayUseUnallocatedBlock) {
 271     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 272     // Everything's allocated until proven otherwise.
 273     _bt.set_unallocated_block(end());
 274   }
 275   if (!mr.is_empty()) {
 276     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 277     _bt.single_block(mr.start(), mr.word_size());
 278     FreeChunk* fc = (FreeChunk*) mr.start();
 279     fc->set_size(mr.word_size());
 280     if (mr.word_size() >= IndexSetSize ) {
 281       returnChunkToDictionary(fc);
 282     } else {
 283       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 284       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 285     }
 286     coalBirth(mr.word_size());
 287   }
 288   _promoInfo.reset();
 289   _smallLinearAllocBlock._ptr = NULL;
 290   _smallLinearAllocBlock._word_size = 0;
 291 }
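
// When mr is non-empty, reset(mr) leaves the space with a single free chunk
// covering mr, placed either in the dictionary or, if mr is smaller than
// IndexSetSize words, at the head of the matching indexed free list.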
 292 
 293 void CompactibleFreeListSpace::reset_after_compaction() {
 294   // Reset the space to the new reality - one free chunk.
 295   MemRegion mr(compaction_top(), end());
 296   reset(mr);
 297   // Now refill the linear allocation block(s) if possible.
 298   if (_adaptive_freelists) {
 299     refillLinearAllocBlocksIfNeeded();
 300   } else {
 301     // Place as much of mr in the linAB as we can get,
 302     // provided it was big enough to go into the dictionary.
 303     FreeChunk* fc = dictionary()->find_largest_dict();
 304     if (fc != NULL) {
 305       assert(fc->size() == mr.word_size(),
 306              "Why was the chunk broken up?");
 307       removeChunkFromDictionary(fc);
 308       HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
 310         1024*SmallForLinearAlloc, fc->size());
 311       // Note that _unallocated_block is not updated here.
 312     }
 313   }
 314 }
 315 
 316 // Walks the entire dictionary, returning a coterminal
 317 // chunk, if it exists. Use with caution since it involves
 318 // a potentially complete walk of a potentially large tree.
 319 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 320 
 321   assert_lock_strong(&_freelistLock);
 322 
 323   return dictionary()->find_chunk_ends_at(end());
 324 }
 325 
 326 
 327 #ifndef PRODUCT
 328 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 329   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 330     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 331   }
 332 }
 333 
 334 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 335   size_t sum = 0;
 336   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 337     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 338   }
 339   return sum;
 340 }
 341 
 342 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 343   size_t count = 0;
 344   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 345     debug_only(
 346       ssize_t total_list_count = 0;
 347       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 348          fc = fc->next()) {
 349         total_list_count++;
 350       }
 351       assert(total_list_count ==  _indexedFreeList[i].count(),
 352         "Count in list is incorrect");
 353     )
 354     count += _indexedFreeList[i].count();
 355   }
 356   return count;
 357 }
 358 
 359 size_t CompactibleFreeListSpace::totalCount() {
 360   size_t num = totalCountInIndexedFreeLists();
  num += dictionary()->total_count();
 362   if (_smallLinearAllocBlock._word_size != 0) {
 363     num++;
 364   }
 365   return num;
 366 }
 367 #endif
 368 
 369 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 370   FreeChunk* fc = (FreeChunk*) p;
 371   return fc->is_free();
 372 }
 373 
 374 size_t CompactibleFreeListSpace::used() const {
 375   return capacity() - free();
 376 }
 377 
 378 size_t CompactibleFreeListSpace::free() const {
 379   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 380   // if you do this while the structures are in flux you
 381   // may get an approximate answer only; for instance
 382   // because there is concurrent allocation either
 383   // directly by mutators or for promotion during a GC.
 384   // It's "MT-safe", however, in the sense that you are guaranteed
 385   // not to crash and burn, for instance, because of walking
 386   // pointers that could disappear as you were walking them.
 387   // The approximation is because the various components
 388   // that are read below are not read atomically (and
 389   // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 391   // this is during a resize operation at the end of GC
 392   // and at that time you are guaranteed to get the
 393   // correct actual value. However, for instance, this is
 394   // also read completely asynchronously by the "perf-sampler"
 395   // that supports jvmstat, and you are apt to see the values
 396   // flicker in such cases.
 397   assert(_dictionary != NULL, "No _dictionary?");
 398   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 399           totalSizeInIndexedFreeLists() +
 400           _smallLinearAllocBlock._word_size) * HeapWordSize;
 401 }
 402 
 403 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 404   assert(_dictionary != NULL, "No _dictionary?");
 405   assert_locked();
 406   size_t res = _dictionary->max_chunk_size();
 407   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 408                        (size_t) SmallForLinearAlloc - 1));
 409   // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
 412   // just return res calculated above? My reasoning was that
 413   // those cases will be so rare that the extra time spent doesn't
 414   // really matter....
 415   // Note: do not change the loop test i >= res + IndexSetStride
 416   // to i > res below, because i is unsigned and res may be zero.
 417   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 418        i -= IndexSetStride) {
 419     if (_indexedFreeList[i].head() != NULL) {
 420       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 421       return i;
 422     }
 423   }
 424   return res;
 425 }
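
// Illustrative example (a sketch, not derived from any particular heap): if
// the largest dictionary chunk is 40 words, the small linAB holds 10 words
// and SmallForLinearAlloc exceeds 10, then res starts out as MAX2(40, 10),
// i.e. 40; the loop then scans the indexed free lists for a non-empty list
// of size greater than 40 and, finding none, the method returns 40.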
 426 
 427 void LinearAllocBlock::print_on(outputStream* st) const {
 428   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 429             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 430             _ptr, _word_size, _refillSize, _allocation_size_limit);
 431 }
 432 
 433 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 434   st->print_cr("COMPACTIBLE FREELIST SPACE");
 435   st->print_cr(" Space:");
 436   Space::print_on(st);
 437 
 438   st->print_cr("promoInfo:");
 439   _promoInfo.print_on(st);
 440 
 441   st->print_cr("_smallLinearAllocBlock");
 442   _smallLinearAllocBlock.print_on(st);
 443 
 444   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 445 
 446   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 447                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 448 }
 449 
 450 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 451 const {
 452   reportIndexedFreeListStatistics();
 453   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 454   gclog_or_tty->print_cr("---------------------------");
 455   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 456   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 457     _indexedFreeList[i].print_on(gclog_or_tty);
 458     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 459          fc = fc->next()) {
 460       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 461                           fc, (HeapWord*)fc + i,
 462                           fc->cantCoalesce() ? "\t CC" : "");
 463     }
 464   }
 465 }
 466 
 467 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 468 const {
 469   _promoInfo.print_on(st);
 470 }
 471 
 472 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 473 const {
 474   _dictionary->report_statistics();
 475   st->print_cr("Layout of Freelists in Tree");
 476   st->print_cr("---------------------------");
 477   _dictionary->print_free_lists(st);
 478 }
 479 
 480 class BlkPrintingClosure: public BlkClosure {
 481   const CMSCollector*             _collector;
 482   const CompactibleFreeListSpace* _sp;
 483   const CMSBitMap*                _live_bit_map;
 484   const bool                      _post_remark;
 485   outputStream*                   _st;
 486 public:
 487   BlkPrintingClosure(const CMSCollector* collector,
 488                      const CompactibleFreeListSpace* sp,
 489                      const CMSBitMap* live_bit_map,
 490                      outputStream* st):
 491     _collector(collector),
 492     _sp(sp),
 493     _live_bit_map(live_bit_map),
 494     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 495     _st(st) { }
 496   size_t do_blk(HeapWord* addr);
 497 };
 498 
 499 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 500   size_t sz = _sp->block_size_no_stall(addr, _collector);
 501   assert(sz != 0, "Should always be able to compute a size");
 502   if (_sp->block_is_obj(addr)) {
 503     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 504     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 505       addr,
 506       dead ? "dead" : "live",
 507       sz,
 508       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 509     if (CMSPrintObjectsInDump && !dead) {
 510       oop(addr)->print_on(_st);
 511       _st->print_cr("--------------------------------------");
 512     }
 513   } else { // free block
 514     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 515       addr, sz, CMSPrintChunksInDump ? ":" : ".");
 516     if (CMSPrintChunksInDump) {
 517       ((FreeChunk*)addr)->print_on(_st);
 518       _st->print_cr("--------------------------------------");
 519     }
 520   }
 521   return sz;
 522 }
 523 
 524 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 525   outputStream* st) {
 526   st->print_cr("\n=========================");
 527   st->print_cr("Block layout in CMS Heap:");
 528   st->print_cr("=========================");
 529   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 530   blk_iterate(&bpcl);
 531 
 532   st->print_cr("\n=======================================");
 533   st->print_cr("Order & Layout of Promotion Info Blocks");
 534   st->print_cr("=======================================");
 535   print_promo_info_blocks(st);
 536 
 537   st->print_cr("\n===========================");
 538   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 540   print_indexed_free_lists(st);
 541 
 542   st->print_cr("\n=================================");
 543   st->print_cr("Order of Free Lists in Dictionary");
 544   st->print_cr("=================================");
 545   print_dictionary_free_lists(st);
 546 }
 547 
 548 
 549 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 550   assert_lock_strong(&_freelistLock);
 551   assert(PrintFLSStatistics != 0, "Reporting error");
 552   _dictionary->report_statistics();
 553   if (PrintFLSStatistics > 1) {
 554     reportIndexedFreeListStatistics();
 555     size_t total_size = totalSizeInIndexedFreeLists() +
 556                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 557     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 558   }
 559 }
 560 
 561 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 562   assert_lock_strong(&_freelistLock);
 563   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 564                       "--------------------------------\n");
 565   size_t total_size = totalSizeInIndexedFreeLists();
 566   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
  if (free_blocks != 0) {
    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 572   }
 573 }
 574 
 575 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 576   size_t res = 0;
 577   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 578     debug_only(
 579       ssize_t recount = 0;
 580       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 581          fc = fc->next()) {
 582         recount += 1;
 583       }
 584       assert(recount == _indexedFreeList[i].count(),
 585         "Incorrect count in list");
 586     )
 587     res += _indexedFreeList[i].count();
 588   }
 589   return res;
 590 }
 591 
 592 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 593   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 594     if (_indexedFreeList[i].head() != NULL) {
 595       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 596       return (size_t)i;
 597     }
 598   }
 599   return 0;
 600 }
 601 
 602 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 603   HeapWord* prevEnd = end();
 604   assert(prevEnd != value, "unnecessary set_end call");
 605   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 606         "New end is below unallocated block");
 607   _end = value;
 608   if (prevEnd != NULL) {
 609     // Resize the underlying block offset table.
 610     _bt.resize(pointer_delta(value, bottom()));
 611     if (value <= prevEnd) {
 612       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 613              "New end is below unallocated block");
 614     } else {
 615       // Now, take this new chunk and add it to the free blocks.
 616       // Note that the BOT has not yet been updated for this block.
 617       size_t newFcSize = pointer_delta(value, prevEnd);
 618       // XXX This is REALLY UGLY and should be fixed up. XXX
 619       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 620         // Mark the boundary of the new block in BOT
 621         _bt.mark_block(prevEnd, value);
 622         // put it all in the linAB
 623         if (ParallelGCThreads == 0) {
 624           _smallLinearAllocBlock._ptr = prevEnd;
 625           _smallLinearAllocBlock._word_size = newFcSize;
 626           repairLinearAllocBlock(&_smallLinearAllocBlock);
 627         } else { // ParallelGCThreads > 0
 628           MutexLockerEx x(parDictionaryAllocLock(),
 629                           Mutex::_no_safepoint_check_flag);
 630           _smallLinearAllocBlock._ptr = prevEnd;
 631           _smallLinearAllocBlock._word_size = newFcSize;
 632           repairLinearAllocBlock(&_smallLinearAllocBlock);
 633         }
 634         // Births of chunks put into a LinAB are not recorded.  Births
 635         // of chunks as they are allocated out of a LinAB are.
 636       } else {
 637         // Add the block to the free lists, if possible coalescing it
 638         // with the last free block, and update the BOT and census data.
 639         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 640       }
 641     }
 642   }
 643 }
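
// In summary, set_end() above distinguishes three cases once the BOT has
// been resized: shrinking the space (assertion checking only), growing it
// while adaptive free lists are off and the small linAB is empty (the new
// words are handed to the linAB), and growing it otherwise (the new words
// are added to the free lists, coalescing with a preceding free block when
// possible).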
 644 
 645 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 646   CompactibleFreeListSpace* _cfls;
 647   CMSCollector* _collector;
 648 protected:
 649   // Override.
 650 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 651   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 652                                        HeapWord* bottom, HeapWord* top, \
 653                                        ClosureType* cl);                \
 654       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 655                                        HeapWord* bottom, HeapWord* top, \
 656                                        ClosureType* cl);                \
 657     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 658                                        HeapWord* bottom, HeapWord* top, \
 659                                        ClosureType* cl)
 660   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 661   walk_mem_region_with_cl_DECL(FilteringClosure);
 662 
 663 public:
 664   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 665                       CMSCollector* collector,
 666                       ExtendedOopClosure* cl,
 667                       CardTableModRefBS::PrecisionStyle precision,
 668                       HeapWord* boundary) :
 669     Filtering_DCTOC(sp, cl, precision, boundary),
 670     _cfls(sp), _collector(collector) {}
 671 };
 672 
 673 // We de-virtualize the block-related calls below, since we know that our
 674 // space is a CompactibleFreeListSpace.
 675 
 676 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 677 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 678                                                  HeapWord* bottom,              \
 679                                                  HeapWord* top,                 \
 680                                                  ClosureType* cl) {             \
 681    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 682    if (is_par) {                                                                \
 683      assert(SharedHeap::heap()->n_par_threads() ==                              \
 684             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 685      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 686    } else {                                                                     \
 687      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 688    }                                                                            \
 689 }                                                                               \
 690 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 691                                                       HeapWord* bottom,         \
 692                                                       HeapWord* top,            \
 693                                                       ClosureType* cl) {        \
 694   /* Skip parts that are before "mr", in case "block_start" sent us             \
 695      back too far. */                                                           \
 696   HeapWord* mr_start = mr.start();                                              \
 697   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 698   HeapWord* next = bottom + bot_size;                                           \
 699   while (next < mr_start) {                                                     \
 700     bottom = next;                                                              \
 701     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 702     next = bottom + bot_size;                                                   \
 703   }                                                                             \
 704                                                                                 \
 705   while (bottom < top) {                                                        \
 706     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 707         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 708                     oop(bottom)) &&                                             \
 709         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 710       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 711       bottom += _cfls->adjustObjectSize(word_sz);                               \
 712     } else {                                                                    \
 713       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 714     }                                                                           \
 715   }                                                                             \
 716 }                                                                               \
 717 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 718                                                         HeapWord* bottom,       \
 719                                                         HeapWord* top,          \
 720                                                         ClosureType* cl) {      \
 721   /* Skip parts that are before "mr", in case "block_start" sent us             \
 722      back too far. */                                                           \
 723   HeapWord* mr_start = mr.start();                                              \
 724   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 725   HeapWord* next = bottom + bot_size;                                           \
 726   while (next < mr_start) {                                                     \
 727     bottom = next;                                                              \
 728     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 729     next = bottom + bot_size;                                                   \
 730   }                                                                             \
 731                                                                                 \
 732   while (bottom < top) {                                                        \
 733     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 734         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 735                     oop(bottom)) &&                                             \
 736         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 737       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 738       bottom += _cfls->adjustObjectSize(word_sz);                               \
 739     } else {                                                                    \
 740       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 741     }                                                                           \
 742   }                                                                             \
 743 }
 744 
 745 // (There are only two of these, rather than N, because the split is due
 746 // only to the introduction of the FilteringClosure, a local part of the
 747 // impl of this abstraction.)
 748 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 750 
 751 DirtyCardToOopClosure*
 752 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 753                                       CardTableModRefBS::PrecisionStyle precision,
 754                                       HeapWord* boundary) {
 755   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 756 }
 757 
 758 
 759 // Note on locking for the space iteration functions:
 760 // since the collector's iteration activities are concurrent with
 761 // allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance, a block being iterated
 763 // may suddenly be allocated or divided up and part of it allocated and
 764 // so on.
 765 
 766 // Apply the given closure to each block in the space.
 767 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 768   assert_lock_strong(freelistLock());
 769   HeapWord *cur, *limit;
 770   for (cur = bottom(), limit = end(); cur < limit;
 771        cur += cl->do_blk_careful(cur));
 772 }
 773 
 774 // Apply the given closure to each block in the space.
 775 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 776   assert_lock_strong(freelistLock());
 777   HeapWord *cur, *limit;
 778   for (cur = bottom(), limit = end(); cur < limit;
 779        cur += cl->do_blk(cur));
 780 }
 781 
 782 // Apply the given closure to each oop in the space.
 783 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 784   assert_lock_strong(freelistLock());
 785   HeapWord *cur, *limit;
 786   size_t curSize;
 787   for (cur = bottom(), limit = end(); cur < limit;
 788        cur += curSize) {
 789     curSize = block_size(cur);
 790     if (block_is_obj(cur)) {
 791       oop(cur)->oop_iterate(cl);
 792     }
 793   }
 794 }
 795 
// Apply the given closure to each oop in the intersection of the space
// and the given memory region.
 797 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
 798   assert_lock_strong(freelistLock());
 799   if (is_empty()) {
 800     return;
 801   }
 802   MemRegion cur = MemRegion(bottom(), end());
 803   mr = mr.intersection(cur);
 804   if (mr.is_empty()) {
 805     return;
 806   }
 807   if (mr.equals(cur)) {
 808     oop_iterate(cl);
 809     return;
 810   }
 811   assert(mr.end() <= end(), "just took an intersection above");
 812   HeapWord* obj_addr = block_start(mr.start());
 813   HeapWord* t = mr.end();
 814 
 815   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 816   if (block_is_obj(obj_addr)) {
 817     // Handle first object specially.
 818     oop obj = oop(obj_addr);
 819     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 820   } else {
 821     FreeChunk* fc = (FreeChunk*)obj_addr;
 822     obj_addr += fc->size();
 823   }
 824   while (obj_addr < t) {
 825     HeapWord* obj = obj_addr;
 826     obj_addr += block_size(obj_addr);
 827     // If "obj_addr" is not greater than top, then the
 828     // entire object "obj" is within the region.
 829     if (obj_addr <= t) {
 830       if (block_is_obj(obj)) {
 831         oop(obj)->oop_iterate(cl);
 832       }
 833     } else {
 834       // "obj" extends beyond end of region
 835       if (block_is_obj(obj)) {
 836         oop(obj)->oop_iterate(&smr_blk);
 837       }
 838       break;
 839     }
 840   }
 841 }
 842 
 843 // NOTE: In the following methods, in order to safely be able to
 844 // apply the closure to an object, we need to be sure that the
 845 // object has been initialized. We are guaranteed that an object
 846 // is initialized if we are holding the Heap_lock with the
 847 // world stopped.
 848 void CompactibleFreeListSpace::verify_objects_initialized() const {
 849   if (is_init_completed()) {
 850     assert_locked_or_safepoint(Heap_lock);
 851     if (Universe::is_fully_initialized()) {
 852       guarantee(SafepointSynchronize::is_at_safepoint(),
 853                 "Required for objects to be initialized");
 854     }
 855   } // else make a concession at vm start-up
 856 }
 857 
 858 // Apply the given closure to each object in the space
 859 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 860   assert_lock_strong(freelistLock());
 861   NOT_PRODUCT(verify_objects_initialized());
 862   HeapWord *cur, *limit;
 863   size_t curSize;
 864   for (cur = bottom(), limit = end(); cur < limit;
 865        cur += curSize) {
 866     curSize = block_size(cur);
 867     if (block_is_obj(cur)) {
 868       blk->do_object(oop(cur));
 869     }
 870   }
 871 }
 872 
 873 // Apply the given closure to each live object in the space
// The use of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
// the space to contain objects with references to objects that are no
// longer valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object.  See obj_is_alive() for details on how the liveness of an
// object is decided.
 882 
 883 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 884   assert_lock_strong(freelistLock());
 885   NOT_PRODUCT(verify_objects_initialized());
 886   HeapWord *cur, *limit;
 887   size_t curSize;
 888   for (cur = bottom(), limit = end(); cur < limit;
 889        cur += curSize) {
 890     curSize = block_size(cur);
 891     if (block_is_obj(cur) && obj_is_alive(cur)) {
 892       blk->do_object(oop(cur));
 893     }
 894   }
 895 }
 896 
 897 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 898                                                   UpwardsObjectClosure* cl) {
 899   assert_locked(freelistLock());
 900   NOT_PRODUCT(verify_objects_initialized());
 901   Space::object_iterate_mem(mr, cl);
 902 }
 903 
 904 // Callers of this iterator beware: The closure application should
 905 // be robust in the face of uninitialized objects and should (always)
 906 // return a correct size so that the next addr + size below gives us a
 907 // valid block boundary. [See for instance,
 908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 909 // in ConcurrentMarkSweepGeneration.cpp.]
 910 HeapWord*
 911 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 912   assert_lock_strong(freelistLock());
 913   HeapWord *addr, *last;
 914   size_t size;
 915   for (addr = bottom(), last  = end();
 916        addr < last; addr += size) {
 917     FreeChunk* fc = (FreeChunk*)addr;
 918     if (fc->is_free()) {
 919       // Since we hold the free list lock, which protects direct
 920       // allocation in this generation by mutators, a free object
 921       // will remain free throughout this iteration code.
 922       size = fc->size();
 923     } else {
 924       // Note that the object need not necessarily be initialized,
 925       // because (for instance) the free list lock does NOT protect
 926       // object initialization. The closure application below must
 927       // therefore be correct in the face of uninitialized objects.
 928       size = cl->do_object_careful(oop(addr));
 929       if (size == 0) {
 930         // An unparsable object found. Signal early termination.
 931         return addr;
 932       }
 933     }
 934   }
 935   return NULL;
 936 }
 937 
 938 // Callers of this iterator beware: The closure application should
 939 // be robust in the face of uninitialized objects and should (always)
 940 // return a correct size so that the next addr + size below gives us a
 941 // valid block boundary. [See for instance,
 942 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 943 // in ConcurrentMarkSweepGeneration.cpp.]
 944 HeapWord*
 945 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 946   ObjectClosureCareful* cl) {
 947   assert_lock_strong(freelistLock());
 948   // Can't use used_region() below because it may not necessarily
 949   // be the same as [bottom(),end()); although we could
 950   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 951   // that appears too cumbersome, so we just do the simpler check
 952   // in the assertion below.
 953   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 954          "mr should be non-empty and within used space");
 955   HeapWord *addr, *end;
 956   size_t size;
 957   for (addr = block_start_careful(mr.start()), end  = mr.end();
 958        addr < end; addr += size) {
 959     FreeChunk* fc = (FreeChunk*)addr;
 960     if (fc->is_free()) {
 961       // Since we hold the free list lock, which protects direct
 962       // allocation in this generation by mutators, a free object
 963       // will remain free throughout this iteration code.
 964       size = fc->size();
 965     } else {
 966       // Note that the object need not necessarily be initialized,
 967       // because (for instance) the free list lock does NOT protect
 968       // object initialization. The closure application below must
 969       // therefore be correct in the face of uninitialized objects.
 970       size = cl->do_object_careful_m(oop(addr), mr);
 971       if (size == 0) {
 972         // An unparsable object found. Signal early termination.
 973         return addr;
 974       }
 975     }
 976   }
 977   return NULL;
 978 }
 979 
 980 
 981 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 982   NOT_PRODUCT(verify_objects_initialized());
 983   return _bt.block_start(p);
 984 }
 985 
 986 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 987   return _bt.block_start_careful(p);
 988 }
 989 
 990 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 991   NOT_PRODUCT(verify_objects_initialized());
 992   // This must be volatile, or else there is a danger that the compiler
 993   // will compile the code below into a sometimes-infinite loop, by keeping
 994   // the value read the first time in a register.
 995   while (true) {
 996     // We must do this until we get a consistent view of the object.
 997     if (FreeChunk::indicatesFreeChunk(p)) {
 998       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 999       size_t res = fc->size();
1000 
1001       // Bugfix for systems with weak memory model (PPC64/IA64). The
1002       // block's free bit was set and we have read the size of the
1003       // block. Acquire and check the free bit again. If the block is
1004       // still free, the read size is correct.
1005       OrderAccess::acquire();
1006 
1007       // If the object is still a free chunk, return the size, else it
1008       // has been allocated so try again.
1009       if (FreeChunk::indicatesFreeChunk(p)) {
1010         assert(res != 0, "Block size should not be 0");
1011         return res;
1012       }
1013     } else {
1014       // must read from what 'p' points to in each loop.
1015       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1016       if (k != NULL) {
1017         assert(k->is_klass(), "Should really be klass oop.");
1018         oop o = (oop)p;
1019         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1020 
1021         // Bugfix for systems with weak memory model (PPC64/IA64).
1022         // The object o may be an array. Acquire to make sure that the array
1023         // size (third word) is consistent.
1024         OrderAccess::acquire();
1025 
1026         size_t res = o->size_given_klass(k);
1027         res = adjustObjectSize(res);
1028         assert(res != 0, "Block size should not be 0");
1029         return res;
1030       }
1031     }
1032   }
1033 }
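
// Example of the race the acquire above guards against: another thread may
// turn the free chunk at p into a newly allocated object between our read of
// the size and our re-check of the free bit; the re-check then fails, the
// loop repeats, and the klass-based path computes the size instead.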
1034 
1035 // TODO: Now that is_parsable is gone, we should combine these two functions.
1036 // A variant of the above that uses the Printezis bits for
1037 // unparsable but allocated objects. This avoids any possible
1038 // stalls waiting for mutators to initialize objects, and is
1039 // thus potentially faster than the variant above. However,
1040 // this variant may return a zero size for a block that is
1041 // under mutation and for which a consistent size cannot be
1042 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1043 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1044                                                      const CMSCollector* c)
1045 const {
1046   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1047   // This must be volatile, or else there is a danger that the compiler
1048   // will compile the code below into a sometimes-infinite loop, by keeping
1049   // the value read the first time in a register.
1050   DEBUG_ONLY(uint loops = 0;)
1051   while (true) {
1052     // We must do this until we get a consistent view of the object.
1053     if (FreeChunk::indicatesFreeChunk(p)) {
1054       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1055       size_t res = fc->size();
1056 
1057       // Bugfix for systems with weak memory model (PPC64/IA64). The
1058       // free bit of the block was set and we have read the size of
1059       // the block. Acquire and check the free bit again. If the
1060       // block is still free, the read size is correct.
1061       OrderAccess::acquire();
1062 
1063       if (FreeChunk::indicatesFreeChunk(p)) {
1064         assert(res != 0, "Block size should not be 0");
1065         assert(loops == 0, "Should be 0");
1066         return res;
1067       }
1068     } else {
1069       // must read from what 'p' points to in each loop.
1070       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1071       // We trust the size of any object that has a non-NULL
1072       // klass and (for those in the perm gen) is parsable
1073       // -- irrespective of its conc_safe-ty.
1074       if (k != NULL) {
1075         assert(k->is_klass(), "Should really be klass oop.");
1076         oop o = (oop)p;
1077         assert(o->is_oop(), "Should be an oop");
1078 
1079         // Bugfix for systems with weak memory model (PPC64/IA64).
1080         // The object o may be an array. Acquire to make sure that the array
1081         // size (third word) is consistent.
1082         OrderAccess::acquire();
1083 
1084         size_t res = o->size_given_klass(k);
1085         res = adjustObjectSize(res);
1086         assert(res != 0, "Block size should not be 0");
1087         return res;
1088       } else {
1089         // May return 0 if P-bits not present.
1090         return c->block_size_if_printezis_bits(p);
1091       }
1092     }
1093     assert(loops == 0, "Can loop at most once");
1094     DEBUG_ONLY(loops++;)
1095   }
1096 }
1097 
1098 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1099   NOT_PRODUCT(verify_objects_initialized());
1100   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1101   FreeChunk* fc = (FreeChunk*)p;
1102   if (fc->is_free()) {
1103     return fc->size();
1104   } else {
1105     // Ignore mark word because this may be a recently promoted
1106     // object whose mark word is used to chain together grey
1107     // objects (the last one would have a null value).
1108     assert(oop(p)->is_oop(true), "Should be an oop");
1109     return adjustObjectSize(oop(p)->size());
1110   }
1111 }
1112 
1113 // This implementation assumes that the property of "being an object" is
1114 // stable.  But being a free chunk may not be (because of parallel
// promotion).
1116 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1117   FreeChunk* fc = (FreeChunk*)p;
1118   assert(is_in_reserved(p), "Should be in space");
1119   // When doing a mark-sweep-compact of the CMS generation, this
1120   // assertion may fail because prepare_for_compaction() uses
1121   // space that is garbage to maintain information on ranges of
1122   // live objects so that these live ranges can be moved as a whole.
1123   // Comment out this assertion until that problem can be solved
// (i.e., the block start calculation may look at objects
// at addresses below "p" in finding the object that contains "p",
// and those objects (if garbage) may have been modified to hold
// live range information).
1128   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1129   //        "Should be a block boundary");
1130   if (FreeChunk::indicatesFreeChunk(p)) return false;
1131   Klass* k = oop(p)->klass_or_null();
1132   if (k != NULL) {
1133     // Ignore mark word because it may have been used to
1134     // chain together promoted objects (the last one
1135     // would have a null value).
1136     assert(oop(p)->is_oop(true), "Should be an oop");
1137     return true;
1138   } else {
1139     return false;  // Was not an object at the start of collection.
1140   }
1141 }
1142 
1143 // Check if the object is alive. This fact is checked either by consulting
1144 // the main marking bitmap in the sweeping phase or, if it's a permanent
1145 // generation and we're not in the sweeping phase, by checking the
1146 // perm_gen_verify_bit_map where we store the "deadness" information if
1147 // we did not sweep the perm gen in the most recent previous GC cycle.
1148 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1149   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1150          "Else races are possible");
1151   assert(block_is_obj(p), "The address should point to an object");
1152 
1153   // If we're sweeping, we use object liveness information from the main bit map
1154   // for both perm gen and old gen.
1155   // We don't need to lock the bitmap (live_map or dead_map below), because
1156   // EITHER we are in the middle of the sweeping phase, and the
1157   // main marking bit map (live_map below) is locked,
1158   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1159   // is stable, because it's mutated only in the sweeping phase.
1160   // NOTE: This method is also used by jmap where, if class unloading is
1161   // off, the results can return "false" for legitimate perm objects,
1162   // when we are not in the midst of a sweeping phase, which can result
1163   // in jmap not reporting certain perm gen objects. This will be moot
1164   // if/when the perm gen goes away in the future.
1165   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1166     CMSBitMap* live_map = _collector->markBitMap();
1167     return live_map->par_isMarked((HeapWord*) p);
1168   }
1169   return true;
1170 }
1171 
1172 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1173   FreeChunk* fc = (FreeChunk*)p;
1174   assert(is_in_reserved(p), "Should be in space");
1175   assert(_bt.block_start(p) == p, "Should be a block boundary");
1176   if (!fc->is_free()) {
1177     // Ignore mark word because it may have been used to
1178     // chain together promoted objects (the last one
1179     // would have a null value).
1180     assert(oop(p)->is_oop(true), "Should be an oop");
1181     return true;
1182   }
1183   return false;
1184 }
1185 
1186 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1187 // approximate answer if you don't hold the freelistlock when you call this.
1188 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1189   size_t size = 0;
1190   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1191     debug_only(
1192       // We may be calling here without the lock in which case we
1193       // won't do this modest sanity check.
1194       if (freelistLock()->owned_by_self()) {
1195         size_t total_list_size = 0;
1196         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1197           fc = fc->next()) {
1198           total_list_size += i;
1199         }
1200         assert(total_list_size == i * _indexedFreeList[i].count(),
1201                "Count in list is incorrect");
1202       }
1203     )
1204     size += i * _indexedFreeList[i].count();
1205   }
1206   return size;
1207 }
1208 
1209 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1210   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1211   return allocate(size);
1212 }
1213 
1214 HeapWord*
1215 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1216   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1217 }
1218 
1219 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1220   assert_lock_strong(freelistLock());
1221   HeapWord* res = NULL;
1222   assert(size == adjustObjectSize(size),
1223          "use adjustObjectSize() before calling into allocate()");
1224 
1225   if (_adaptive_freelists) {
1226     res = allocate_adaptive_freelists(size);
1227   } else {  // non-adaptive free lists
1228     res = allocate_non_adaptive_freelists(size);
1229   }
1230 
1231   if (res != NULL) {
1232     // check that res does lie in this space!
1233     assert(is_in_reserved(res), "Not in this space!");
1234     assert(is_aligned((void*)res), "alignment check");
1235 
1236     FreeChunk* fc = (FreeChunk*)res;
1237     fc->markNotFree();
1238     assert(!fc->is_free(), "shouldn't be marked free");
1239     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1240     // Verify that the block offset table shows this to
1241     // be a single block, but not one which is unallocated.
1242     _bt.verify_single_block(res, size);
1243     _bt.verify_not_unallocated(res, size);
1244     // mangle a just allocated object with a distinct pattern.
1245     debug_only(fc->mangleAllocated(size));
1246   }
1247 
1248   return res;
1249 }
1250 
1251 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1252   HeapWord* res = NULL;
1253   // try and use linear allocation for smaller blocks
1254   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1255     // if successful, the following also adjusts block offset table
1256     res = getChunkFromSmallLinearAllocBlock(size);
1257   }
1258   // Else triage to indexed lists for smaller sizes
1259   if (res == NULL) {
1260     if (size < SmallForDictionary) {
1261       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1262     } else {
1263       // else get it from the big dictionary; if even this doesn't
1264       // work we are out of luck.
1265       res = (HeapWord*)getChunkFromDictionaryExact(size);
1266     }
1267   }
1268 
1269   return res;
1270 }
1271 
1272 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1273   assert_lock_strong(freelistLock());
1274   HeapWord* res = NULL;
1275   assert(size == adjustObjectSize(size),
1276          "use adjustObjectSize() before calling into allocate()");
1277 
1278   // Strategy
1279   //   if small
1280   //     exact size from small object indexed list if small
1281   //     small or large linear allocation block (linAB) as appropriate
1282   //     take from lists of greater sized chunks
1283   //   else
1284   //     dictionary
1285   //     small or large linear allocation block if it has the space
1286   // Try allocating exact size from indexTable first
1287   if (size < IndexSetSize) {
1288     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1289     if (res != NULL) {
1290       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1291         "Not removed from free list");
1292       // no block offset table adjustment is necessary on blocks in
1293       // the indexed lists.
1294 
1295     // Try allocating from the small LinAB
1296     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1297         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1298         // if successful, the above also adjusts block offset table
1299         // Note that this call will refill the LinAB to
1300         // satisfy the request.  This is different from
1301         // evm.
1302         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1303     } else {
1304       // Raid the exact free lists larger than size, even if they are not
1305       // overpopulated.
1306       res = (HeapWord*) getChunkFromGreater(size);
1307     }
1308   } else {
1309     // Big objects get allocated directly from the dictionary.
1310     res = (HeapWord*) getChunkFromDictionaryExact(size);
1311     if (res == NULL) {
1312       // Try hard not to fail since an allocation failure will likely
1313       // trigger a synchronous GC.  Try to get the space from the
1314       // allocation blocks.
1315       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1316     }
1317   }
1318 
1319   return res;
1320 }
1321 
1322 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1323 // when promoting obj.
1324 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1325   // Depending on the object size, expansion may require refilling either a
1326   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1327   // is added because the dictionary may over-allocate to avoid fragmentation.
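       // Worked example (all numbers illustrative, not actual defaults): with
       // obj_size = 10 words, a non-adaptive linAB refill size of 256 words,
       // a PromotionInfo refill size of 128 words and MinChunkSize = 4 words,
       // the estimate below is MAX2(10, 256) + 128 + 2 * 4 = 392 words.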
1328   size_t space = obj_size;
1329   if (!_adaptive_freelists) {
1330     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1331   }
1332   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1333   return space;
1334 }
1335 
1336 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1337   FreeChunk* ret;
1338 
1339   assert(numWords >= MinChunkSize, "Size is less than minimum");
1340   assert(linearAllocationWouldFail() || bestFitFirst(),
1341     "Should not be here");
1342 
1343   size_t i;
1344   size_t currSize = numWords + MinChunkSize;
1345   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1346   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1347     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1348     if (fl->head()) {
1349       ret = getFromListGreater(fl, numWords);
1350       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1351       return ret;
1352     }
1353   }
1354 
1355   currSize = MAX2((size_t)SmallForDictionary,
1356                   (size_t)(numWords + MinChunkSize));
1357 
1358   /* Try to get a chunk that satisfies request, while avoiding
1359      fragmentation that can't be handled. */
1360   {
1361     ret =  dictionary()->get_chunk(currSize);
1362     if (ret != NULL) {
1363       assert(ret->size() - numWords >= MinChunkSize,
1364              "Chunk is too small");
1365       _bt.allocated((HeapWord*)ret, ret->size());
1366       /* Carve returned chunk. */
1367       (void) splitChunkAndReturnRemainder(ret, numWords);
1368       /* Label this as no longer a free chunk. */
1369       assert(ret->is_free(), "This chunk should be free");
1370       ret->link_prev(NULL);
1371     }
1372     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1373     return ret;
1374   }
1375   ShouldNotReachHere();
1376 }
1377 
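     // Check that the (small) purported free chunk appears on the indexed
     // free list corresponding to its exact size.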
1378 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1379   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1380   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1381 }
1382 
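     // Check whether the purported free chunk is exactly the current small
     // linear allocation block, i.e. it starts at the block's _ptr and has
     // the block's _word_size.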
1383 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1384   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1385          (_smallLinearAllocBlock._word_size == fc->size()),
1386          "Linear allocation block shows incorrect size");
1387   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1388           (_smallLinearAllocBlock._word_size == fc->size()));
1389 }
1390 
1391 // Check if the purported free chunk is present either as a linear
1392 // allocation block, the size-indexed table of (smaller) free blocks,
1393 // or the larger free blocks kept in the binary tree dictionary.
1394 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1395   if (verify_chunk_is_linear_alloc_block(fc)) {
1396     return true;
1397   } else if (fc->size() < IndexSetSize) {
1398     return verifyChunkInIndexedFreeLists(fc);
1399   } else {
1400     return dictionary()->verify_chunk_in_free_list(fc);
1401   }
1402 }
1403 
1404 #ifndef PRODUCT
1405 void CompactibleFreeListSpace::assert_locked() const {
1406   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1407 }
1408 
1409 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1410   CMSLockVerifier::assert_locked(lock);
1411 }
1412 #endif
1413 
1414 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1415   // In the parallel case, the main thread holds the free list lock
1416   // on behalf of the parallel threads.
1417   FreeChunk* fc;
1418   {
1419     // If GC is parallel, this might be called by several threads.
1420     // This should be rare enough that the locking overhead won't affect
1421     // the sequential code.
1422     MutexLockerEx x(parDictionaryAllocLock(),
1423                     Mutex::_no_safepoint_check_flag);
1424     fc = getChunkFromDictionary(size);
1425   }
1426   if (fc != NULL) {
1427     fc->dontCoalesce();
1428     assert(fc->is_free(), "Should be free, but not coalescable");
1429     // Verify that the block offset table shows this to
1430     // be a single block, but not one which is unallocated.
1431     _bt.verify_single_block((HeapWord*)fc, fc->size());
1432     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1433   }
1434   return fc;
1435 }
1436 
1437 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1438   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1439   assert_locked();
1440 
1441   // if we are tracking promotions, then first ensure space for
1442   // promotion (including spooling space for saving header if necessary).
1443   // then allocate and copy, then track promoted info if needed.
1444   // When tracking (see PromotionInfo::track()), the mark word may
1445   // be displaced and in this case restoration of the mark word
1446   // occurs in the (oop_since_save_marks_)iterate phase.
1447   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1448     return NULL;
1449   }
1450   // Call the allocate(size_t, bool) form directly to avoid the
1451   // additional call through the allocate(size_t) form.  Having
1452   // the compiler inline the call is problematic because allocate(size_t)
1453   // is a virtual method.
1454   HeapWord* res = allocate(adjustObjectSize(obj_size));
1455   if (res != NULL) {
1456     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1457     // if we should be tracking promotions, do so.
1458     if (_promoInfo.tracking()) {
1459         _promoInfo.track((PromotedObject*)res);
1460     }
1461   }
1462   return oop(res);
1463 }
1464 
1465 HeapWord*
1466 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1467   assert_locked();
1468   assert(size >= MinChunkSize, "minimum chunk size");
1469   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1470     "maximum from smallLinearAllocBlock");
1471   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1472 }
1473 
1474 HeapWord*
1475 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1476                                                        size_t size) {
1477   assert_locked();
1478   assert(size >= MinChunkSize, "too small");
1479   HeapWord* res = NULL;
1480   // Try to do linear allocation from blk, making sure that it has not been exhausted.
1481   if (blk->_word_size == 0) {
1482     // We have probably been unable to fill this either in the prologue or
1483     // when it was exhausted at the last linear allocation. Bail out until
1484     // next time.
1485     assert(blk->_ptr == NULL, "consistency check");
1486     return NULL;
1487   }
1488   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1489   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1490   if (res != NULL) return res;
1491 
1492   // about to exhaust this linear allocation block
1493   if (blk->_word_size == size) { // exactly satisfied
1494     res = blk->_ptr;
1495     _bt.allocated(res, blk->_word_size);
1496   } else if (size + MinChunkSize <= blk->_refillSize) {
1497     size_t sz = blk->_word_size;
1498     // Update _unallocated_block if the size is such that chunk would be
1499     // returned to the indexed free list.  All other chunks in the indexed
1500     // free lists are allocated from the dictionary so that _unallocated_block
1501     // has already been adjusted for them.  Do it here so that the cost
1502     // is incurred for all chunks added back to the indexed free lists.
1503     if (sz < SmallForDictionary) {
1504       _bt.allocated(blk->_ptr, sz);
1505     }
1506     // Return the chunk that isn't big enough, and then refill below.
1507     addChunkToFreeLists(blk->_ptr, sz);
1508     split_birth(sz);
1509     // Don't keep statistics on adding back chunk from a LinAB.
1510   } else {
1511     // A refilled block would not satisfy the request.
1512     return NULL;
1513   }
1514 
1515   blk->_ptr = NULL; blk->_word_size = 0;
1516   refillLinearAllocBlock(blk);
1517   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1518          "block was replenished");
1519   if (res != NULL) {
1520     split_birth(size);
1521     repairLinearAllocBlock(blk);
1522   } else if (blk->_ptr != NULL) {
1523     res = blk->_ptr;
1524     size_t blk_size = blk->_word_size;
1525     blk->_word_size -= size;
1526     blk->_ptr  += size;
1527     split_birth(size);
1528     repairLinearAllocBlock(blk);
1529     // Update BOT last so that other (parallel) GC threads see a consistent
1530     // view of the BOT and free blocks.
1531     // Above must occur before BOT is updated below.
1532     OrderAccess::storestore();
1533     _bt.split_block(res, blk_size, size);  // adjust block offset table
1534   }
1535   return res;
1536 }
1537 
1538 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1539                                         LinearAllocBlock* blk,
1540                                         size_t size) {
1541   assert_locked();
1542   assert(size >= MinChunkSize, "too small");
1543 
1544   HeapWord* res = NULL;
1545   // This is the common case.  Keep it simple.
1546   if (blk->_word_size >= size + MinChunkSize) {
1547     assert(blk->_ptr != NULL, "consistency check");
1548     res = blk->_ptr;
1549     // Note that the BOT is up-to-date for the linAB before allocation.  It
1550     // indicates the start of the linAB.  The split_block() updates the
1551     // BOT for the linAB after the allocation (indicates the start of the
1552     // next chunk to be allocated).
1553     size_t blk_size = blk->_word_size;
1554     blk->_word_size -= size;
1555     blk->_ptr  += size;
1556     split_birth(size);
1557     repairLinearAllocBlock(blk);
1558     // Update BOT last so that other (parallel) GC threads see a consistent
1559     // view of the BOT and free blocks.
1560     // Above must occur before BOT is updated below.
1561     OrderAccess::storestore();
1562     _bt.split_block(res, blk_size, size);  // adjust block offset table
1563     _bt.allocated(res, size);
1564   }
1565   return res;
1566 }
1567 
1568 FreeChunk*
1569 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1570   assert_locked();
1571   assert(size < SmallForDictionary, "just checking");
1572   FreeChunk* res;
1573   res = _indexedFreeList[size].get_chunk_at_head();
1574   if (res == NULL) {
1575     res = getChunkFromIndexedFreeListHelper(size);
1576   }
1577   _bt.verify_not_unallocated((HeapWord*) res, size);
1578   assert(res == NULL || res->size() == size, "Incorrect block size");
1579   return res;
1580 }
1581 
1582 FreeChunk*
1583 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1584   bool replenish) {
1585   assert_locked();
1586   FreeChunk* fc = NULL;
1587   if (size < SmallForDictionary) {
1588     assert(_indexedFreeList[size].head() == NULL ||
1589       _indexedFreeList[size].surplus() <= 0,
1590       "List for this size should be empty or under populated");
1591     // Try best fit in exact lists before replenishing the list
1592     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1593       // Replenish list.
1594       //
1595       // Things tried that failed.
1596       //   Tried allocating out of the two LinAB's first before
1597       // replenishing lists.
1598       //   Tried small linAB of size 256 (size in indexed list)
1599       // and replenishing indexed lists from the small linAB.
1600       //
1601       FreeChunk* newFc = NULL;
1602       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1603       if (replenish_size < SmallForDictionary) {
1604         // Do not replenish from an underpopulated size.
1605         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1606             _indexedFreeList[replenish_size].head() != NULL) {
1607           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1608         } else if (bestFitFirst()) {
1609           newFc = bestFitSmall(replenish_size);
1610         }
1611       }
1612       if (newFc == NULL && replenish_size > size) {
1613         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1614         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1615       }
1616       // Note: The stats update re split-death of block obtained above
1617       // will be recorded below, precisely when we know we are actually
1618       // going to split the block into more than one piece.
1619       if (newFc != NULL) {
1620         if  (replenish || CMSReplenishIntermediate) {
1621           // Replenish this list and return one block to caller.
1622           size_t i;
1623           FreeChunk *curFc, *nextFc;
1624           size_t num_blk = newFc->size() / size;
1625           assert(num_blk >= 1, "Smaller than requested?");
1626           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1627           if (num_blk > 1) {
1628             // we are sure we will be splitting the block just obtained
1629             // into multiple pieces; record the split-death of the original
1630             splitDeath(replenish_size);
1631           }
1632           // carve up and link blocks 0, ..., num_blk - 2
1633           // The last chunk is not added to the lists but is returned as the
1634           // free chunk.
1635           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1636                i = 0;
1637                i < (num_blk - 1);
1638                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1639                i++) {
1640             curFc->set_size(size);
1641             // Don't record this as a return in order to try and
1642             // determine the "returns" from a GC.
1643             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1644             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1645             _bt.mark_block((HeapWord*)curFc, size);
1646             split_birth(size);
1647             // Don't record the initial population of the indexed list
1648             // as a split birth.
1649           }
1650 
1651           // check that the arithmetic was OK above
1652           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1653             "inconsistency in carving newFc");
1654           curFc->set_size(size);
1655           _bt.mark_block((HeapWord*)curFc, size);
1656           split_birth(size);
1657           fc = curFc;
1658         } else {
1659           // Return entire block to caller
1660           fc = newFc;
1661         }
1662       }
1663     }
1664   } else {
1665     // Get a free chunk from the free chunk dictionary to be returned to
1666     // replenish the indexed free list.
1667     fc = getChunkFromDictionaryExact(size);
1668   }
1669   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1670   return fc;
1671 }
1672 
1673 FreeChunk*
1674 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1675   assert_locked();
1676   FreeChunk* fc = _dictionary->get_chunk(size,
1677                                          FreeBlockDictionary<FreeChunk>::atLeast);
1678   if (fc == NULL) {
1679     return NULL;
1680   }
1681   _bt.allocated((HeapWord*)fc, fc->size());
1682   if (fc->size() >= size + MinChunkSize) {
1683     fc = splitChunkAndReturnRemainder(fc, size);
1684   }
1685   assert(fc->size() >= size, "chunk too small");
1686   assert(fc->size() < size + MinChunkSize, "chunk too big");
1687   _bt.verify_single_block((HeapWord*)fc, fc->size());
1688   return fc;
1689 }
1690 
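     // Get a chunk of exactly "size" words from the dictionary.  If the chunk
     // handed back is larger but the excess would be smaller than MinChunkSize,
     // it is returned to the dictionary and a chunk of at least
     // size + MinChunkSize is requested instead, so that the split below
     // always leaves a valid remainder.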
1691 FreeChunk*
1692 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1693   assert_locked();
1694   FreeChunk* fc = _dictionary->get_chunk(size,
1695                                          FreeBlockDictionary<FreeChunk>::atLeast);
1696   if (fc == NULL) {
1697     return fc;
1698   }
1699   _bt.allocated((HeapWord*)fc, fc->size());
1700   if (fc->size() == size) {
1701     _bt.verify_single_block((HeapWord*)fc, size);
1702     return fc;
1703   }
1704   assert(fc->size() > size, "get_chunk() guarantee");
1705   if (fc->size() < size + MinChunkSize) {
1706     // Return the chunk to the dictionary and go get a bigger one.
1707     returnChunkToDictionary(fc);
1708     fc = _dictionary->get_chunk(size + MinChunkSize,
1709                                 FreeBlockDictionary<FreeChunk>::atLeast);
1710     if (fc == NULL) {
1711       return NULL;
1712     }
1713     _bt.allocated((HeapWord*)fc, fc->size());
1714   }
1715   assert(fc->size() >= size + MinChunkSize, "tautology");
1716   fc = splitChunkAndReturnRemainder(fc, size);
1717   assert(fc->size() == size, "chunk is wrong size");
1718   _bt.verify_single_block((HeapWord*)fc, size);
1719   return fc;
1720 }
1721 
1722 void
1723 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1724   assert_locked();
1725 
1726   size_t size = chunk->size();
1727   _bt.verify_single_block((HeapWord*)chunk, size);
1728   // adjust _unallocated_block downward, as necessary
1729   _bt.freed((HeapWord*)chunk, size);
1730   _dictionary->return_chunk(chunk);
1731 #ifndef PRODUCT
1732   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1733     TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
1734     TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
1735     tl->verify_stats();
1736   }
1737 #endif // PRODUCT
1738 }
1739 
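     // Return a small chunk to the indexed free list for its size.  With
     // adaptive free lists the chunk is appended at the tail, otherwise it
     // is pushed at the head.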
1740 void
1741 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1742   assert_locked();
1743   size_t size = fc->size();
1744   _bt.verify_single_block((HeapWord*) fc, size);
1745   _bt.verify_not_unallocated((HeapWord*) fc, size);
1746   if (_adaptive_freelists) {
1747     _indexedFreeList[size].return_chunk_at_tail(fc);
1748   } else {
1749     _indexedFreeList[size].return_chunk_at_head(fc);
1750   }
1751 #ifndef PRODUCT
1752   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1753      _indexedFreeList[size].verify_stats();
1754   }
1755 #endif // PRODUCT
1756 }
1757 
1758 // Add chunk to end of last block -- if it's the largest
1759 // block -- and update BOT and census data. We would
1760 // of course have preferred to coalesce it with the
1761 // last block, but it's currently less expensive to find the
1762 // largest block than it is to find the last.
1763 void
1764 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1765   HeapWord* chunk, size_t     size) {
1766   // check that the chunk does lie in this space!
1767   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1768   // One of the parallel gc task threads may be here
1769   // whilst others are allocating.
1770   Mutex* lock = NULL;
1771   if (ParallelGCThreads != 0) {
1772     lock = &_parDictionaryAllocLock;
1773   }
1774   FreeChunk* ec;
1775   {
1776     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1777     ec = dictionary()->find_largest_dict();  // get largest block
1778     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1779       // It's a coterminal block - we can coalesce.
1780       size_t old_size = ec->size();
1781       coalDeath(old_size);
1782       removeChunkFromDictionary(ec);
1783       size += old_size;
1784     } else {
1785       ec = (FreeChunk*)chunk;
1786     }
1787   }
1788   ec->set_size(size);
1789   debug_only(ec->mangleFreed(size));
1790   if (size < SmallForDictionary && ParallelGCThreads != 0) {
1791     lock = _indexedFreeListParLocks[size];
1792   }
1793   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1794   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1795   // record the birth under the lock since the recording involves
1796   // manipulation of the list on which the chunk lives and
1797   // if the chunk is allocated and is the last on the list,
1798   // the list can go away.
1799   coalBirth(size);
1800 }
1801 
1802 void
1803 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1804                                               size_t     size) {
1805   // check that the chunk does lie in this space!
1806   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1807   assert_locked();
1808   _bt.verify_single_block(chunk, size);
1809 
1810   FreeChunk* fc = (FreeChunk*) chunk;
1811   fc->set_size(size);
1812   debug_only(fc->mangleFreed(size));
1813   if (size < SmallForDictionary) {
1814     returnChunkToFreeList(fc);
1815   } else {
1816     returnChunkToDictionary(fc);
1817   }
1818 }
1819 
1820 void
1821 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1822   size_t size, bool coalesced) {
1823   assert_locked();
1824   assert(chunk != NULL, "null chunk");
1825   if (coalesced) {
1826     // repair BOT
1827     _bt.single_block(chunk, size);
1828   }
1829   addChunkToFreeLists(chunk, size);
1830 }
1831 
1832 // We _must_ find the purported chunk on our free lists;
1833 // we assert if we don't.
1834 void
1835 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1836   size_t size = fc->size();
1837   assert_locked();
1838   debug_only(verifyFreeLists());
1839   if (size < SmallForDictionary) {
1840     removeChunkFromIndexedFreeList(fc);
1841   } else {
1842     removeChunkFromDictionary(fc);
1843   }
1844   _bt.verify_single_block((HeapWord*)fc, size);
1845   debug_only(verifyFreeLists());
1846 }
1847 
1848 void
1849 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1850   size_t size = fc->size();
1851   assert_locked();
1852   assert(fc != NULL, "null chunk");
1853   _bt.verify_single_block((HeapWord*)fc, size);
1854   _dictionary->remove_chunk(fc);
1855   // adjust _unallocated_block upward, as necessary
1856   _bt.allocated((HeapWord*)fc, size);
1857 }
1858 
1859 void
1860 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1861   assert_locked();
1862   size_t size = fc->size();
1863   _bt.verify_single_block((HeapWord*)fc, size);
1864   NOT_PRODUCT(
1865     if (FLSVerifyIndexTable) {
1866       verifyIndexedFreeList(size);
1867     }
1868   )
1869   _indexedFreeList[size].remove_chunk(fc);
1870   NOT_PRODUCT(
1871     if (FLSVerifyIndexTable) {
1872       verifyIndexedFreeList(size);
1873     }
1874   )
1875 }
1876 
1877 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1878   /* A hint is the next larger size that has a surplus.
1879      Start search at a size large enough to guarantee that
1880      the excess is >= MinChunkSize. */
1881   size_t start = align_object_size(numWords + MinChunkSize);
1882   if (start < IndexSetSize) {
1883     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1884     size_t    hint = _indexedFreeList[start].hint();
1885     while (hint < IndexSetSize) {
1886       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1887       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1888       if (fl->surplus() > 0 && fl->head() != NULL) {
1889         // Found a list with surplus, reset original hint
1890         // and split out a free chunk which is returned.
1891         _indexedFreeList[start].set_hint(hint);
1892         FreeChunk* res = getFromListGreater(fl, numWords);
1893         assert(res == NULL || res->is_free(),
1894           "Should be returning a free chunk");
1895         return res;
1896       }
1897       hint = fl->hint(); /* keep looking */
1898     }
1899     /* None found. */
1900     it[start].set_hint(IndexSetSize);
1901   }
1902   return NULL;
1903 }
1904 
1905 /* Requires fl->size >= numWords + MinChunkSize */
1906 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1907   size_t numWords) {
1908   FreeChunk *curr = fl->head();
1909   assert(curr != NULL, "List is empty");
1910   assert(numWords >= MinChunkSize, "Word size is too small");
1911   size_t oldNumWords = curr->size();
1912   assert(oldNumWords >= numWords + MinChunkSize,
1913         "Size of chunks in the list is too small");
1914 
1915   fl->remove_chunk(curr);
1916   // recorded indirectly by splitChunkAndReturnRemainder -
1917   // smallSplit(oldNumWords, numWords);
1918   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1919   // Does anything have to be done for the remainder in terms of
1920   // fixing the card table?
1921   assert(new_chunk == NULL || new_chunk->is_free(),
1922     "Should be returning a free chunk");
1923   return new_chunk;
1924 }
1925 
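     // Split "chunk" so that its first new_size words are returned to the
     // caller and the remainder (at least MinChunkSize words) is linked back
     // into the indexed free lists or the dictionary, with the block offset
     // table and the split census updated accordingly.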
1926 FreeChunk*
1927 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1928   size_t new_size) {
1929   assert_locked();
1930   size_t size = chunk->size();
1931   assert(size > new_size, "Split from a smaller block?");
1932   assert(is_aligned(chunk), "alignment problem");
1933   assert(size == adjustObjectSize(size), "alignment problem");
1934   size_t rem_size = size - new_size;
1935   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1936   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1937   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1938   assert(is_aligned(ffc), "alignment problem");
1939   ffc->set_size(rem_size);
1940   ffc->link_next(NULL);
1941   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1942   // Above must occur before BOT is updated below.
1943   // adjust block offset table
1944   OrderAccess::storestore();
1945   assert(chunk->is_free() && ffc->is_free(), "Error");
1946   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1947   if (rem_size < SmallForDictionary) {
1948     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1949     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1950     assert(!is_par ||
1951            (SharedHeap::heap()->n_par_threads() ==
1952             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1953     returnChunkToFreeList(ffc);
1954     split(size, rem_size);
1955     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1956   } else {
1957     returnChunkToDictionary(ffc);
1958     split(size, rem_size);
1959   }
1960   chunk->set_size(new_size);
1961   return chunk;
1962 }
1963 
1964 void
1965 CompactibleFreeListSpace::sweep_completed() {
1966   // Now that space is probably plentiful, refill linear
1967   // allocation blocks as needed.
1968   refillLinearAllocBlocksIfNeeded();
1969 }
1970 
1971 void
1972 CompactibleFreeListSpace::gc_prologue() {
1973   assert_locked();
1974   if (PrintFLSStatistics != 0) {
1975     gclog_or_tty->print("Before GC:\n");
1976     reportFreeListStatistics();
1977   }
1978   refillLinearAllocBlocksIfNeeded();
1979 }
1980 
1981 void
1982 CompactibleFreeListSpace::gc_epilogue() {
1983   assert_locked();
1984   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1985     if (_smallLinearAllocBlock._word_size == 0)
1986       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1987   }
1988   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1989   _promoInfo.stopTrackingPromotions();
1990   repairLinearAllocationBlocks();
1991   // Print Space's stats
1992   if (PrintFLSStatistics != 0) {
1993     gclog_or_tty->print("After GC:\n");
1994     reportFreeListStatistics();
1995   }
1996 }
1997 
1998 // Iteration support, mostly delegated from a CMS generation
1999 
2000 void CompactibleFreeListSpace::save_marks() {
2001   assert(Thread::current()->is_VM_thread(),
2002          "Global variable should only be set when single-threaded");
2003   // Mark the "end" of the used space at the time of this call;
2004   // note, however, that promoted objects from this point
2005   // on are tracked in the _promoInfo below.
2006   set_saved_mark_word(unallocated_block());
2007 #ifdef ASSERT
2008   // Check the sanity of save_marks() etc.
2009   MemRegion ur    = used_region();
2010   MemRegion urasm = used_region_at_save_marks();
2011   assert(ur.contains(urasm),
2012          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
2013                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
2014                  ur.start(), ur.end(), urasm.start(), urasm.end()));
2015 #endif
2016   // inform allocator that promotions should be tracked.
2017   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
2018   _promoInfo.startTrackingPromotions();
2019 }
2020 
2021 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2022   assert(_promoInfo.tracking(), "No preceding save_marks?");
2023   assert(SharedHeap::heap()->n_par_threads() == 0,
2024          "Shouldn't be called if using parallel gc.");
2025   return _promoInfo.noPromotions();
2026 }
2027 
2028 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2029                                                                             \
2030 void CompactibleFreeListSpace::                                             \
2031 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2032   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2033          "Shouldn't be called (yet) during parallel part of gc.");          \
2034   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2035   /*                                                                        \
2036    * This also restores any displaced headers and removes the elements from \
2037    * the iteration set as they are processed, so that we have a clean slate \
2038    * at the end of the iteration. Note, thus, that if new objects are       \
2039    * promoted as a result of the iteration they are iterated over as well.  \
2040    */                                                                       \
2041   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2042 }
2043 
2044 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2045 
2046 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2047   return _smallLinearAllocBlock._word_size == 0;
2048 }
2049 
2050 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2051   // Fix up linear allocation blocks to look like free blocks
2052   repairLinearAllocBlock(&_smallLinearAllocBlock);
2053 }
2054 
2055 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2056   assert_locked();
2057   if (blk->_ptr != NULL) {
2058     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2059            "Minimum block size requirement");
2060     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2061     fc->set_size(blk->_word_size);
2062     fc->link_prev(NULL);   // mark as free
2063     fc->dontCoalesce();
2064     assert(fc->is_free(), "just marked it free");
2065     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2066   }
2067 }
2068 
2069 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2070   assert_locked();
2071   if (_smallLinearAllocBlock._ptr == NULL) {
2072     assert(_smallLinearAllocBlock._word_size == 0,
2073       "Size of linAB should be zero if the ptr is NULL");
2074     // Reset the linAB refill and allocation size limit.
2075     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2076   }
2077   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2078 }
2079 
2080 void
2081 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2082   assert_locked();
2083   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2084          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2085          "blk invariant");
2086   if (blk->_ptr == NULL) {
2087     refillLinearAllocBlock(blk);
2088   }
2089   if (PrintMiscellaneous && Verbose) {
2090     if (blk->_word_size == 0) {
2091       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2092     }
2093   }
2094 }
2095 
2096 void
2097 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2098   assert_locked();
2099   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2100          "linear allocation block should be empty");
2101   FreeChunk* fc;
2102   if (blk->_refillSize < SmallForDictionary &&
2103       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2104     // A linAB's strategy might be to use small sizes to reduce
2105     // fragmentation but still get the benefits of allocation from a
2106     // linAB.
2107   } else {
2108     fc = getChunkFromDictionary(blk->_refillSize);
2109   }
2110   if (fc != NULL) {
2111     blk->_ptr  = (HeapWord*)fc;
2112     blk->_word_size = fc->size();
2113     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2114   }
2115 }
2116 
2117 // Support for concurrent collection policy decisions.
2118 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2119   // In the future we might want to factor fragmentation stats --
2120   // including erosion of the "mountain" -- into this decision as well.
2121   return !adaptive_freelists() && linearAllocationWouldFail();
2122 }
2123 
2124 // Support for compaction
2125 
2126 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2127   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2128   // prepare_for_compaction() uses the space between live objects
2129   // so that later phases can skip dead space quickly.  As a result,
2130   // verification of the free lists does not work afterwards.
2131 }
2132 
2133 #define obj_size(q) adjustObjectSize(oop(q)->size())
2134 #define adjust_obj_size(s) adjustObjectSize(s)
2135 
2136 void CompactibleFreeListSpace::adjust_pointers() {
2137   // In other versions of adjust_pointers(), a bail out
2138   // based on the amount of live data in the generation
2139   // (i.e., if 0, bail out) may be used.
2140   // Cannot test used() == 0 here because the free lists have already
2141   // been mangled by the compaction.
2142 
2143   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2144   // See note about verification in prepare_for_compaction().
2145 }
2146 
2147 void CompactibleFreeListSpace::compact() {
2148   SCAN_AND_COMPACT(obj_size);
2149 }
2150 
2151 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2152 // where fbs is free block sizes
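     // Worked example (sizes illustrative only): three free blocks of 2, 2 and
     // 4 words give 1 - (4 + 4 + 16)/(2 + 2 + 4)^2 = 1 - 24/64 = 0.625, while a
     // single free block of 8 words gives 1 - 64/64 = 0.0 (no fragmentation).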
2153 double CompactibleFreeListSpace::flsFrag() const {
2154   size_t itabFree = totalSizeInIndexedFreeLists();
2155   double frag = 0.0;
2156   size_t i;
2157 
2158   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2159     double sz  = i;
2160     frag      += _indexedFreeList[i].count() * (sz * sz);
2161   }
2162 
2163   double totFree = itabFree +
2164                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2165   if (totFree > 0) {
2166     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2167             (totFree * totFree));
2168     frag = (double)1.0  - frag;
2169   } else {
2170     assert(frag == 0.0, "Follows from totFree == 0");
2171   }
2172   return frag;
2173 }
2174 
2175 void CompactibleFreeListSpace::beginSweepFLCensus(
2176   float inter_sweep_current,
2177   float inter_sweep_estimate,
2178   float intra_sweep_estimate) {
2179   assert_locked();
2180   size_t i;
2181   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2182     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2183     if (PrintFLSStatistics > 1) {
2184       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2185     }
2186     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2187     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2188     fl->set_before_sweep(fl->count());
2189     fl->set_bfr_surp(fl->surplus());
2190   }
2191   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2192                                     inter_sweep_current,
2193                                     inter_sweep_estimate,
2194                                     intra_sweep_estimate);
2195 }
2196 
2197 void CompactibleFreeListSpace::setFLSurplus() {
2198   assert_locked();
2199   size_t i;
2200   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2201     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2202     fl->set_surplus(fl->count() -
2203                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2204   }
2205 }
2206 
2207 void CompactibleFreeListSpace::setFLHints() {
2208   assert_locked();
2209   size_t i;
2210   size_t h = IndexSetSize;
2211   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2212     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2213     fl->set_hint(h);
2214     if (fl->surplus() > 0) {
2215       h = i;
2216     }
2217   }
2218 }
2219 
2220 void CompactibleFreeListSpace::clearFLCensus() {
2221   assert_locked();
2222   size_t i;
2223   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2224     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2225     fl->set_prev_sweep(fl->count());
2226     fl->set_coal_births(0);
2227     fl->set_coal_deaths(0);
2228     fl->set_split_births(0);
2229     fl->set_split_deaths(0);
2230   }
2231 }
2232 
2233 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2234   if (PrintFLSStatistics > 0) {
2235     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2236     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2237                            largestAddr);
2238   }
2239   setFLSurplus();
2240   setFLHints();
2241   if (PrintGC && PrintFLSCensus > 0) {
2242     printFLCensus(sweep_count);
2243   }
2244   clearFLCensus();
2245   assert_locked();
2246   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2247 }
2248 
2249 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2250   if (size < SmallForDictionary) {
2251     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2252     return (fl->coal_desired() < 0) ||
2253            ((int)fl->count() > fl->coal_desired());
2254   } else {
2255     return dictionary()->coal_dict_over_populated(size);
2256   }
2257 }
2258 
2259 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2260   assert(size < SmallForDictionary, "Size too large for indexed list");
2261   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2262   fl->increment_coal_births();
2263   fl->increment_surplus();
2264 }
2265 
2266 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2267   assert(size < SmallForDictionary, "Size too large for indexed list");
2268   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2269   fl->increment_coal_deaths();
2270   fl->decrement_surplus();
2271 }
2272 
2273 void CompactibleFreeListSpace::coalBirth(size_t size) {
2274   if (size  < SmallForDictionary) {
2275     smallCoalBirth(size);
2276   } else {
2277     dictionary()->dict_census_update(size,
2278                                    false /* split */,
2279                                    true /* birth */);
2280   }
2281 }
2282 
2283 void CompactibleFreeListSpace::coalDeath(size_t size) {
2284   if (size < SmallForDictionary) {
2285     smallCoalDeath(size);
2286   } else {
2287     dictionary()->dict_census_update(size,
2288                                    false /* split */,
2289                                    false /* birth */);
2290   }
2291 }
2292 
2293 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2294   assert(size < SmallForDictionary, "Size too large for indexed list");
2295   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2296   fl->increment_split_births();
2297   fl->increment_surplus();
2298 }
2299 
2300 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2301   assert(size < SmallForDictionary, "Size too large for indexed list");
2302   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2303   fl->increment_split_deaths();
2304   fl->decrement_surplus();
2305 }
2306 
2307 void CompactibleFreeListSpace::split_birth(size_t size) {
2308   if (size  < SmallForDictionary) {
2309     smallSplitBirth(size);
2310   } else {
2311     dictionary()->dict_census_update(size,
2312                                    true /* split */,
2313                                    true /* birth */);
2314   }
2315 }
2316 
2317 void CompactibleFreeListSpace::splitDeath(size_t size) {
2318   if (size  < SmallForDictionary) {
2319     smallSplitDeath(size);
2320   } else {
2321     dictionary()->dict_census_update(size,
2322                                    true /* split */,
2323                                    false /* birth */);
2324   }
2325 }
2326 
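     // Record the census effect of splitting a chunk of size "from" into a
     // chunk of size "to1" and a remainder of size "from - to1".  For example
     // (sizes illustrative), carving a 4-word chunk out of a 10-word chunk
     // records a split death for size 10 and split births for sizes 4 and 6.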
2327 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2328   size_t to2 = from - to1;
2329   splitDeath(from);
2330   split_birth(to1);
2331   split_birth(to2);
2332 }
2333 
2334 void CompactibleFreeListSpace::print() const {
2335   print_on(tty);
2336 }
2337 
2338 void CompactibleFreeListSpace::prepare_for_verify() {
2339   assert_locked();
2340   repairLinearAllocationBlocks();
2341   // Verify that the SpoolBlocks look like free blocks of
2342   // appropriate sizes... To be done ...
2343 }
2344 
2345 class VerifyAllBlksClosure: public BlkClosure {
2346  private:
2347   const CompactibleFreeListSpace* _sp;
2348   const MemRegion                 _span;
2349   HeapWord*                       _last_addr;
2350   size_t                          _last_size;
2351   bool                            _last_was_obj;
2352   bool                            _last_was_live;
2353 
2354  public:
2355   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2356     MemRegion span) :  _sp(sp), _span(span),
2357                        _last_addr(NULL), _last_size(0),
2358                        _last_was_obj(false), _last_was_live(false) { }
2359 
2360   virtual size_t do_blk(HeapWord* addr) {
2361     size_t res;
2362     bool   was_obj  = false;
2363     bool   was_live = false;
2364     if (_sp->block_is_obj(addr)) {
2365       was_obj = true;
2366       oop p = oop(addr);
2367       guarantee(p->is_oop(), "Should be an oop");
2368       res = _sp->adjustObjectSize(p->size());
2369       if (_sp->obj_is_alive(addr)) {
2370         was_live = true;
2371         p->verify();
2372       }
2373     } else {
2374       FreeChunk* fc = (FreeChunk*)addr;
2375       res = fc->size();
2376       if (FLSVerifyLists && !fc->cantCoalesce()) {
2377         guarantee(_sp->verify_chunk_in_free_list(fc),
2378                   "Chunk should be on a free list");
2379       }
2380     }
2381     if (res == 0) {
2382       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2383       gclog_or_tty->print_cr(
2384         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2385         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2386         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2387         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2388       _sp->print_on(gclog_or_tty);
2389       guarantee(false, "Seppuku!");
2390     }
2391     _last_addr = addr;
2392     _last_size = res;
2393     _last_was_obj  = was_obj;
2394     _last_was_live = was_live;
2395     return res;
2396   }
2397 };
2398 
2399 class VerifyAllOopsClosure: public OopClosure {
2400  private:
2401   const CMSCollector*             _collector;
2402   const CompactibleFreeListSpace* _sp;
2403   const MemRegion                 _span;
2404   const bool                      _past_remark;
2405   const CMSBitMap*                _bit_map;
2406 
2407  protected:
2408   void do_oop(void* p, oop obj) {
2409     if (_span.contains(obj)) { // the interior oop points into CMS heap
2410       if (!_span.contains(p)) { // reference from outside CMS heap
2411         // Should be a valid object; the first disjunct below allows
2412         // us to sidestep an assertion in block_is_obj() that insists
2413         // that p be in _sp. Note that several generations (and spaces)
2414         // are spanned by _span (CMS heap) above.
2415         guarantee(!_sp->is_in_reserved(obj) ||
2416                   _sp->block_is_obj((HeapWord*)obj),
2417                   "Should be an object");
2418         guarantee(obj->is_oop(), "Should be an oop");
2419         obj->verify();
2420         if (_past_remark) {
2421           // Remark has been completed, so the object should be marked.
2422           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2423         }
2424       } else { // reference within CMS heap
2425         if (_past_remark) {
2426           // Remark has been completed -- so the referent should have
2427           // been marked, if referring object is.
2428           if (_bit_map->isMarked(_collector->block_start(p))) {
2429             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2430           }
2431         }
2432       }
2433     } else if (_sp->is_in_reserved(p)) {
2434       // the reference is from FLS, and points out of FLS
2435       guarantee(obj->is_oop(), "Should be an oop");
2436       obj->verify();
2437     }
2438   }
2439 
2440   template <class T> void do_oop_work(T* p) {
2441     T heap_oop = oopDesc::load_heap_oop(p);
2442     if (!oopDesc::is_null(heap_oop)) {
2443       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2444       do_oop(p, obj);
2445     }
2446   }
2447 
2448  public:
2449   VerifyAllOopsClosure(const CMSCollector* collector,
2450     const CompactibleFreeListSpace* sp, MemRegion span,
2451     bool past_remark, CMSBitMap* bit_map) :
2452     _collector(collector), _sp(sp), _span(span),
2453     _past_remark(past_remark), _bit_map(bit_map) { }
2454 
2455   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2456   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2457 };
2458 
2459 void CompactibleFreeListSpace::verify() const {
2460   assert_lock_strong(&_freelistLock);
2461   verify_objects_initialized();
2462   MemRegion span = _collector->_span;
2463   bool past_remark = (_collector->abstract_state() ==
2464                       CMSCollector::Sweeping);
2465 
2466   ResourceMark rm;
2467   HandleMark  hm;
2468 
2469   // Check integrity of CFL data structures
2470   _promoInfo.verify();
2471   _dictionary->verify();
2472   if (FLSVerifyIndexTable) {
2473     verifyIndexedFreeLists();
2474   }
2475   // Check integrity of all objects and free blocks in space
2476   {
2477     VerifyAllBlksClosure cl(this, span);
2478     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2479   }
2480   // Check that all references in the heap to FLS
2481   // are to valid objects in FLS or that references in
2482   // FLS are to valid objects elsewhere in the heap
2483   if (FLSVerifyAllHeapReferences)
2484   {
2485     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2486       _collector->markBitMap());
2487     CollectedHeap* ch = Universe::heap();
2488 
2489     // Iterate over all oops in the heap. Uses the _no_header version
2490     // since we are not interested in following the klass pointers.
2491     ch->oop_iterate_no_header(&cl);
2492   }
2493 
2494   if (VerifyObjectStartArray) {
2495     // Verify the block offset table
2496     _bt.verify();
2497   }
2498 }
2499 
2500 #ifndef PRODUCT
2501 void CompactibleFreeListSpace::verifyFreeLists() const {
2502   if (FLSVerifyLists) {
2503     _dictionary->verify();
2504     verifyIndexedFreeLists();
2505   } else {
2506     if (FLSVerifyDictionary) {
2507       _dictionary->verify();
2508     }
2509     if (FLSVerifyIndexTable) {
2510       verifyIndexedFreeLists();
2511     }
2512   }
2513 }
2514 #endif
2515 
2516 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2517   size_t i = 0;
2518   for (; i < IndexSetStart; i++) {
2519     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2520   }
2521   for (; i < IndexSetSize; i++) {
2522     verifyIndexedFreeList(i);
2523   }
2524 }
2525 
2526 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2527   FreeChunk* fc   =  _indexedFreeList[size].head();
2528   FreeChunk* tail =  _indexedFreeList[size].tail();
2529   size_t    num = _indexedFreeList[size].count();
2530   size_t      n = 0;
2531   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2532             "Slot should have been empty");
2533   for (; fc != NULL; fc = fc->next(), n++) {
2534     guarantee(fc->size() == size, "Size inconsistency");
2535     guarantee(fc->is_free(), "!free?");
2536     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2537     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2538   }
2539   guarantee(n == num, "Incorrect count");
2540 }
2541 
2542 #ifndef PRODUCT
2543 void CompactibleFreeListSpace::check_free_list_consistency() const {
2544   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
2545     "Some sizes can't be allocated without recourse to"
2546     " linear allocation buffers");
2547   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
2548     "else MIN_TREE_CHUNK_SIZE is wrong");
2549   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2550   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2551 }
2552 #endif
2553 
2554 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2555   assert_lock_strong(&_freelistLock);
2556   AdaptiveFreeList<FreeChunk> total;
2557   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2558   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2559   size_t total_free = 0;
2560   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2561     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2562     total_free += fl->count() * fl->size();
2563     if (i % (40*IndexSetStride) == 0) {
2564       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2565     }
2566     fl->print_on(gclog_or_tty);
2567     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2568     total.set_surplus(    total.surplus()     + fl->surplus()    );
2569     total.set_desired(    total.desired()     + fl->desired()    );
2570     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2571     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2572     total.set_count(      total.count()       + fl->count()      );
2573     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2574     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2575     total.set_split_births(total.split_births() + fl->split_births());
2576     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2577   }
2578   total.print_on(gclog_or_tty, "TOTAL");
2579   gclog_or_tty->print_cr("Total free in indexed lists "
2580                          SIZE_FORMAT " words", total_free);
2581   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2582     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2583             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2584     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2585   _dictionary->print_dict_census();
2586 }
2587 
2588 ///////////////////////////////////////////////////////////////////////////
2589 // CFLS_LAB
2590 ///////////////////////////////////////////////////////////////////////////
2591 
2592 #define VECTOR_257(x)                                                                                  \
2593   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2594   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2595      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2596      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2597      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2598      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2599      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2600      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2601      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2602      x }
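// Illustrative use only (hypothetical variable, not part of this file):
//   static size_t example[257] = VECTOR_257(0);
// The macro expands to a brace-initializer containing exactly 257 copies of
// its argument, one per slot of the per-size statistics arrays below.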
2603 
2604 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2605 // OldPLABSize, whose static default is different; if overridden at the
2606 // command-line, this will get reinitialized via a call to
2607 // modify_initialization() below.
2608 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2609   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2610 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2611 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2612 
2613 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2614   _cfls(cfls)
2615 {
2616   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2617   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2618        i < CompactibleFreeListSpace::IndexSetSize;
2619        i += CompactibleFreeListSpace::IndexSetStride) {
2620     _indexedFreeList[i].set_size(i);
2621     _num_blocks[i] = 0;
2622   }
2623 }
2624 
2625 static bool _CFLS_LAB_modified = false;
2626 
2627 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2628   assert(!_CFLS_LAB_modified, "Call only once");
2629   _CFLS_LAB_modified = true;
2630   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2631        i < CompactibleFreeListSpace::IndexSetSize;
2632        i += CompactibleFreeListSpace::IndexSetStride) {
2633     _blocks_to_claim[i].modify(n, wt, true /* force */);
2634   }
2635 }
2636 
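// Allocate word_sz heap words for a promoted object: requests of
// IndexSetSize words or more are satisfied from the shared dictionary under
// the parallel-allocation lock; smaller requests are served from this
// thread's local indexed free lists, which are refilled from the global
// pool on demand.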
2637 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2638   FreeChunk* res;
2639   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2640   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2641     // This locking manages sync with other large object allocations.
2642     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2643                     Mutex::_no_safepoint_check_flag);
2644     res = _cfls->getChunkFromDictionaryExact(word_sz);
2645     if (res == NULL) return NULL;
2646   } else {
2647     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2648     if (fl->count() == 0) {
2649       // Attempt to refill this local free list.
2650       get_from_global_pool(word_sz, fl);
2651       // If it didn't work, give up.
2652       if (fl->count() == 0) return NULL;
2653     }
2654     res = fl->get_chunk_at_head();
2655     assert(res != NULL, "Why was count non-zero?");
2656   }
2657   res->markNotFree();
2658   assert(!res->is_free(), "shouldn't be marked free");
2659   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2660   // mangle a just allocated object with a distinct pattern.
2661   debug_only(res->mangleAllocated(word_sz));
2662   return (HeapWord*)res;
2663 }
2664 
2665 // Get a chunk of blocks of the right size and update related
2666 // book-keeping stats
2667 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2668   // Get the #blocks we want to claim
2669   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2670   assert(n_blks > 0, "Error");
2671   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
2672   // In some cases, when the application has a phase change,
2673   // there may be a sudden and sharp shift in the object survival
2674   // profile, and updating the counts at the end of a scavenge
2675   // may not be quick enough, giving rise to large scavenge pauses
2676   // during these phase changes. It is beneficial to detect such
2677   // changes on-the-fly during a scavenge and avoid such a phase-change
2678   // pothole. The following code is a heuristic attempt to do that.
2679   // It is protected by a product flag until we have gained
2680   // enough experience with this heuristic and fine-tuned its behaviour.
2681   // WARNING: This might increase fragmentation if we overreact to
2682   // small spikes, so some kind of historical smoothing based on
2683   // previous experience with the greater reactivity might be useful.
2684   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2685   // default.
2686   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2687     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2688     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2689     n_blks = MIN2(n_blks, CMSOldPLABMax);
2690   }
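  // For illustration only (hypothetical values): with n_blks == 16,
  // CMSOldPLABNumRefills == 4, CMSOldPLABToleranceFactor == 4 and
  // CMSOldPLABReactivityFactor == 2, a thread that has already used
  // _num_blocks[word_sz] == 512 blocks this scavenge would, in the block
  // above, compute multiple = 512 / (4 * 4 * 16) = 2 and grow n_blks by
  // 2 * 2 * 16 = 64 (subject to the CMSOldPLABMax cap).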
2691   assert(n_blks > 0, "Error");
2692   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2693   // Update stats table entry for this block size
2694   _num_blocks[word_sz] += fl->count();
2695 }
2696 
2697 void CFLS_LAB::compute_desired_plab_size() {
2698   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2699        i < CompactibleFreeListSpace::IndexSetSize;
2700        i += CompactibleFreeListSpace::IndexSetStride) {
2701     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2702            "Counter inconsistency");
2703     if (_global_num_workers[i] > 0) {
2704       // Need to smooth wrt historical average
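      // The new sample is blocks-consumed-per-worker-per-refill, clamped
      // to the range [CMSOldPLABMin, CMSOldPLABMax].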
2705       if (ResizeOldPLAB) {
2706         _blocks_to_claim[i].sample(
2707           MAX2((size_t)CMSOldPLABMin,
2708           MIN2((size_t)CMSOldPLABMax,
2709                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2710       }
2711       // Reset counters for next round
2712       _global_num_workers[i] = 0;
2713       _global_num_blocks[i] = 0;
2714       if (PrintOldPLAB) {
2715         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2716       }
2717     }
2718   }
2719 }
2720 
2721 // If this is changed in the future to allow parallel
2722 // access, one would need to take the FL locks and,
2723 // depending on how it is used, stagger access from
2724 // parallel threads to reduce contention.
2725 void CFLS_LAB::retire(int tid) {
2726   // We run this single threaded with the world stopped;
2727   // so no need for locks and such.
2728   NOT_PRODUCT(Thread* t = Thread::current();)
2729   assert(Thread::current()->is_VM_thread(), "Error");
2730   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2731        i < CompactibleFreeListSpace::IndexSetSize;
2732        i += CompactibleFreeListSpace::IndexSetStride) {
2733     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2734            "Can't retire more than what we obtained");
2735     if (_num_blocks[i] > 0) {
2736       size_t num_retire =  _indexedFreeList[i].count();
2737       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2738       {
2739         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2740         //                Mutex::_no_safepoint_check_flag);
2741 
2742         // Update global stats for num_blocks used
2743         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2744         _global_num_workers[i]++;
2745         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2746         if (num_retire > 0) {
2747           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2748           // Reset this list.
2749           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2750           _indexedFreeList[i].set_size(i);
2751         }
2752       }
2753       if (PrintOldPLAB) {
2754         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2755                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2756       }
2757       // Reset stats for next round
2758       _num_blocks[i]         = 0;
2759     }
2760   }
2761 }
2762 
2763 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2764   assert(fl->count() == 0, "Precondition.");
2765   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2766          "Precondition");
2767 
2768   // We'll first try multiples of word_sz in the indexed set, starting with
2769   // word_sz itself and, if CMSSplitIndexedFreeListBlocks is set, larger
2770   // multiples as well; if that fails, we get a big chunk from the dictionary and split it.
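  // For illustration only (hypothetical sizes): with word_sz == 8 and
  // CMSSplitIndexedFreeListBlocks enabled, the loop below probes the indexed
  // lists at sizes 8, 16, 24, ... (while k * word_sz < IndexSetSize),
  // splitting each chunk found k ways; only if no multiple yields chunks do
  // we fall through to carving blocks out of a single dictionary chunk.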
2771   {
2772     bool found;
2773     int  k;
2774     size_t cur_sz;
2775     for (k = 1, cur_sz = k * word_sz, found = false;
2776          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2777          (CMSSplitIndexedFreeListBlocks || k <= 1);
2778          k++, cur_sz = k * word_sz) {
2779       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2780       fl_for_cur_sz.set_size(cur_sz);
2781       {
2782         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2783                         Mutex::_no_safepoint_check_flag);
2784         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2785         if (gfl->count() != 0) {
2786           // nn is the number of chunks of size cur_sz that
2787           // we'd need to split k-ways each, in order to create
2788           // "n" chunks of size word_sz each.
2789           const size_t nn = MAX2(n/k, (size_t)1);
2790           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2791           found = true;
2792           if (k > 1) {
2793             // Update split death stats for the cur_sz-size blocks list:
2794             // we increment the split death count by the number of blocks
2795             // we just took from the cur_sz-size blocks list and which
2796             // we will be splitting below.
2797             ssize_t deaths = gfl->split_deaths() +
2798                              fl_for_cur_sz.count();
2799             gfl->set_split_deaths(deaths);
2800           }
2801         }
2802       }
2803       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2804       if (found) {
2805         if (k == 1) {
2806           fl->prepend(&fl_for_cur_sz);
2807         } else {
2808           // Divide each block on fl_for_cur_sz up k ways.
2809           FreeChunk* fc;
2810           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2811             // Must do this in reverse order, so that anybody attempting to
2812             // access the main chunk sees it as a single free block until we
2813             // change it.
2814             size_t fc_size = fc->size();
2815             assert(fc->is_free(), "Error");
2816             for (int i = k-1; i >= 0; i--) {
2817               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2818               assert((i != 0) ||
2819                         ((fc == ffc) && ffc->is_free() &&
2820                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2821                         "Counting error");
2822               ffc->set_size(word_sz);
2823               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2824               ffc->link_next(NULL);
2825               // Above must occur before BOT is updated below.
2826               OrderAccess::storestore();
2827               // splitting from the right, fc_size == i * word_sz
2828               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2829               fc_size -= word_sz;
2830               assert(fc_size == i*word_sz, "Error");
2831               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2832               _bt.verify_single_block((HeapWord*)fc, fc_size);
2833               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2834               // Push this on "fl".
2835               fl->return_chunk_at_head(ffc);
2836             }
2837             // TRAP: sanity-check that the newly built list is properly terminated.
2838             assert(fl->tail()->next() == NULL, "List invariant.");
2839           }
2840         }
2841         // Update birth stats for this block size.
2842         size_t num = fl->count();
2843         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2844                         Mutex::_no_safepoint_check_flag);
2845         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2846         _indexedFreeList[word_sz].set_split_births(births);
2847         return;
2848       }
2849     }
2850   }
2851   // Otherwise, we'll split a block from the dictionary.
2852   FreeChunk* fc = NULL;
2853   FreeChunk* rem_fc = NULL;
2854   size_t rem;
2855   {
2856     MutexLockerEx x(parDictionaryAllocLock(),
2857                     Mutex::_no_safepoint_check_flag);
2858     while (n > 0) {
2859       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2860                                   FreeBlockDictionary<FreeChunk>::atLeast);
2861       if (fc != NULL) {
2862         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2863         dictionary()->dict_census_update(fc->size(),
2864                                        true /*split*/,
2865                                        false /*birth*/);
2866         break;
2867       } else {
2868         n--;
2869       }
2870     }
2871     if (fc == NULL) return;
2872     // Otherwise, split up that block.
2873     assert((ssize_t)n >= 1, "Control point invariant");
2874     assert(fc->is_free(), "Error: should be a free block");
2875     _bt.verify_single_block((HeapWord*)fc, fc->size());
2876     const size_t nn = fc->size() / word_sz;
2877     n = MIN2(nn, n);
2878     assert((ssize_t)n >= 1, "Control point invariant");
2879     rem = fc->size() - n * word_sz;
2880     // If there is a remainder, and it's too small, allocate one fewer.
2881     if (rem > 0 && rem < MinChunkSize) {
2882       n--; rem += word_sz;
2883     }
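    // For illustration only (hypothetical sizes): with word_sz == 10,
    // MinChunkSize == 4 and a 43-word dictionary chunk, n is first clipped
    // to 4 with rem == 3; since 0 < rem < MinChunkSize we give up one block,
    // leaving n == 3 and a 13-word remainder.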
2884     // Note that at this point we may have n == 0.
2885     assert((ssize_t)n >= 0, "Control point invariant");
2886 
2887     // If n is 0, the chunk fc that was found is not large
2888     // enough to leave a viable remainder.  We are unable to
2889     // allocate even one block.  Return fc to the
2890     // dictionary and return, leaving "fl" empty.
2891     if (n == 0) {
2892       returnChunkToDictionary(fc);
2893       assert(fl->count() == 0, "We never allocated any blocks");
2894       return;
2895     }
2896 
2897     // First return the remainder, if any.
2898     // Note that we hold the lock until we decide if we're going to give
2899     // back the remainder to the dictionary, since a concurrent allocation
2900     // may otherwise see the heap as empty.  (We're willing to take that
2901     // hit if the block is a small block.)
2902     if (rem > 0) {
2903       size_t prefix_size = n * word_sz;
2904       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2905       rem_fc->set_size(rem);
2906       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2907       rem_fc->link_next(NULL);
2908       // Above must occur before BOT is updated below.
2909       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2910       OrderAccess::storestore();
2911       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2912       assert(fc->is_free(), "Error");
2913       fc->set_size(prefix_size);
2914       if (rem >= IndexSetSize) {
2915         returnChunkToDictionary(rem_fc);
2916         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2917         rem_fc = NULL;
2918       }
2919       // Otherwise, return it to the small list below.
2920     }
2921   }
2922   if (rem_fc != NULL) {
2923     MutexLockerEx x(_indexedFreeListParLocks[rem],
2924                     Mutex::_no_safepoint_check_flag);
2925     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2926     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2927     smallSplitBirth(rem);
2928   }
2929   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2930   // Now do the splitting up.
2931   // Must do this in reverse order, so that anybody attempting to
2932   // access the main chunk sees it as a single free block until we
2933   // change it.
2934   size_t fc_size = n * word_sz;
2935   // All but first chunk in this loop
2936   for (ssize_t i = n-1; i > 0; i--) {
2937     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2938     ffc->set_size(word_sz);
2939     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2940     ffc->link_next(NULL);
2941     // Above must occur before BOT is updated below.
2942     OrderAccess::storestore();
2943     // splitting from the right; after the decrement below, fc_size == i * word_sz
2944     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2945     fc_size -= word_sz;
2946     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2947     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2948     _bt.verify_single_block((HeapWord*)fc, fc_size);
2949     // Push this on "fl".
2950     fl->return_chunk_at_head(ffc);
2951   }
2952   // First chunk
2953   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2954   // The blocks above should show their new sizes before the first block below
2955   fc->set_size(word_sz);
2956   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2957   fc->link_next(NULL);
2958   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2959   _bt.verify_single_block((HeapWord*)fc, fc->size());
2960   fl->return_chunk_at_head(fc);
2961 
2962   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2963   {
2964     // Update the stats for this block size.
2965     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2966                     Mutex::_no_safepoint_check_flag);
2967     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2968     _indexedFreeList[word_sz].set_split_births(births);
2969     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2970     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2971   }
2972 
2973   // TRAP: sanity-check that the returned list is properly terminated.
2974   assert(fl->tail()->next() == NULL, "List invariant.");
2975 }
2976 
2977 // Set up the space's par_seq_tasks structure for work claiming
2978 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2979 // XXX Need to suitably abstract and generalize this and the next
2980 // method into one.
2981 void
2982 CompactibleFreeListSpace::
2983 initialize_sequential_subtasks_for_rescan(int n_threads) {
2984   // The "size" of each task is fixed according to rescan_task_size.
2985   assert(n_threads > 0, "Unexpected n_threads argument");
2986   const size_t task_size = rescan_task_size();
2987   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2988   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2989   assert(n_tasks == 0 ||
2990          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2991           (used_region().start() + n_tasks*task_size >= used_region().end())),
2992          "n_tasks calculation incorrect");
2993   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2994   assert(!pst->valid(), "Clobbering existing data?");
2995   // Sets the condition for completion of the subtask (how many threads
2996   // need to finish in order to be done).
2997   pst->set_n_threads(n_threads);
2998   pst->set_n_tasks((int)n_tasks);
2999 }
3000 
3001 // Set up the space's par_seq_tasks structure for work claiming
3002 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
3003 void
3004 CompactibleFreeListSpace::
3005 initialize_sequential_subtasks_for_marking(int n_threads,
3006                                            HeapWord* low) {
3007   // The "size" of each task is fixed according to rescan_task_size.
3008   assert(n_threads > 0, "Unexpected n_threads argument");
3009   const size_t task_size = marking_task_size();
3010   assert(task_size > CardTableModRefBS::card_size_in_words &&
3011          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3012          "Otherwise arithmetic below would be incorrect");
3013   MemRegion span = _gen->reserved();
3014   if (low != NULL) {
3015     if (span.contains(low)) {
3016       // Align low down to  a card boundary so that
3017       // we can use block_offset_careful() on span boundaries.
3018       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3019                                  CardTableModRefBS::card_size);
3020       // Clip span prefix at aligned_low
3021       span = span.intersection(MemRegion(aligned_low, span.end()));
3022     } else if (low > span.end()) {
3023       span = MemRegion(low, low);  // Null region
3024     } // else use entire span
3025   }
3026   assert(span.is_empty() ||
3027          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3028         "span should start at a card boundary");
3029   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3030   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3031   assert(n_tasks == 0 ||
3032          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3033           (span.start() + n_tasks*task_size >= span.end())),
3034          "n_tasks calculation incorrect");
3035   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3036   assert(!pst->valid(), "Clobbering existing data?");
3037   // Sets the condition for completion of the subtask (how many threads
3038   // need to finish in order to be done).
3039   pst->set_n_threads(n_threads);
3040   pst->set_n_tasks((int)n_tasks);
3041 }