1 /*
   2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/globals.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/vmThread.hpp"
  43 #include "utilities/copy.hpp"
  44 
  45 /////////////////////////////////////////////////////////////////////////
  46 //// CompactibleFreeListSpace
  47 /////////////////////////////////////////////////////////////////////////
  48 
// Highest rank used by the free list locks.
  50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  51 
  52 // Defaults are 0 so things will break badly if incorrectly initialized.
  53 int CompactibleFreeListSpace::IndexSetStart  = 0;
  54 int CompactibleFreeListSpace::IndexSetStride = 0;
  55 
  56 size_t MinChunkSize = 0;
  57 
  58 void CompactibleFreeListSpace::set_cms_values() {
  59   // Set CMS global values
  60   assert(MinChunkSize == 0, "already set");
  #define numQuanta(x,y) (((x)+(y)-1)/(y))
  62   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
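  // For instance, on an LP64 VM with 8-byte object alignment
  // (MinObjAlignmentInBytes == 8, so MinObjAlignment == 1 heap word),
  // a FreeChunk of, say, 24 bytes would yield
  //   numQuanta(24, 8) * 1 == 3 heap words
  // for MinChunkSize; the actual value depends on sizeof(FreeChunk)
  // and the alignment in effect on the platform.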
  63 
  64   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  65   IndexSetStart  = MinObjAlignment;
  66   IndexSetStride = MinObjAlignment;
  67 }
  68 
  69 // Constructor
  70 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  71   MemRegion mr, bool use_adaptive_freelists,
  72   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  73   _dictionaryChoice(dictionaryChoice),
  74   _adaptive_freelists(use_adaptive_freelists),
  75   _bt(bs, mr),
  // Free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  81   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  82   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  83                           "CompactibleFreeListSpace._dict_par_lock", true),
  84   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  85                     CMSRescanMultiple),
  86   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  87                     CMSConcMarkMultiple),
  88   _collector(NULL)
  89 {
  90   _bt.set_space(this);
  91   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  92   // We have all of "mr", all of which we place in the dictionary
  93   // as one big chunk. We'll need to decide here which of several
  94   // possible alternative dictionary implementations to use. For
  95   // now the choice is easy, since we have only one working
  96   // implementation, namely, the simple binary tree (splaying
  97   // temporarily disabled).
  98   switch (dictionaryChoice) {
  99     case FreeBlockDictionary::dictionarySplayTree:
 100     case FreeBlockDictionary::dictionarySkipList:
 101     default:
 102       warning("dictionaryChoice: selected option not understood; using"
 103               " default BinaryTreeDictionary implementation instead.");
 104     case FreeBlockDictionary::dictionaryBinaryTree:
 105       _dictionary = new BinaryTreeDictionary(mr);
 106       break;
 107   }
 108   assert(_dictionary != NULL, "CMS dictionary initialization");
 109   // The indexed free lists are initially all empty and are lazily
 110   // filled in on demand. Initialize the array elements to NULL.
 111   initializeIndexedFreeListArray();
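  // The space thus keeps a two-level free-block structure: requests smaller
  // than IndexSetSize words are normally satisfied from the exact-size
  // indexed lists, while larger requests (and overflow) go to the
  // dictionary; see allocate_adaptive_freelists() and
  // allocate_non_adaptive_freelists() below.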
 112 
  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's.  Also, a CMS perm gen that can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass, so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks.
 119   if (!use_adaptive_freelists) {
 120     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
 121     // The small linAB initially has all the space and will allocate
 122     // a chunk of any size.
 123     HeapWord* addr = (HeapWord*) fc;
 124     _smallLinearAllocBlock.set(addr, fc->size() ,
 125       1024*SmallForLinearAlloc, fc->size());
 126     // Note that _unallocated_block is not updated here.
 127     // Allocations from the linear allocation block should
 128     // update it.
 129   } else {
 130     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 131                                SmallForLinearAlloc);
 132   }
 133   // CMSIndexedFreeListReplenish should be at least 1
 134   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 135   _promoInfo.setSpace(this);
 136   if (UseCMSBestFit) {
 137     _fitStrategy = FreeBlockBestFitFirst;
 138   } else {
 139     _fitStrategy = FreeBlockStrategyNone;
 140   }
 141   checkFreeListConsistency();
 142 
 143   // Initialize locks for parallel case.
 144 
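  // With parallel GC threads, each indexed free list gets its own lock so
  // that threads working on chunks of different sizes need not contend
  // with one another; the dictionary is instead protected by the single
  // _parDictionaryAllocLock above.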
 145   if (CollectedHeap::use_parallel_gc_threads()) {
 146     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 147       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 148                                               "a freelist par lock",
 149                                               true);
 150       if (_indexedFreeListParLocks[i] == NULL)
 151         vm_exit_during_initialization("Could not allocate a par lock");
 152       DEBUG_ONLY(
 153         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 154       )
 155     }
 156     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 157   }
 158 }
 159 
 160 // Like CompactibleSpace forward() but always calls cross_threshold() to
 161 // update the block offset table.  Removed initialize_threshold call because
 162 // CFLS does not use a block offset array for contiguous spaces.
 163 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 164                                     CompactPoint* cp, HeapWord* compact_top) {
 165   // q is alive
 166   // First check if we should switch compaction space
 167   assert(this == cp->space, "'this' should be current compaction space.");
 168   size_t compaction_max_size = pointer_delta(end(), compact_top);
 169   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 170     "virtual adjustObjectSize_v() method is not correct");
 171   size_t adjusted_size = adjustObjectSize(size);
 172   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 173          "no small fragments allowed");
 174   assert(minimum_free_block_size() == MinChunkSize,
 175          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 177   if (adjusted_size + MinChunkSize > compaction_max_size &&
 178       adjusted_size != compaction_max_size) {
 179     do {
 180       // switch to next compaction space
 181       cp->space->set_compaction_top(compact_top);
 182       cp->space = cp->space->next_compaction_space();
 183       if (cp->space == NULL) {
 184         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 185         assert(cp->gen != NULL, "compaction must succeed");
 186         cp->space = cp->gen->first_compaction_space();
 187         assert(cp->space != NULL, "generation must have a first compaction space");
 188       }
 189       compact_top = cp->space->bottom();
 190       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method, which was not used above in order to save
      // the virtual dispatch.
 195       adjusted_size = cp->space->adjust_object_size_v(size);
 196       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 197       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 198     } while (adjusted_size > compaction_max_size);
 199   }
 200 
 201   // store the forwarding pointer into the mark word
 202   if ((HeapWord*)q != compact_top) {
 203     q->forward_to(oop(compact_top));
 204     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 205   } else {
 206     // if the object isn't moving we can just set the mark to the default
 207     // mark and handle it specially later on.
 208     q->init_mark();
 209     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 210   }
 211 
 212   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
 213   compact_top += adjusted_size;
 214 
 215   // we need to update the offset table so that the beginnings of objects can be
 216   // found during scavenge.  Note that we are updating the offset table based on
 217   // where the object will be once the compaction phase finishes.
 218 
  // Always call cross_threshold().  A contiguous space calls it only when
  // the compaction_top exceeds the current threshold, but that shortcut
  // does not apply to a non-contiguous space.
 222   cp->threshold =
 223     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 224   return compact_top;
 225 }
 226 
 227 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 228 // and use of single_block instead of alloc_block.  The name here is not really
 229 // appropriate - maybe a more general name could be invented for both the
 230 // contiguous and noncontiguous spaces.
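// In this variant the whole [start, the_end) range is recorded in the block
// offset table as a single block, and end() is returned as the new threshold
// value for the caller.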
 231 
 232 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 233   _bt.single_block(start, the_end);
 234   return end();
 235 }
 236 
// Initialize the indexed free lists to be empty.
 238 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 239   for (size_t i = 0; i < IndexSetSize; i++) {
 240     // Note that on platforms where objects are double word aligned,
 241     // the odd array elements are not used.  It is convenient, however,
 242     // to map directly from the object size to the array element.
 243     _indexedFreeList[i].reset(IndexSetSize);
 244     _indexedFreeList[i].set_size(i);
 245     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 246     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 247     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 248     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 249   }
 250 }
 251 
 252 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 253   for (int i = 1; i < IndexSetSize; i++) {
 254     assert(_indexedFreeList[i].size() == (size_t) i,
 255       "Indexed free list sizes are incorrect");
 256     _indexedFreeList[i].reset(IndexSetSize);
 257     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 258     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 259     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 260     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 261   }
 262 }
 263 
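// After reset() the space contains at most a single free block covering
// "mr": it goes into the dictionary when it is at least IndexSetSize words
// long, and into the matching indexed free list otherwise.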
 264 void CompactibleFreeListSpace::reset(MemRegion mr) {
 265   resetIndexedFreeListArray();
 266   dictionary()->reset();
 267   if (BlockOffsetArrayUseUnallocatedBlock) {
 268     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 269     // Everything's allocated until proven otherwise.
 270     _bt.set_unallocated_block(end());
 271   }
 272   if (!mr.is_empty()) {
 273     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 274     _bt.single_block(mr.start(), mr.word_size());
 275     FreeChunk* fc = (FreeChunk*) mr.start();
 276     fc->setSize(mr.word_size());
 277     if (mr.word_size() >= IndexSetSize ) {
 278       returnChunkToDictionary(fc);
 279     } else {
 280       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 281       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
 282     }
 283   }
 284   _promoInfo.reset();
 285   _smallLinearAllocBlock._ptr = NULL;
 286   _smallLinearAllocBlock._word_size = 0;
 287 }
 288 
 289 void CompactibleFreeListSpace::reset_after_compaction() {
 290   // Reset the space to the new reality - one free chunk.
 291   MemRegion mr(compaction_top(), end());
 292   reset(mr);
 293   // Now refill the linear allocation block(s) if possible.
 294   if (_adaptive_freelists) {
 295     refillLinearAllocBlocksIfNeeded();
 296   } else {
 297     // Place as much of mr in the linAB as we can get,
 298     // provided it was big enough to go into the dictionary.
 299     FreeChunk* fc = dictionary()->findLargestDict();
 300     if (fc != NULL) {
 301       assert(fc->size() == mr.word_size(),
 302              "Why was the chunk broken up?");
 303       removeChunkFromDictionary(fc);
 304       HeapWord* addr = (HeapWord*) fc;
 305       _smallLinearAllocBlock.set(addr, fc->size() ,
 306         1024*SmallForLinearAlloc, fc->size());
 307       // Note that _unallocated_block is not updated here.
 308     }
 309   }
 310 }
 311 
 312 // Walks the entire dictionary, returning a coterminal
 313 // chunk, if it exists. Use with caution since it involves
 314 // a potentially complete walk of a potentially large tree.
 315 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 316 
 317   assert_lock_strong(&_freelistLock);
 318 
 319   return dictionary()->find_chunk_ends_at(end());
 320 }
 321 
 322 
 323 #ifndef PRODUCT
 324 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 325   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 326     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
 327   }
 328 }
 329 
 330 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 331   size_t sum = 0;
 332   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 333     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
 334   }
 335   return sum;
 336 }
 337 
 338 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 339   size_t count = 0;
 340   for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
 341     debug_only(
 342       ssize_t total_list_count = 0;
 343       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 344          fc = fc->next()) {
 345         total_list_count++;
 346       }
 347       assert(total_list_count ==  _indexedFreeList[i].count(),
 348         "Count in list is incorrect");
 349     )
 350     count += _indexedFreeList[i].count();
 351   }
 352   return count;
 353 }
 354 
 355 size_t CompactibleFreeListSpace::totalCount() {
 356   size_t num = totalCountInIndexedFreeLists();
 357   num +=  dictionary()->totalCount();
 358   if (_smallLinearAllocBlock._word_size != 0) {
 359     num++;
 360   }
 361   return num;
 362 }
 363 #endif
 364 
 365 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 366   FreeChunk* fc = (FreeChunk*) p;
 367   return fc->isFree();
 368 }
 369 
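// used() is computed as capacity() - free(), and so inherits the
// approximate, "MT-safe but not MT-precise" character of free() below.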
 370 size_t CompactibleFreeListSpace::used() const {
 371   return capacity() - free();
 372 }
 373 
 374 size_t CompactibleFreeListSpace::free() const {
 375   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 376   // if you do this while the structures are in flux you
 377   // may get an approximate answer only; for instance
 378   // because there is concurrent allocation either
 379   // directly by mutators or for promotion during a GC.
 380   // It's "MT-safe", however, in the sense that you are guaranteed
 381   // not to crash and burn, for instance, because of walking
 382   // pointers that could disappear as you were walking them.
 383   // The approximation is because the various components
 384   // that are read below are not read atomically (and
 385   // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 387   // this is during a resize operation at the end of GC
 388   // and at that time you are guaranteed to get the
 389   // correct actual value. However, for instance, this is
 390   // also read completely asynchronously by the "perf-sampler"
 391   // that supports jvmstat, and you are apt to see the values
 392   // flicker in such cases.
 393   assert(_dictionary != NULL, "No _dictionary?");
 394   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
 395           totalSizeInIndexedFreeLists() +
 396           _smallLinearAllocBlock._word_size) * HeapWordSize;
 397 }
 398 
 399 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 400   assert(_dictionary != NULL, "No _dictionary?");
 401   assert_locked();
 402   size_t res = _dictionary->maxChunkSize();
 403   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 404                        (size_t) SmallForLinearAlloc - 1));
 405   // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
 408   // just return res calculated above? My reasoning was that
 409   // those cases will be so rare that the extra time spent doesn't
 410   // really matter....
 411   // Note: do not change the loop test i >= res + IndexSetStride
 412   // to i > res below, because i is unsigned and res may be zero.
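  // For example (illustrative numbers only): if the dictionary's largest
  // chunk is 40 words, the small linAB holds 10 words, and the largest
  // non-empty indexed list is _indexedFreeList[64], then res is 40 going
  // into the loop below and 64 is what gets returned.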
 413   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 414        i -= IndexSetStride) {
 415     if (_indexedFreeList[i].head() != NULL) {
 416       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 417       return i;
 418     }
 419   }
 420   return res;
 421 }
 422 
 423 void LinearAllocBlock::print_on(outputStream* st) const {
 424   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 425             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 426             _ptr, _word_size, _refillSize, _allocation_size_limit);
 427 }
 428 
 429 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 430   st->print_cr("COMPACTIBLE FREELIST SPACE");
 431   st->print_cr(" Space:");
 432   Space::print_on(st);
 433 
 434   st->print_cr("promoInfo:");
 435   _promoInfo.print_on(st);
 436 
 437   st->print_cr("_smallLinearAllocBlock");
 438   _smallLinearAllocBlock.print_on(st);
 439 
 440   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 441 
 442   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 443                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 444 }
 445 
 446 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 447 const {
 448   reportIndexedFreeListStatistics();
 449   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 450   gclog_or_tty->print_cr("---------------------------");
 451   FreeList::print_labels_on(st, "size");
 452   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 453     _indexedFreeList[i].print_on(gclog_or_tty);
 454     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 455          fc = fc->next()) {
 456       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 457                           fc, (HeapWord*)fc + i,
 458                           fc->cantCoalesce() ? "\t CC" : "");
 459     }
 460   }
 461 }
 462 
 463 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 464 const {
 465   _promoInfo.print_on(st);
 466 }
 467 
 468 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 469 const {
 470   _dictionary->reportStatistics();
 471   st->print_cr("Layout of Freelists in Tree");
 472   st->print_cr("---------------------------");
 473   _dictionary->print_free_lists(st);
 474 }
 475 
 476 class BlkPrintingClosure: public BlkClosure {
 477   const CMSCollector*             _collector;
 478   const CompactibleFreeListSpace* _sp;
 479   const CMSBitMap*                _live_bit_map;
 480   const bool                      _post_remark;
 481   outputStream*                   _st;
 482 public:
 483   BlkPrintingClosure(const CMSCollector* collector,
 484                      const CompactibleFreeListSpace* sp,
 485                      const CMSBitMap* live_bit_map,
 486                      outputStream* st):
 487     _collector(collector),
 488     _sp(sp),
 489     _live_bit_map(live_bit_map),
 490     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 491     _st(st) { }
 492   size_t do_blk(HeapWord* addr);
 493 };
 494 
 495 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 496   size_t sz = _sp->block_size_no_stall(addr, _collector);
 497   assert(sz != 0, "Should always be able to compute a size");
 498   if (_sp->block_is_obj(addr)) {
 499     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 500     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 501       addr,
 502       dead ? "dead" : "live",
 503       sz,
 504       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 505     if (CMSPrintObjectsInDump && !dead) {
 506       oop(addr)->print_on(_st);
 507       _st->print_cr("--------------------------------------");
 508     }
 509   } else { // free block
 510     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 511       addr, sz, CMSPrintChunksInDump ? ":" : ".");
 512     if (CMSPrintChunksInDump) {
 513       ((FreeChunk*)addr)->print_on(_st);
 514       _st->print_cr("--------------------------------------");
 515     }
 516   }
 517   return sz;
 518 }
 519 
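// The dump below is only meaningful at a safepoint with the free list locks
// held (hence the name); the CMSPrintObjectsInDump and CMSPrintChunksInDump
// flags control how much detail BlkPrintingClosure emits for each block.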
 520 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 521   outputStream* st) {
 522   st->print_cr("\n=========================");
 523   st->print_cr("Block layout in CMS Heap:");
 524   st->print_cr("=========================");
 525   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 526   blk_iterate(&bpcl);
 527 
 528   st->print_cr("\n=======================================");
 529   st->print_cr("Order & Layout of Promotion Info Blocks");
 530   st->print_cr("=======================================");
 531   print_promo_info_blocks(st);
 532 
 533   st->print_cr("\n===========================");
 534   st->print_cr("Order of Indexed Free Lists");
 535   st->print_cr("=========================");
 536   print_indexed_free_lists(st);
 537 
 538   st->print_cr("\n=================================");
 539   st->print_cr("Order of Free Lists in Dictionary");
 540   st->print_cr("=================================");
 541   print_dictionary_free_lists(st);
 542 }
 543 
 544 
 545 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 546   assert_lock_strong(&_freelistLock);
 547   assert(PrintFLSStatistics != 0, "Reporting error");
 548   _dictionary->reportStatistics();
 549   if (PrintFLSStatistics > 1) {
 550     reportIndexedFreeListStatistics();
 551     size_t totalSize = totalSizeInIndexedFreeLists() +
 552                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
 554   }
 555 }
 556 
 557 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 558   assert_lock_strong(&_freelistLock);
 559   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 560                       "--------------------------------\n");
 561   size_t totalSize = totalSizeInIndexedFreeLists();
 562   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
 568   }
 569 }
 570 
 571 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 572   size_t res = 0;
 573   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 574     debug_only(
 575       ssize_t recount = 0;
 576       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 577          fc = fc->next()) {
 578         recount += 1;
 579       }
 580       assert(recount == _indexedFreeList[i].count(),
 581         "Incorrect count in list");
 582     )
 583     res += _indexedFreeList[i].count();
 584   }
 585   return res;
 586 }
 587 
 588 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 589   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 590     if (_indexedFreeList[i].head() != NULL) {
 591       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 592       return (size_t)i;
 593     }
 594   }
 595   return 0;
 596 }
 597 
 598 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 599   HeapWord* prevEnd = end();
 600   assert(prevEnd != value, "unnecessary set_end call");
 601   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 602         "New end is below unallocated block");
 603   _end = value;
 604   if (prevEnd != NULL) {
 605     // Resize the underlying block offset table.
 606     _bt.resize(pointer_delta(value, bottom()));
 607     if (value <= prevEnd) {
 608       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 609              "New end is below unallocated block");
 610     } else {
 611       // Now, take this new chunk and add it to the free blocks.
 612       // Note that the BOT has not yet been updated for this block.
 613       size_t newFcSize = pointer_delta(value, prevEnd);
 614       // XXX This is REALLY UGLY and should be fixed up. XXX
 615       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 616         // Mark the boundary of the new block in BOT
 617         _bt.mark_block(prevEnd, value);
 618         // put it all in the linAB
 619         if (ParallelGCThreads == 0) {
 620           _smallLinearAllocBlock._ptr = prevEnd;
 621           _smallLinearAllocBlock._word_size = newFcSize;
 622           repairLinearAllocBlock(&_smallLinearAllocBlock);
 623         } else { // ParallelGCThreads > 0
 624           MutexLockerEx x(parDictionaryAllocLock(),
 625                           Mutex::_no_safepoint_check_flag);
 626           _smallLinearAllocBlock._ptr = prevEnd;
 627           _smallLinearAllocBlock._word_size = newFcSize;
 628           repairLinearAllocBlock(&_smallLinearAllocBlock);
 629         }
 630         // Births of chunks put into a LinAB are not recorded.  Births
 631         // of chunks as they are allocated out of a LinAB are.
 632       } else {
 633         // Add the block to the free lists, if possible coalescing it
 634         // with the last free block, and update the BOT and census data.
 635         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 636       }
 637     }
 638   }
 639 }
 640 
 641 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 642   CompactibleFreeListSpace* _cfls;
 643   CMSCollector* _collector;
 644 protected:
 645   // Override.
 646 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 647   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 648                                        HeapWord* bottom, HeapWord* top, \
 649                                        ClosureType* cl);                \
 650       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 651                                        HeapWord* bottom, HeapWord* top, \
 652                                        ClosureType* cl);                \
 653     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 654                                        HeapWord* bottom, HeapWord* top, \
 655                                        ClosureType* cl)
 656   walk_mem_region_with_cl_DECL(OopClosure);
 657   walk_mem_region_with_cl_DECL(FilteringClosure);
 658 
 659 public:
 660   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 661                       CMSCollector* collector,
 662                       OopClosure* cl,
 663                       CardTableModRefBS::PrecisionStyle precision,
 664                       HeapWord* boundary) :
 665     Filtering_DCTOC(sp, cl, precision, boundary),
 666     _cfls(sp), _collector(collector) {}
 667 };
 668 
 669 // We de-virtualize the block-related calls below, since we know that our
 670 // space is a CompactibleFreeListSpace.
 671 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 672 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 673                                                  HeapWord* bottom,              \
 674                                                  HeapWord* top,                 \
 675                                                  ClosureType* cl) {             \
 676    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
 677      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 678    } else {                                                                     \
 679      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 680    }                                                                            \
 681 }                                                                               \
 682 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 683                                                       HeapWord* bottom,         \
 684                                                       HeapWord* top,            \
 685                                                       ClosureType* cl) {        \
 686   /* Skip parts that are before "mr", in case "block_start" sent us             \
 687      back too far. */                                                           \
 688   HeapWord* mr_start = mr.start();                                              \
 689   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 690   HeapWord* next = bottom + bot_size;                                           \
 691   while (next < mr_start) {                                                     \
 692     bottom = next;                                                              \
 693     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 694     next = bottom + bot_size;                                                   \
 695   }                                                                             \
 696                                                                                 \
 697   while (bottom < top) {                                                        \
 698     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 699         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 700                     oop(bottom)) &&                                             \
 701         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 702       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 703       bottom += _cfls->adjustObjectSize(word_sz);                               \
 704     } else {                                                                    \
 705       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 706     }                                                                           \
 707   }                                                                             \
 708 }                                                                               \
 709 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 710                                                         HeapWord* bottom,       \
 711                                                         HeapWord* top,          \
 712                                                         ClosureType* cl) {      \
 713   /* Skip parts that are before "mr", in case "block_start" sent us             \
 714      back too far. */                                                           \
 715   HeapWord* mr_start = mr.start();                                              \
 716   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 717   HeapWord* next = bottom + bot_size;                                           \
 718   while (next < mr_start) {                                                     \
 719     bottom = next;                                                              \
 720     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 721     next = bottom + bot_size;                                                   \
 722   }                                                                             \
 723                                                                                 \
 724   while (bottom < top) {                                                        \
 725     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 726         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 727                     oop(bottom)) &&                                             \
 728         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 729       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 730       bottom += _cfls->adjustObjectSize(word_sz);                               \
 731     } else {                                                                    \
 732       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 733     }                                                                           \
 734   }                                                                             \
 735 }
 736 
 737 // (There are only two of these, rather than N, because the split is due
 738 // only to the introduction of the FilteringClosure, a local part of the
 739 // impl of this abstraction.)
 740 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
 741 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 742 
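// The factory below is typically invoked from the card-table scanning code,
// which hands the resulting FreeListSpace_DCTOC one dirty-card region at a
// time; the de-virtualized block calls in the macro above keep that
// per-card walking reasonably cheap.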
 743 DirtyCardToOopClosure*
 744 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
 745                                       CardTableModRefBS::PrecisionStyle precision,
 746                                       HeapWord* boundary) {
 747   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 748 }
 749 
 750 
 751 // Note on locking for the space iteration functions:
 752 // since the collector's iteration activities are concurrent with
 753 // allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance, a block being iterated
 755 // may suddenly be allocated or divided up and part of it allocated and
 756 // so on.
 757 
 758 // Apply the given closure to each block in the space.
 759 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 760   assert_lock_strong(freelistLock());
 761   HeapWord *cur, *limit;
 762   for (cur = bottom(), limit = end(); cur < limit;
 763        cur += cl->do_blk_careful(cur));
 764 }
 765 
 766 // Apply the given closure to each block in the space.
 767 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 768   assert_lock_strong(freelistLock());
 769   HeapWord *cur, *limit;
 770   for (cur = bottom(), limit = end(); cur < limit;
 771        cur += cl->do_blk(cur));
 772 }
 773 
 774 // Apply the given closure to each oop in the space.
 775 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
 776   assert_lock_strong(freelistLock());
 777   HeapWord *cur, *limit;
 778   size_t curSize;
 779   for (cur = bottom(), limit = end(); cur < limit;
 780        cur += curSize) {
 781     curSize = block_size(cur);
 782     if (block_is_obj(cur)) {
 783       oop(cur)->oop_iterate(cl);
 784     }
 785   }
 786 }
 787 
// Apply the given closure to each oop in the intersection of the space and the memory region.
 789 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
 790   assert_lock_strong(freelistLock());
 791   if (is_empty()) {
 792     return;
 793   }
 794   MemRegion cur = MemRegion(bottom(), end());
 795   mr = mr.intersection(cur);
 796   if (mr.is_empty()) {
 797     return;
 798   }
 799   if (mr.equals(cur)) {
 800     oop_iterate(cl);
 801     return;
 802   }
 803   assert(mr.end() <= end(), "just took an intersection above");
 804   HeapWord* obj_addr = block_start(mr.start());
 805   HeapWord* t = mr.end();
 806 
 807   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 808   if (block_is_obj(obj_addr)) {
 809     // Handle first object specially.
 810     oop obj = oop(obj_addr);
 811     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 812   } else {
 813     FreeChunk* fc = (FreeChunk*)obj_addr;
 814     obj_addr += fc->size();
 815   }
 816   while (obj_addr < t) {
 817     HeapWord* obj = obj_addr;
 818     obj_addr += block_size(obj_addr);
 819     // If "obj_addr" is not greater than top, then the
 820     // entire object "obj" is within the region.
 821     if (obj_addr <= t) {
 822       if (block_is_obj(obj)) {
 823         oop(obj)->oop_iterate(cl);
 824       }
 825     } else {
 826       // "obj" extends beyond end of region
 827       if (block_is_obj(obj)) {
 828         oop(obj)->oop_iterate(&smr_blk);
 829       }
 830       break;
 831     }
 832   }
 833 }
 834 
 835 // NOTE: In the following methods, in order to safely be able to
 836 // apply the closure to an object, we need to be sure that the
 837 // object has been initialized. We are guaranteed that an object
 838 // is initialized if we are holding the Heap_lock with the
 839 // world stopped.
 840 void CompactibleFreeListSpace::verify_objects_initialized() const {
 841   if (is_init_completed()) {
 842     assert_locked_or_safepoint(Heap_lock);
 843     if (Universe::is_fully_initialized()) {
 844       guarantee(SafepointSynchronize::is_at_safepoint(),
 845                 "Required for objects to be initialized");
 846     }
 847   } // else make a concession at vm start-up
 848 }
 849 
 850 // Apply the given closure to each object in the space
 851 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 852   assert_lock_strong(freelistLock());
 853   NOT_PRODUCT(verify_objects_initialized());
 854   HeapWord *cur, *limit;
 855   size_t curSize;
 856   for (cur = bottom(), limit = end(); cur < limit;
 857        cur += curSize) {
 858     curSize = block_size(cur);
 859     if (block_is_obj(cur)) {
 860       blk->do_object(oop(cur));
 861     }
 862   }
 863 }
 864 
 865 // Apply the given closure to each live object in the space
 866 //   The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GC's can leave
// objects in the space with references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
 871 // obj_is_alive() to determine whether it is safe to apply the closure to
 872 // an object.  See obj_is_alive() for details on how liveness of an
 873 // object is decided.
 874 
 875 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 876   assert_lock_strong(freelistLock());
 877   NOT_PRODUCT(verify_objects_initialized());
 878   HeapWord *cur, *limit;
 879   size_t curSize;
 880   for (cur = bottom(), limit = end(); cur < limit;
 881        cur += curSize) {
 882     curSize = block_size(cur);
 883     if (block_is_obj(cur) && obj_is_alive(cur)) {
 884       blk->do_object(oop(cur));
 885     }
 886   }
 887 }
 888 
 889 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 890                                                   UpwardsObjectClosure* cl) {
 891   assert_locked(freelistLock());
 892   NOT_PRODUCT(verify_objects_initialized());
 893   Space::object_iterate_mem(mr, cl);
 894 }
 895 
 896 // Callers of this iterator beware: The closure application should
 897 // be robust in the face of uninitialized objects and should (always)
 898 // return a correct size so that the next addr + size below gives us a
 899 // valid block boundary. [See for instance,
 900 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 901 // in ConcurrentMarkSweepGeneration.cpp.]
 902 HeapWord*
 903 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 904   assert_lock_strong(freelistLock());
 905   HeapWord *addr, *last;
 906   size_t size;
 907   for (addr = bottom(), last  = end();
 908        addr < last; addr += size) {
 909     FreeChunk* fc = (FreeChunk*)addr;
 910     if (fc->isFree()) {
 911       // Since we hold the free list lock, which protects direct
 912       // allocation in this generation by mutators, a free object
 913       // will remain free throughout this iteration code.
 914       size = fc->size();
 915     } else {
 916       // Note that the object need not necessarily be initialized,
 917       // because (for instance) the free list lock does NOT protect
 918       // object initialization. The closure application below must
 919       // therefore be correct in the face of uninitialized objects.
 920       size = cl->do_object_careful(oop(addr));
 921       if (size == 0) {
 922         // An unparsable object found. Signal early termination.
 923         return addr;
 924       }
 925     }
 926   }
 927   return NULL;
 928 }
 929 
 930 // Callers of this iterator beware: The closure application should
 931 // be robust in the face of uninitialized objects and should (always)
 932 // return a correct size so that the next addr + size below gives us a
 933 // valid block boundary. [See for instance,
 934 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 935 // in ConcurrentMarkSweepGeneration.cpp.]
 936 HeapWord*
 937 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 938   ObjectClosureCareful* cl) {
 939   assert_lock_strong(freelistLock());
 940   // Can't use used_region() below because it may not necessarily
 941   // be the same as [bottom(),end()); although we could
 942   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 943   // that appears too cumbersome, so we just do the simpler check
 944   // in the assertion below.
 945   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 946          "mr should be non-empty and within used space");
 947   HeapWord *addr, *end;
 948   size_t size;
 949   for (addr = block_start_careful(mr.start()), end  = mr.end();
 950        addr < end; addr += size) {
 951     FreeChunk* fc = (FreeChunk*)addr;
 952     if (fc->isFree()) {
 953       // Since we hold the free list lock, which protects direct
 954       // allocation in this generation by mutators, a free object
 955       // will remain free throughout this iteration code.
 956       size = fc->size();
 957     } else {
 958       // Note that the object need not necessarily be initialized,
 959       // because (for instance) the free list lock does NOT protect
 960       // object initialization. The closure application below must
 961       // therefore be correct in the face of uninitialized objects.
 962       size = cl->do_object_careful_m(oop(addr), mr);
 963       if (size == 0) {
 964         // An unparsable object found. Signal early termination.
 965         return addr;
 966       }
 967     }
 968   }
 969   return NULL;
 970 }
 971 
 972 
 973 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 974   NOT_PRODUCT(verify_objects_initialized());
 975   return _bt.block_start(p);
 976 }
 977 
 978 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 979   return _bt.block_start_careful(p);
 980 }
 981 
 982 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 983   NOT_PRODUCT(verify_objects_initialized());
 984   // This must be volatile, or else there is a danger that the compiler
 985   // will compile the code below into a sometimes-infinite loop, by keeping
 986   // the value read the first time in a register.
 987   while (true) {
 988     // We must do this until we get a consistent view of the object.
 989     if (FreeChunk::indicatesFreeChunk(p)) {
 990       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 991       size_t res = fc->size();
 992       // If the object is still a free chunk, return the size, else it
 993       // has been allocated so try again.
 994       if (FreeChunk::indicatesFreeChunk(p)) {
 995         assert(res != 0, "Block size should not be 0");
 996         return res;
 997       }
 998     } else {
 999       // must read from what 'p' points to in each loop.
1000       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
1001       if (k != NULL) {
1002         assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
1003         oop o = (oop)p;
1004         assert(o->is_parsable(), "Should be parsable");
1005         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1006         size_t res = o->size_given_klass(k->klass_part());
1007         res = adjustObjectSize(res);
1008         assert(res != 0, "Block size should not be 0");
1009         return res;
1010       }
1011     }
1012   }
1013 }
1014 
1015 // A variant of the above that uses the Printezis bits for
1016 // unparsable but allocated objects. This avoids any possible
1017 // stalls waiting for mutators to initialize objects, and is
1018 // thus potentially faster than the variant above. However,
1019 // this variant may return a zero size for a block that is
1020 // under mutation and for which a consistent size cannot be
1021 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
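// It is used, for instance, by BlkPrintingClosure::do_blk() above when
// dumping the heap at a safepoint, where every block's size should in fact
// be computable (hence the assert at that call site).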
1022 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1023                                                      const CMSCollector* c)
1024 const {
1025   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1026   // This must be volatile, or else there is a danger that the compiler
1027   // will compile the code below into a sometimes-infinite loop, by keeping
1028   // the value read the first time in a register.
1029   DEBUG_ONLY(uint loops = 0;)
1030   while (true) {
1031     // We must do this until we get a consistent view of the object.
1032     if (FreeChunk::indicatesFreeChunk(p)) {
1033       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1034       size_t res = fc->size();
1035       if (FreeChunk::indicatesFreeChunk(p)) {
1036         assert(res != 0, "Block size should not be 0");
1037         assert(loops == 0, "Should be 0");
1038         return res;
1039       }
1040     } else {
1041       // must read from what 'p' points to in each loop.
1042       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
1043       if (k != NULL &&
1044           ((oopDesc*)p)->is_parsable() &&
1045           ((oopDesc*)p)->is_conc_safe()) {
1046         assert(k->is_oop(), "Should really be klass oop.");
1047         oop o = (oop)p;
1048         assert(o->is_oop(), "Should be an oop");
1049         size_t res = o->size_given_klass(k->klass_part());
1050         res = adjustObjectSize(res);
1051         assert(res != 0, "Block size should not be 0");
1052         return res;
1053       } else {
1054         return c->block_size_if_printezis_bits(p);
1055       }
1056     }
1057     assert(loops == 0, "Can loop at most once");
1058     DEBUG_ONLY(loops++;)
1059   }
1060 }
1061 
1062 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1063   NOT_PRODUCT(verify_objects_initialized());
1064   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1065   FreeChunk* fc = (FreeChunk*)p;
1066   if (fc->isFree()) {
1067     return fc->size();
1068   } else {
1069     // Ignore mark word because this may be a recently promoted
1070     // object whose mark word is used to chain together grey
1071     // objects (the last one would have a null value).
1072     assert(oop(p)->is_oop(true), "Should be an oop");
1073     return adjustObjectSize(oop(p)->size());
1074   }
1075 }
1076 
1077 // This implementation assumes that the property of "being an object" is
1078 // stable.  But being a free chunk may not be (because of parallel
1079 // promotion.)
1080 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1081   FreeChunk* fc = (FreeChunk*)p;
1082   assert(is_in_reserved(p), "Should be in space");
1083   // When doing a mark-sweep-compact of the CMS generation, this
1084   // assertion may fail because prepare_for_compaction() uses
1085   // space that is garbage to maintain information on ranges of
1086   // live objects so that these live ranges can be moved as a whole.
1087   // Comment out this assertion until that problem can be solved
  // (i.e., that the block start calculation may look at objects
  // at addresses below "p" in finding the object that contains "p",
  // and those objects (if garbage) may have been modified to hold
  // live range information).
1092   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1093   //        "Should be a block boundary");
1094   if (FreeChunk::indicatesFreeChunk(p)) return false;
1095   klassOop k = oop(p)->klass_or_null();
1096   if (k != NULL) {
1097     // Ignore mark word because it may have been used to
1098     // chain together promoted objects (the last one
1099     // would have a null value).
1100     assert(oop(p)->is_oop(true), "Should be an oop");
1101     return true;
1102   } else {
1103     return false;  // Was not an object at the start of collection.
1104   }
1105 }
1106 
1107 // Check if the object is alive. This fact is checked either by consulting
1108 // the main marking bitmap in the sweeping phase or, if it's a permanent
1109 // generation and we're not in the sweeping phase, by checking the
1110 // perm_gen_verify_bit_map where we store the "deadness" information if
1111 // we did not sweep the perm gen in the most recent previous GC cycle.
1112 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1113   assert (block_is_obj(p), "The address should point to an object");
1114 
1115   // If we're sweeping, we use object liveness information from the main bit map
1116   // for both perm gen and old gen.
1117   // We don't need to lock the bitmap (live_map or dead_map below), because
1118   // EITHER we are in the middle of the sweeping phase, and the
1119   // main marking bit map (live_map below) is locked,
1120   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1121   // is stable, because it's mutated only in the sweeping phase.
1122   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1123     CMSBitMap* live_map = _collector->markBitMap();
1124     return live_map->isMarked((HeapWord*) p);
1125   } else {
1126     // If we're not currently sweeping and we haven't swept the perm gen in
1127     // the previous concurrent cycle then we may have dead but unswept objects
1128     // in the perm gen. In this case, we use the "deadness" information
1129     // that we had saved in perm_gen_verify_bit_map at the last sweep.
1130     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
1131       if (_collector->verifying()) {
1132         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
1133         // Object is marked in the dead_map bitmap at the previous sweep
1134         // when we know that it's dead; if the bitmap is not allocated then
1135         // the object is alive.
1136         return (dead_map->sizeInBits() == 0) // bit_map has been allocated
1137                || !dead_map->par_isMarked((HeapWord*) p);
1138       } else {
1139         return false; // We can't say for sure if it's live, so we say that it's dead.
1140       }
1141     }
1142   }
1143   return true;
1144 }
1145 
1146 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1147   FreeChunk* fc = (FreeChunk*)p;
1148   assert(is_in_reserved(p), "Should be in space");
1149   assert(_bt.block_start(p) == p, "Should be a block boundary");
1150   if (!fc->isFree()) {
1151     // Ignore mark word because it may have been used to
1152     // chain together promoted objects (the last one
1153     // would have a null value).
1154     assert(oop(p)->is_oop(true), "Should be an oop");
1155     return true;
1156   }
1157   return false;
1158 }
1159 
1160 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the free list lock when you call this.
1162 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1163   size_t size = 0;
1164   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1165     debug_only(
1166       // We may be calling here without the lock in which case we
1167       // won't do this modest sanity check.
1168       if (freelistLock()->owned_by_self()) {
1169         size_t total_list_size = 0;
1170         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1171           fc = fc->next()) {
1172           total_list_size += i;
1173         }
1174         assert(total_list_size == i * _indexedFreeList[i].count(),
1175                "Count in list is incorrect");
1176       }
1177     )
1178     size += i * _indexedFreeList[i].count();
1179   }
1180   return size;
1181 }
1182 
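// The parallel entry point below simply takes the free list lock (without a
// safepoint check) and delegates to the sequential allocate() further down.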
1183 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1184   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1185   return allocate(size);
1186 }
1187 
1188 HeapWord*
1189 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1190   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1191 }
1192 
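// A typical call pattern, sketched for some CompactibleFreeListSpace* cfls
// (the names here are illustrative, not taken from a particular caller):
//
//   size_t adjusted = cfls->adjustObjectSize(word_size);
//   HeapWord* p = cfls->allocate(adjusted);   // freelistLock() must be held
//   if (p == NULL) { /* expand the generation or fall back to a GC */ }
//
// Passing a size that has not been through adjustObjectSize() trips the
// assert at the top of allocate().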
1193 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1194   assert_lock_strong(freelistLock());
1195   HeapWord* res = NULL;
1196   assert(size == adjustObjectSize(size),
1197          "use adjustObjectSize() before calling into allocate()");
1198 
1199   if (_adaptive_freelists) {
1200     res = allocate_adaptive_freelists(size);
1201   } else {  // non-adaptive free lists
1202     res = allocate_non_adaptive_freelists(size);
1203   }
1204 
1205   if (res != NULL) {
1206     // check that res does lie in this space!
1207     assert(is_in_reserved(res), "Not in this space!");
1208     assert(is_aligned((void*)res), "alignment check");
1209 
1210     FreeChunk* fc = (FreeChunk*)res;
1211     fc->markNotFree();
1212     assert(!fc->isFree(), "shouldn't be marked free");
1213     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1214     // Verify that the block offset table shows this to
1215     // be a single block, but not one which is unallocated.
1216     _bt.verify_single_block(res, size);
1217     _bt.verify_not_unallocated(res, size);
1218     // mangle a just allocated object with a distinct pattern.
1219     debug_only(fc->mangleAllocated(size));
1220   }
1221 
1222   return res;
1223 }
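
     // Usage sketch for the allocation path above. This is only an
     // illustration: 'sp' and 'raw_words' are hypothetical names, and the
     // caller is assumed to need the free list lock (par_allocate() takes
     // it for you; allocate() expects it to be held already).
     //
     //   size_t sz = sp->adjustObjectSize(raw_words);   // pre-adjust, as asserted above
     //   HeapWord* mem = sp->par_allocate(sz);          // NULL if the request cannot be met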
1224 
1225 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1226   HeapWord* res = NULL;
1227   // try and use linear allocation for smaller blocks
1228   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1229     // if successful, the following also adjusts block offset table
1230     res = getChunkFromSmallLinearAllocBlock(size);
1231   }
1232   // Else triage to indexed lists for smaller sizes
1233   if (res == NULL) {
1234     if (size < SmallForDictionary) {
1235       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1236     } else {
1237       // else get it from the big dictionary; if even this doesn't
1238       // work we are out of luck.
1239       res = (HeapWord*)getChunkFromDictionaryExact(size);
1240     }
1241   }
1242 
1243   return res;
1244 }
1245 
1246 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1247   assert_lock_strong(freelistLock());
1248   HeapWord* res = NULL;
1249   assert(size == adjustObjectSize(size),
1250          "use adjustObjectSize() before calling into allocate()");
1251 
1252   // Strategy
1253   //   if small
1254   //     exact size from the small-object indexed list
1255   //     small or large linear allocation block (linAB) as appropriate
1256   //     take from lists of greater sized chunks
1257   //   else
1258   //     dictionary
1259   //     small or large linear allocation block if it has the space
1260   // Try allocating exact size from indexTable first
1261   if (size < IndexSetSize) {
1262     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1263     if (res != NULL) {
1264       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1265         "Not removed from free list");
1266       // no block offset table adjustment is necessary on blocks in
1267       // the indexed lists.
1268 
1269     // Try allocating from the small LinAB
1270     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1271         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1272         // if successful, the above also adjusts block offset table
1273         // Note that this call will refill the LinAB to
1274         // satisfy the request.  This is different than
1275         // evm.
1276         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1277     } else {
1278       // Raid the exact free lists larger than size, even if they are not
1279       // overpopulated.
1280       res = (HeapWord*) getChunkFromGreater(size);
1281     }
1282   } else {
1283     // Big objects get allocated directly from the dictionary.
1284     res = (HeapWord*) getChunkFromDictionaryExact(size);
1285     if (res == NULL) {
1286       // Try hard not to fail since an allocation failure will likely
1287       // trigger a synchronous GC.  Try to get the space from the
1288       // allocation blocks.
1289       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1290     }
1291   }
1292 
1293   return res;
1294 }
1295 
1296 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1297 // when promoting obj.
1298 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1299   // Depending on the object size, expansion may require refilling either a
1300   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1301   // is added because the dictionary may over-allocate to avoid fragmentation.
1302   size_t space = obj_size;
1303   if (!_adaptive_freelists) {
1304     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1305   }
1306   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1307   return space;
1308 }
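
     // Worked example with illustrative numbers: for obj_size = 100 words,
     // a non-adaptive linAB refill size of 256, a PromotionInfo refill size
     // of 64 and MinChunkSize = 4, the estimate is
     // MAX2(100, 256) + 64 + 2*4 = 328 words; with adaptive free lists the
     // MAX2 term is skipped, giving 100 + 64 + 8 = 172 words.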
1309 
1310 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1311   FreeChunk* ret;
1312 
1313   assert(numWords >= MinChunkSize, "Size is less than minimum");
1314   assert(linearAllocationWouldFail() || bestFitFirst(),
1315     "Should not be here");
1316 
1317   size_t i;
1318   size_t currSize = numWords + MinChunkSize;
1319   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1320   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1321     FreeList* fl = &_indexedFreeList[i];
1322     if (fl->head()) {
1323       ret = getFromListGreater(fl, numWords);
1324       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1325       return ret;
1326     }
1327   }
1328 
1329   currSize = MAX2((size_t)SmallForDictionary,
1330                   (size_t)(numWords + MinChunkSize));
1331 
1332   /* Try to get a chunk that satisfies request, while avoiding
1333      fragmentation that can't be handled. */
1334   {
1335     ret =  dictionary()->getChunk(currSize);
1336     if (ret != NULL) {
1337       assert(ret->size() - numWords >= MinChunkSize,
1338              "Chunk is too small");
1339       _bt.allocated((HeapWord*)ret, ret->size());
1340       /* Carve returned chunk. */
1341       (void) splitChunkAndReturnRemainder(ret, numWords);
1342       /* Label this as no longer a free chunk. */
1343       assert(ret->isFree(), "This chunk should be free");
1344       ret->linkPrev(NULL);
1345     }
1346     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1347     return ret;
1348   }
1349   ShouldNotReachHere();
1350 }
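
     // Worked example with illustrative numbers: for numWords = 20 and
     // MinChunkSize = 4, the indexed-list scan starts at size 24 so that a
     // hit always leaves a remainder of at least MinChunkSize; if the scan
     // fails, the dictionary is asked for MAX2(SmallForDictionary, 24) words
     // and splitChunkAndReturnRemainder() hands back a 20-word chunk while
     // the remainder goes back onto the free lists.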
1351 
1352 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
1353   const {
1354   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1355   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
1356 }
1357 
1358 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
1359   if (fc->size() >= IndexSetSize) {
1360     return dictionary()->verifyChunkInFreeLists(fc);
1361   } else {
1362     return verifyChunkInIndexedFreeLists(fc);
1363   }
1364 }
1365 
1366 #ifndef PRODUCT
1367 void CompactibleFreeListSpace::assert_locked() const {
1368   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1369 }
1370 
1371 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1372   CMSLockVerifier::assert_locked(lock);
1373 }
1374 #endif
1375 
1376 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1377   // In the parallel case, the main thread holds the free list lock
1378   // on behalf of the parallel threads.
1379   FreeChunk* fc;
1380   {
1381     // If GC is parallel, this might be called by several threads.
1382     // This should be rare enough that the locking overhead won't affect
1383     // the sequential code.
1384     MutexLockerEx x(parDictionaryAllocLock(),
1385                     Mutex::_no_safepoint_check_flag);
1386     fc = getChunkFromDictionary(size);
1387   }
1388   if (fc != NULL) {
1389     fc->dontCoalesce();
1390     assert(fc->isFree(), "Should be free, but not coalescable");
1391     // Verify that the block offset table shows this to
1392     // be a single block, but not one which is unallocated.
1393     _bt.verify_single_block((HeapWord*)fc, fc->size());
1394     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1395   }
1396   return fc;
1397 }
1398 
1399 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1400   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1401   assert_locked();
1402 
1403   // if we are tracking promotions, then first ensure space for
1404   // promotion (including spooling space for saving header if necessary).
1405   // then allocate and copy, then track promoted info if needed.
1406   // When tracking (see PromotionInfo::track()), the mark word may
1407   // be displaced and in this case restoration of the mark word
1408   // occurs in the (oop_since_save_marks_)iterate phase.
1409   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1410     return NULL;
1411   }
1412   // Call the allocate(size_t, bool) form directly to avoid the
1413   // additional call through the allocate(size_t) form.  Having
1414   // the compiler inline the call is problematic because allocate(size_t)
1415   // is a virtual method.
1416   HeapWord* res = allocate(adjustObjectSize(obj_size));
1417   if (res != NULL) {
1418     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1419     // if we should be tracking promotions, do so.
1420     if (_promoInfo.tracking()) {
1421         _promoInfo.track((PromotedObject*)res);
1422     }
1423   }
1424   return oop(res);
1425 }
1426 
1427 HeapWord*
1428 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1429   assert_locked();
1430   assert(size >= MinChunkSize, "minimum chunk size");
1431   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1432     "maximum from smallLinearAllocBlock");
1433   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1434 }
1435 
1436 HeapWord*
1437 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1438                                                        size_t size) {
1439   assert_locked();
1440   assert(size >= MinChunkSize, "too small");
1441   HeapWord* res = NULL;
1442   // Try to do linear allocation from blk, bailing out early if the block is empty.
1443   if (blk->_word_size == 0) {
1444     // We have probably been unable to fill this either in the prologue or
1445     // when it was exhausted at the last linear allocation. Bail out until
1446     // next time.
1447     assert(blk->_ptr == NULL, "consistency check");
1448     return NULL;
1449   }
1450   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1451   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1452   if (res != NULL) return res;
1453 
1454   // about to exhaust this linear allocation block
1455   if (blk->_word_size == size) { // exactly satisfied
1456     res = blk->_ptr;
1457     _bt.allocated(res, blk->_word_size);
1458   } else if (size + MinChunkSize <= blk->_refillSize) {
1459     size_t sz = blk->_word_size;
1460     // Update _unallocated_block if the size is such that chunk would be
1461     // returned to the indexed free list.  All other chunks in the indexed
1462     // free lists are allocated from the dictionary so that _unallocated_block
1463     // has already been adjusted for them.  Do the adjustment here so that
1464     // the cost is incurred for all chunks added back to the indexed free lists.
1465     if (sz < SmallForDictionary) {
1466       _bt.allocated(blk->_ptr, sz);
1467     }
1468     // Return the chunk that isn't big enough, and then refill below.
1469     addChunkToFreeLists(blk->_ptr, sz);
1470     splitBirth(sz);
1471     // Don't keep statistics on adding back a chunk from a LinAB.
1472   } else {
1473     // A refilled block would not satisfy the request.
1474     return NULL;
1475   }
1476 
1477   blk->_ptr = NULL; blk->_word_size = 0;
1478   refillLinearAllocBlock(blk);
1479   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1480          "block was replenished");
1481   if (res != NULL) {
1482     splitBirth(size);
1483     repairLinearAllocBlock(blk);
1484   } else if (blk->_ptr != NULL) {
1485     res = blk->_ptr;
1486     size_t blk_size = blk->_word_size;
1487     blk->_word_size -= size;
1488     blk->_ptr  += size;
1489     splitBirth(size);
1490     repairLinearAllocBlock(blk);
1491     // Update BOT last so that other (parallel) GC threads see a consistent
1492     // view of the BOT and free blocks.
1493     // Above must occur before BOT is updated below.
1494     OrderAccess::storestore();
1495     _bt.split_block(res, blk_size, size);  // adjust block offset table
1496   }
1497   return res;
1498 }
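
     // Worked example with illustrative numbers: for size = 10,
     // MinChunkSize = 4 and a linAB holding 12 words, the remainder path
     // fails (12 < 10 + 4); since 10 + 4 <= _refillSize, the 12-word
     // leftover is returned to the free lists, the linAB is refilled (from
     // the indexed lists or the dictionary), and the request is then carved
     // off the front of the fresh block.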
1499 
1500 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1501                                         LinearAllocBlock* blk,
1502                                         size_t size) {
1503   assert_locked();
1504   assert(size >= MinChunkSize, "too small");
1505 
1506   HeapWord* res = NULL;
1507   // This is the common case.  Keep it simple.
1508   if (blk->_word_size >= size + MinChunkSize) {
1509     assert(blk->_ptr != NULL, "consistency check");
1510     res = blk->_ptr;
1511     // Note that the BOT is up-to-date for the linAB before allocation.  It
1512     // indicates the start of the linAB.  The split_block() updates the
1513     // BOT for the linAB after the allocation (indicates the start of the
1514     // next chunk to be allocated).
1515     size_t blk_size = blk->_word_size;
1516     blk->_word_size -= size;
1517     blk->_ptr  += size;
1518     splitBirth(size);
1519     repairLinearAllocBlock(blk);
1520     // Update BOT last so that other (parallel) GC threads see a consistent
1521     // view of the BOT and free blocks.
1522     // Above must occur before BOT is updated below.
1523     OrderAccess::storestore();
1524     _bt.split_block(res, blk_size, size);  // adjust block offset table
1525     _bt.allocated(res, size);
1526   }
1527   return res;
1528 }
1529 
1530 FreeChunk*
1531 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1532   assert_locked();
1533   assert(size < SmallForDictionary, "just checking");
1534   FreeChunk* res;
1535   res = _indexedFreeList[size].getChunkAtHead();
1536   if (res == NULL) {
1537     res = getChunkFromIndexedFreeListHelper(size);
1538   }
1539   _bt.verify_not_unallocated((HeapWord*) res, size);
1540   assert(res == NULL || res->size() == size, "Incorrect block size");
1541   return res;
1542 }
1543 
1544 FreeChunk*
1545 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1546   bool replenish) {
1547   assert_locked();
1548   FreeChunk* fc = NULL;
1549   if (size < SmallForDictionary) {
1550     assert(_indexedFreeList[size].head() == NULL ||
1551       _indexedFreeList[size].surplus() <= 0,
1552       "List for this size should be empty or under populated");
1553     // Try best fit in exact lists before replenishing the list
1554     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1555       // Replenish list.
1556       //
1557       // Things tried that failed.
1558       //   Tried allocating out of the two LinAB's first before
1559       // replenishing lists.
1560       //   Tried small linAB of size 256 (size in indexed list)
1561       // and replenishing indexed lists from the small linAB.
1562       //
1563       FreeChunk* newFc = NULL;
1564       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1565       if (replenish_size < SmallForDictionary) {
1566         // Do not replenish from an underpopulated size.
1567         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1568             _indexedFreeList[replenish_size].head() != NULL) {
1569           newFc = _indexedFreeList[replenish_size].getChunkAtHead();
1570         } else if (bestFitFirst()) {
1571           newFc = bestFitSmall(replenish_size);
1572         }
1573       }
1574       if (newFc == NULL && replenish_size > size) {
1575         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1576         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1577       }
1578       // Note: The stats update re split-death of the block obtained above
1579       // is recorded below, precisely when we know we are actually going
1580       // to split it into more than one piece.
1581       if (newFc != NULL) {
1582         if  (replenish || CMSReplenishIntermediate) {
1583           // Replenish this list and return one block to caller.
1584           size_t i;
1585           FreeChunk *curFc, *nextFc;
1586           size_t num_blk = newFc->size() / size;
1587           assert(num_blk >= 1, "Smaller than requested?");
1588           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1589           if (num_blk > 1) {
1590             // we are sure we will be splitting the block just obtained
1591             // into multiple pieces; record the split-death of the original
1592             splitDeath(replenish_size);
1593           }
1594           // carve up and link blocks 0, ..., num_blk - 2
1595           // The last chunk is not added to the lists but is returned as the
1596           // free chunk.
1597           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1598                i = 0;
1599                i < (num_blk - 1);
1600                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1601                i++) {
1602             curFc->setSize(size);
1603             // Don't record this as a return in order to try and
1604             // determine the "returns" from a GC.
1605             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1606             _indexedFreeList[size].returnChunkAtTail(curFc, false);
1607             _bt.mark_block((HeapWord*)curFc, size);
1608             splitBirth(size);
1609             // Don't record the initial population of the indexed list
1610             // as a split birth.
1611           }
1612 
1613           // check that the arithmetic was OK above
1614           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1615             "inconsistency in carving newFc");
1616           curFc->setSize(size);
1617           _bt.mark_block((HeapWord*)curFc, size);
1618           splitBirth(size);
1619           fc = curFc;
1620         } else {
1621           // Return entire block to caller
1622           fc = newFc;
1623         }
1624       }
1625     }
1626   } else {
1627     // Get a free chunk from the free chunk dictionary to be returned to
1628     // replenish the indexed free list.
1629     fc = getChunkFromDictionaryExact(size);
1630   }
1631   // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
1632   return fc;
1633 }
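
     // Worked example of the replenish path, with illustrative numbers:
     // for size = 8 and CMSIndexedFreeListReplenish = 4, replenish_size is
     // 32; if a 32-word chunk is obtained, num_blk = 4, the sub-chunks at
     // word offsets 0, 8 and 16 are linked onto _indexedFreeList[8], and
     // the sub-chunk at offset 24 is handed back to the caller.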
1634 
1635 FreeChunk*
1636 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1637   assert_locked();
1638   FreeChunk* fc = _dictionary->getChunk(size);
1639   if (fc == NULL) {
1640     return NULL;
1641   }
1642   _bt.allocated((HeapWord*)fc, fc->size());
1643   if (fc->size() >= size + MinChunkSize) {
1644     fc = splitChunkAndReturnRemainder(fc, size);
1645   }
1646   assert(fc->size() >= size, "chunk too small");
1647   assert(fc->size() < size + MinChunkSize, "chunk too big");
1648   _bt.verify_single_block((HeapWord*)fc, fc->size());
1649   return fc;
1650 }
1651 
1652 FreeChunk*
1653 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1654   assert_locked();
1655   FreeChunk* fc = _dictionary->getChunk(size);
1656   if (fc == NULL) {
1657     return fc;
1658   }
1659   _bt.allocated((HeapWord*)fc, fc->size());
1660   if (fc->size() == size) {
1661     _bt.verify_single_block((HeapWord*)fc, size);
1662     return fc;
1663   }
1664   assert(fc->size() > size, "getChunk() guarantee");
1665   if (fc->size() < size + MinChunkSize) {
1666     // Return the chunk to the dictionary and go get a bigger one.
1667     returnChunkToDictionary(fc);
1668     fc = _dictionary->getChunk(size + MinChunkSize);
1669     if (fc == NULL) {
1670       return NULL;
1671     }
1672     _bt.allocated((HeapWord*)fc, fc->size());
1673   }
1674   assert(fc->size() >= size + MinChunkSize, "tautology");
1675   fc = splitChunkAndReturnRemainder(fc, size);
1676   assert(fc->size() == size, "chunk is wrong size");
1677   _bt.verify_single_block((HeapWord*)fc, size);
1678   return fc;
1679 }
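
     // Worked example with illustrative numbers: for size = 40 and
     // MinChunkSize = 4, a 42-word dictionary chunk cannot be used (the
     // 2-word remainder after a split would be below MinChunkSize), so it
     // is returned and a chunk of at least 44 words is requested instead;
     // that chunk splits cleanly into the exact 40-word block plus a
     // remainder of at least MinChunkSize.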
1680 
1681 void
1682 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1683   assert_locked();
1684 
1685   size_t size = chunk->size();
1686   _bt.verify_single_block((HeapWord*)chunk, size);
1687   // adjust _unallocated_block downward, as necessary
1688   _bt.freed((HeapWord*)chunk, size);
1689   _dictionary->returnChunk(chunk);
1690 #ifndef PRODUCT
1691   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1692     TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
1693   }
1694 #endif // PRODUCT
1695 }
1696 
1697 void
1698 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1699   assert_locked();
1700   size_t size = fc->size();
1701   _bt.verify_single_block((HeapWord*) fc, size);
1702   _bt.verify_not_unallocated((HeapWord*) fc, size);
1703   if (_adaptive_freelists) {
1704     _indexedFreeList[size].returnChunkAtTail(fc);
1705   } else {
1706     _indexedFreeList[size].returnChunkAtHead(fc);
1707   }
1708 #ifndef PRODUCT
1709   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1710      _indexedFreeList[size].verify_stats();
1711   }
1712 #endif // PRODUCT
1713 }
1714 
1715 // Add chunk to end of last block -- if it's the largest
1716 // block -- and update BOT and census data. We would
1717 // of course have preferred to coalesce it with the
1718 // last block, but it's currently less expensive to find the
1719 // largest block than it is to find the last.
1720 void
1721 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1722   HeapWord* chunk, size_t     size) {
1723   // check that the chunk does lie in this space!
1724   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1725   // One of the parallel gc task threads may be here
1726   // whilst others are allocating.
1727   Mutex* lock = NULL;
1728   if (ParallelGCThreads != 0) {
1729     lock = &_parDictionaryAllocLock;
1730   }
1731   FreeChunk* ec;
1732   {
1733     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1734     ec = dictionary()->findLargestDict();  // get largest block
1735     if (ec != NULL && ec->end() == chunk) {
1736       // It's a coterminal block - we can coalesce.
1737       size_t old_size = ec->size();
1738       coalDeath(old_size);
1739       removeChunkFromDictionary(ec);
1740       size += old_size;
1741     } else {
1742       ec = (FreeChunk*)chunk;
1743     }
1744   }
1745   ec->setSize(size);
1746   debug_only(ec->mangleFreed(size));
1747   if (size < SmallForDictionary) {
1748     lock = _indexedFreeListParLocks[size];
1749   }
1750   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1751   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1752   // record the birth under the lock since the recording involves
1753   // manipulation of the list on which the chunk lives and
1754   // if the chunk is allocated and is the last on the list,
1755   // the list can go away.
1756   coalBirth(size);
1757 }
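
     // Worked example with illustrative numbers: if the dictionary's
     // largest block is 200 words and ends exactly where the 50-word chunk
     // being added begins, the two are coalesced: a coalesce death is
     // recorded for size 200, the merged 250-word block is re-added with a
     // repaired BOT entry, and a coalesce birth is recorded for size 250.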
1758 
1759 void
1760 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1761                                               size_t     size) {
1762   // check that the chunk does lie in this space!
1763   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1764   assert_locked();
1765   _bt.verify_single_block(chunk, size);
1766 
1767   FreeChunk* fc = (FreeChunk*) chunk;
1768   fc->setSize(size);
1769   debug_only(fc->mangleFreed(size));
1770   if (size < SmallForDictionary) {
1771     returnChunkToFreeList(fc);
1772   } else {
1773     returnChunkToDictionary(fc);
1774   }
1775 }
1776 
1777 void
1778 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1779   size_t size, bool coalesced) {
1780   assert_locked();
1781   assert(chunk != NULL, "null chunk");
1782   if (coalesced) {
1783     // repair BOT
1784     _bt.single_block(chunk, size);
1785   }
1786   addChunkToFreeLists(chunk, size);
1787 }
1788 
1789 // We _must_ find the purported chunk on our free lists;
1790 // we assert if we don't.
1791 void
1792 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1793   size_t size = fc->size();
1794   assert_locked();
1795   debug_only(verifyFreeLists());
1796   if (size < SmallForDictionary) {
1797     removeChunkFromIndexedFreeList(fc);
1798   } else {
1799     removeChunkFromDictionary(fc);
1800   }
1801   _bt.verify_single_block((HeapWord*)fc, size);
1802   debug_only(verifyFreeLists());
1803 }
1804 
1805 void
1806 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1807   size_t size = fc->size();
1808   assert_locked();
1809   assert(fc != NULL, "null chunk");
1810   _bt.verify_single_block((HeapWord*)fc, size);
1811   _dictionary->removeChunk(fc);
1812   // adjust _unallocated_block upward, as necessary
1813   _bt.allocated((HeapWord*)fc, size);
1814 }
1815 
1816 void
1817 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1818   assert_locked();
1819   size_t size = fc->size();
1820   _bt.verify_single_block((HeapWord*)fc, size);
1821   NOT_PRODUCT(
1822     if (FLSVerifyIndexTable) {
1823       verifyIndexedFreeList(size);
1824     }
1825   )
1826   _indexedFreeList[size].removeChunk(fc);
1827   debug_only(fc->clearNext());
1828   debug_only(fc->clearPrev());
1829   NOT_PRODUCT(
1830     if (FLSVerifyIndexTable) {
1831       verifyIndexedFreeList(size);
1832     }
1833   )
1834 }
1835 
1836 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1837   /* A hint is the next larger size that has a surplus.
1838      Start search at a size large enough to guarantee that
1839      the excess is >= MinChunkSize. */
1840   size_t start = align_object_size(numWords + MinChunkSize);
1841   if (start < IndexSetSize) {
1842     FreeList* it   = _indexedFreeList;
1843     size_t    hint = _indexedFreeList[start].hint();
1844     while (hint < IndexSetSize) {
1845       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1846       FreeList *fl = &_indexedFreeList[hint];
1847       if (fl->surplus() > 0 && fl->head() != NULL) {
1848         // Found a list with surplus, reset original hint
1849         // and split out a free chunk which is returned.
1850         _indexedFreeList[start].set_hint(hint);
1851         FreeChunk* res = getFromListGreater(fl, numWords);
1852         assert(res == NULL || res->isFree(),
1853           "Should be returning a free chunk");
1854         return res;
1855       }
1856       hint = fl->hint(); /* keep looking */
1857     }
1858     /* None found. */
1859     it[start].set_hint(IndexSetSize);
1860   }
1861   return NULL;
1862 }
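
     // Worked example of the hint chain, with illustrative numbers: a
     // request for numWords = 10 with MinChunkSize = 4 starts the search at
     // size 14; if _indexedFreeList[14].hint() is 20 and the size-20 list
     // has a surplus, the hint for size 14 is reset to 20 and a 20-word
     // chunk is split into the 10-word block that is returned plus a
     // 10-word remainder that goes back on the free lists.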
1863 
1864 /* Requires fl->size >= numWords + MinChunkSize */
1865 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
1866   size_t numWords) {
1867   FreeChunk *curr = fl->head();
1868   assert(curr != NULL, "List is empty");
1869   size_t oldNumWords = curr->size();
1870   assert(numWords >= MinChunkSize, "Word size is too small");
1871   assert(oldNumWords >= numWords + MinChunkSize,
1872         "Size of chunks in the list is too small");
1873 
1874   fl->removeChunk(curr);
1875   // recorded indirectly by splitChunkAndReturnRemainder -
1876   // smallSplit(oldNumWords, numWords);
1877   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1878   // Does anything have to be done for the remainder in terms of
1879   // fixing the card table?
1880   assert(new_chunk == NULL || new_chunk->isFree(),
1881     "Should be returning a free chunk");
1882   return new_chunk;
1883 }
1884 
1885 FreeChunk*
1886 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1887   size_t new_size) {
1888   assert_locked();
1889   size_t size = chunk->size();
1890   assert(size > new_size, "Split from a smaller block?");
1891   assert(is_aligned(chunk), "alignment problem");
1892   assert(size == adjustObjectSize(size), "alignment problem");
1893   size_t rem_size = size - new_size;
1894   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1895   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1896   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1897   assert(is_aligned(ffc), "alignment problem");
1898   ffc->setSize(rem_size);
1899   ffc->linkNext(NULL);
1900   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
1901   // Above must occur before BOT is updated below.
1902   // adjust block offset table
1903   OrderAccess::storestore();
1904   assert(chunk->isFree() && ffc->isFree(), "Error");
1905   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1906   if (rem_size < SmallForDictionary) {
1907     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1908     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1909     returnChunkToFreeList(ffc);
1910     split(size, rem_size);
1911     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1912   } else {
1913     returnChunkToDictionary(ffc);
1914     split(size, rem_size);
1915   }
1916   chunk->setSize(new_size);
1917   return chunk;
1918 }
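
     // Worked example with illustrative numbers: splitting a free 64-word
     // chunk at address 'a' with new_size = 24 creates a 40-word free chunk
     // at a + 24 that is returned to the indexed lists or the dictionary
     // (depending on whether 40 is below SmallForDictionary), records the
     // split in the census, updates the BOT at the new boundary, and hands
     // the resized 24-word chunk at 'a' back to the caller.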
1919 
1920 void
1921 CompactibleFreeListSpace::sweep_completed() {
1922   // Now that space is probably plentiful, refill linear
1923   // allocation blocks as needed.
1924   refillLinearAllocBlocksIfNeeded();
1925 }
1926 
1927 void
1928 CompactibleFreeListSpace::gc_prologue() {
1929   assert_locked();
1930   if (PrintFLSStatistics != 0) {
1931     gclog_or_tty->print("Before GC:\n");
1932     reportFreeListStatistics();
1933   }
1934   refillLinearAllocBlocksIfNeeded();
1935 }
1936 
1937 void
1938 CompactibleFreeListSpace::gc_epilogue() {
1939   assert_locked();
1940   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1941     if (_smallLinearAllocBlock._word_size == 0)
1942       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1943   }
1944   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1945   _promoInfo.stopTrackingPromotions();
1946   repairLinearAllocationBlocks();
1947   // Print Space's stats
1948   if (PrintFLSStatistics != 0) {
1949     gclog_or_tty->print("After GC:\n");
1950     reportFreeListStatistics();
1951   }
1952 }
1953 
1954 // Iteration support, mostly delegated from a CMS generation
1955 
1956 void CompactibleFreeListSpace::save_marks() {
1957   // mark the "end" of the used space at the time of this call;
1958   // note, however, that promoted objects from this point
1959   // on are tracked in the _promoInfo below.
1960   set_saved_mark_word(unallocated_block());
1961   // inform allocator that promotions should be tracked.
1962   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1963   _promoInfo.startTrackingPromotions();
1964 }
1965 
1966 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1967   assert(_promoInfo.tracking(), "No preceding save_marks?");
1968   assert(SharedHeap::heap()->n_par_threads() == 0,
1969          "Shouldn't be called if using parallel gc.");
1970   return _promoInfo.noPromotions();
1971 }
1972 
1973 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1974                                                                             \
1975 void CompactibleFreeListSpace::                                             \
1976 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1977   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
1978          "Shouldn't be called (yet) during parallel part of gc.");          \
1979   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1980   /*                                                                        \
1981    * This also restores any displaced headers and removes the elements from \
1982    * the iteration set as they are processed, so that we have a clean slate \
1983    * at the end of the iteration. Note, thus, that if new objects are       \
1984    * promoted as a result of the iteration they are iterated over as well.  \
1985    */                                                                       \
1986   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1987 }
1988 
1989 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1990 
1991 
1992 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
1993   // ugghh... how would one do this efficiently for a non-contiguous space?
1994   guarantee(false, "NYI");
1995 }
1996 
1997 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1998   return _smallLinearAllocBlock._word_size == 0;
1999 }
2000 
2001 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2002   // Fix up linear allocation blocks to look like free blocks
2003   repairLinearAllocBlock(&_smallLinearAllocBlock);
2004 }
2005 
2006 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2007   assert_locked();
2008   if (blk->_ptr != NULL) {
2009     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2010            "Minimum block size requirement");
2011     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2012     fc->setSize(blk->_word_size);
2013     fc->linkPrev(NULL);   // mark as free
2014     fc->dontCoalesce();
2015     assert(fc->isFree(), "just marked it free");
2016     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2017   }
2018 }
2019 
2020 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2021   assert_locked();
2022   if (_smallLinearAllocBlock._ptr == NULL) {
2023     assert(_smallLinearAllocBlock._word_size == 0,
2024       "Size of linAB should be zero if the ptr is NULL");
2025     // Reset the linAB refill and allocation size limit.
2026     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2027   }
2028   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2029 }
2030 
2031 void
2032 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2033   assert_locked();
2034   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2035          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2036          "blk invariant");
2037   if (blk->_ptr == NULL) {
2038     refillLinearAllocBlock(blk);
2039   }
2040   if (PrintMiscellaneous && Verbose) {
2041     if (blk->_word_size == 0) {
2042       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2043     }
2044   }
2045 }
2046 
2047 void
2048 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2049   assert_locked();
2050   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2051          "linear allocation block should be empty");
2052   FreeChunk* fc;
2053   if (blk->_refillSize < SmallForDictionary &&
2054       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2055     // A linAB's strategy might be to use small sizes to reduce
2056     // fragmentation but still get the benefits of allocation from a
2057     // linAB.
2058   } else {
2059     fc = getChunkFromDictionary(blk->_refillSize);
2060   }
2061   if (fc != NULL) {
2062     blk->_ptr  = (HeapWord*)fc;
2063     blk->_word_size = fc->size();
2064     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2065   }
2066 }
2067 
2068 // Support for concurrent collection policy decisions.
2069 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2070   // In the future we might want to add in fragmentation stats --
2071   // including erosion of the "mountain" into this decision as well.
2072   return !adaptive_freelists() && linearAllocationWouldFail();
2073 }
2074 
2075 // Support for compaction
2076 
2077 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2078   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2079   // prepare_for_compaction() uses the space between live objects
2080   // so that a later phase can skip dead space quickly.  As a result,
2081   // verification of the free lists doesn't work after this point.
2082 }
2083 
2084 #define obj_size(q) adjustObjectSize(oop(q)->size())
2085 #define adjust_obj_size(s) adjustObjectSize(s)
2086 
2087 void CompactibleFreeListSpace::adjust_pointers() {
2088   // In other versions of adjust_pointers(), a bail out
2089   // based on the amount of live data in the generation
2090   // (i.e., if 0, bail out) may be used.
2091   // Cannot test used() == 0 here because the free lists have already
2092   // been mangled by the compaction.
2093 
2094   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2095   // See note about verification in prepare_for_compaction().
2096 }
2097 
2098 void CompactibleFreeListSpace::compact() {
2099   SCAN_AND_COMPACT(obj_size);
2100 }
2101 
2102 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2103 // where fbs is free block sizes
2104 double CompactibleFreeListSpace::flsFrag() const {
2105   size_t itabFree = totalSizeInIndexedFreeLists();
2106   double frag = 0.0;
2107   size_t i;
2108 
2109   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2110     double sz  = i;
2111     frag      += _indexedFreeList[i].count() * (sz * sz);
2112   }
2113 
2114   double totFree = itabFree +
2115                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
2116   if (totFree > 0) {
2117     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2118             (totFree * totFree));
2119     frag = (double)1.0  - frag;
2120   } else {
2121     assert(frag == 0.0, "Follows from totFree == 0");
2122   }
2123   return frag;
2124 }
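
     // Worked example with illustrative numbers: 64 free words held as one
     // 64-word block give frag = 1 - 64*64/(64*64) = 0 (no fragmentation),
     // while the same 64 words held as sixteen 4-word blocks give
     // frag = 1 - 16*4*4/(64*64) = 1 - 256/4096 = 0.9375.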
2125 
2126 void CompactibleFreeListSpace::beginSweepFLCensus(
2127   float inter_sweep_current,
2128   float inter_sweep_estimate,
2129   float intra_sweep_estimate) {
2130   assert_locked();
2131   size_t i;
2132   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2133     FreeList* fl    = &_indexedFreeList[i];
2134     if (PrintFLSStatistics > 1) {
2135       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2136     }
2137     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2138     fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2139     fl->set_beforeSweep(fl->count());
2140     fl->set_bfrSurp(fl->surplus());
2141   }
2142   _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
2143                                     inter_sweep_current,
2144                                     inter_sweep_estimate,
2145                                     intra_sweep_estimate);
2146 }
2147 
2148 void CompactibleFreeListSpace::setFLSurplus() {
2149   assert_locked();
2150   size_t i;
2151   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2152     FreeList *fl = &_indexedFreeList[i];
2153     fl->set_surplus(fl->count() -
2154                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2155   }
2156 }
2157 
2158 void CompactibleFreeListSpace::setFLHints() {
2159   assert_locked();
2160   size_t i;
2161   size_t h = IndexSetSize;
2162   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2163     FreeList *fl = &_indexedFreeList[i];
2164     fl->set_hint(h);
2165     if (fl->surplus() > 0) {
2166       h = i;
2167     }
2168   }
2169 }
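
     // Worked example with illustrative numbers: if, scanning down from
     // IndexSetSize, only the size-48 and size-16 lists have a surplus,
     // then sizes 48 and above keep a hint of IndexSetSize, sizes 16
     // through 47 get hint 48, and sizes below 16 get hint 16, so that
     // bestFitSmall() can jump straight to the next larger list with spare
     // chunks.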
2170 
2171 void CompactibleFreeListSpace::clearFLCensus() {
2172   assert_locked();
2173   size_t i;
2174   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2175     FreeList *fl = &_indexedFreeList[i];
2176     fl->set_prevSweep(fl->count());
2177     fl->set_coalBirths(0);
2178     fl->set_coalDeaths(0);
2179     fl->set_splitBirths(0);
2180     fl->set_splitDeaths(0);
2181   }
2182 }
2183 
2184 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2185   if (PrintFLSStatistics > 0) {
2186     HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
2187     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2188                            largestAddr);
2189   }
2190   setFLSurplus();
2191   setFLHints();
2192   if (PrintGC && PrintFLSCensus > 0) {
2193     printFLCensus(sweep_count);
2194   }
2195   clearFLCensus();
2196   assert_locked();
2197   _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
2198 }
2199 
2200 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2201   if (size < SmallForDictionary) {
2202     FreeList *fl = &_indexedFreeList[size];
2203     return (fl->coalDesired() < 0) ||
2204            ((int)fl->count() > fl->coalDesired());
2205   } else {
2206     return dictionary()->coalDictOverPopulated(size);
2207   }
2208 }
2209 
2210 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2211   assert(size < SmallForDictionary, "Size too large for indexed list");
2212   FreeList *fl = &_indexedFreeList[size];
2213   fl->increment_coalBirths();
2214   fl->increment_surplus();
2215 }
2216 
2217 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2218   assert(size < SmallForDictionary, "Size too large for indexed list");
2219   FreeList *fl = &_indexedFreeList[size];
2220   fl->increment_coalDeaths();
2221   fl->decrement_surplus();
2222 }
2223 
2224 void CompactibleFreeListSpace::coalBirth(size_t size) {
2225   if (size  < SmallForDictionary) {
2226     smallCoalBirth(size);
2227   } else {
2228     dictionary()->dictCensusUpdate(size,
2229                                    false /* split */,
2230                                    true /* birth */);
2231   }
2232 }
2233 
2234 void CompactibleFreeListSpace::coalDeath(size_t size) {
2235   if (size < SmallForDictionary) {
2236     smallCoalDeath(size);
2237   } else {
2238     dictionary()->dictCensusUpdate(size,
2239                                    false /* split */,
2240                                    false /* birth */);
2241   }
2242 }
2243 
2244 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2245   assert(size < SmallForDictionary, "Size too large for indexed list");
2246   FreeList *fl = &_indexedFreeList[size];
2247   fl->increment_splitBirths();
2248   fl->increment_surplus();
2249 }
2250 
2251 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2252   assert(size < SmallForDictionary, "Size too large for indexed list");
2253   FreeList *fl = &_indexedFreeList[size];
2254   fl->increment_splitDeaths();
2255   fl->decrement_surplus();
2256 }
2257 
2258 void CompactibleFreeListSpace::splitBirth(size_t size) {
2259   if (size  < SmallForDictionary) {
2260     smallSplitBirth(size);
2261   } else {
2262     dictionary()->dictCensusUpdate(size,
2263                                    true /* split */,
2264                                    true /* birth */);
2265   }
2266 }
2267 
2268 void CompactibleFreeListSpace::splitDeath(size_t size) {
2269   if (size  < SmallForDictionary) {
2270     smallSplitDeath(size);
2271   } else {
2272     dictionary()->dictCensusUpdate(size,
2273                                    true /* split */,
2274                                    false /* birth */);
2275   }
2276 }
2277 
2278 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2279   size_t to2 = from - to1;
2280   splitDeath(from);
2281   splitBirth(to1);
2282   splitBirth(to2);
2283 }
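
     // Worked example with illustrative numbers: split(100, 40) records a
     // split death for size 100 and split births for sizes 40 and 60,
     // keeping the census consistent with the chunk splits performed above.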
2284 
2285 void CompactibleFreeListSpace::print() const {
2286   Space::print_on(tty);
2287 }
2288 
2289 void CompactibleFreeListSpace::prepare_for_verify() {
2290   assert_locked();
2291   repairLinearAllocationBlocks();
2292   // Verify that the SpoolBlocks look like free blocks of
2293   // appropriate sizes... To be done ...
2294 }
2295 
2296 class VerifyAllBlksClosure: public BlkClosure {
2297  private:
2298   const CompactibleFreeListSpace* _sp;
2299   const MemRegion                 _span;
2300   HeapWord*                       _last_addr;
2301   size_t                          _last_size;
2302   bool                            _last_was_obj;
2303   bool                            _last_was_live;
2304 
2305  public:
2306   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2307     MemRegion span) :  _sp(sp), _span(span),
2308                        _last_addr(NULL), _last_size(0),
2309                        _last_was_obj(false), _last_was_live(false) { }
2310 
2311   virtual size_t do_blk(HeapWord* addr) {
2312     size_t res;
2313     bool   was_obj  = false;
2314     bool   was_live = false;
2315     if (_sp->block_is_obj(addr)) {
2316       was_obj = true;
2317       oop p = oop(addr);
2318       guarantee(p->is_oop(), "Should be an oop");
2319       res = _sp->adjustObjectSize(p->size());
2320       if (_sp->obj_is_alive(addr)) {
2321         was_live = true;
2322         p->verify();
2323       }
2324     } else {
2325       FreeChunk* fc = (FreeChunk*)addr;
2326       res = fc->size();
2327       if (FLSVerifyLists && !fc->cantCoalesce()) {
2328         guarantee(_sp->verifyChunkInFreeLists(fc),
2329                   "Chunk should be on a free list");
2330       }
2331     }
2332     if (res == 0) {
2333       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2334       gclog_or_tty->print_cr(
2335         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2336         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2337         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2338         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2339       _sp->print_on(gclog_or_tty);
2340       guarantee(false, "Seppuku!");
2341     }
2342     _last_addr = addr;
2343     _last_size = res;
2344     _last_was_obj  = was_obj;
2345     _last_was_live = was_live;
2346     return res;
2347   }
2348 };
2349 
2350 class VerifyAllOopsClosure: public OopClosure {
2351  private:
2352   const CMSCollector*             _collector;
2353   const CompactibleFreeListSpace* _sp;
2354   const MemRegion                 _span;
2355   const bool                      _past_remark;
2356   const CMSBitMap*                _bit_map;
2357 
2358  protected:
2359   void do_oop(void* p, oop obj) {
2360     if (_span.contains(obj)) { // the interior oop points into CMS heap
2361       if (!_span.contains(p)) { // reference from outside CMS heap
2362         // Should be a valid object; the first disjunct below allows
2363         // us to sidestep an assertion in block_is_obj() that insists
2364         // that p be in _sp. Note that several generations (and spaces)
2365         // are spanned by _span (CMS heap) above.
2366         guarantee(!_sp->is_in_reserved(obj) ||
2367                   _sp->block_is_obj((HeapWord*)obj),
2368                   "Should be an object");
2369         guarantee(obj->is_oop(), "Should be an oop");
2370         obj->verify();
2371         if (_past_remark) {
2372           // Remark has been completed, the object should be marked
2373           _bit_map->isMarked((HeapWord*)obj);
2374         }
2375       } else { // reference within CMS heap
2376         if (_past_remark) {
2377           // Remark has been completed -- so the referent should have
2378           // been marked, if referring object is.
2379           if (_bit_map->isMarked(_collector->block_start(p))) {
2380             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2381           }
2382         }
2383       }
2384     } else if (_sp->is_in_reserved(p)) {
2385       // the reference is from FLS, and points out of FLS
2386       guarantee(obj->is_oop(), "Should be an oop");
2387       obj->verify();
2388     }
2389   }
2390 
2391   template <class T> void do_oop_work(T* p) {
2392     T heap_oop = oopDesc::load_heap_oop(p);
2393     if (!oopDesc::is_null(heap_oop)) {
2394       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2395       do_oop(p, obj);
2396     }
2397   }
2398 
2399  public:
2400   VerifyAllOopsClosure(const CMSCollector* collector,
2401     const CompactibleFreeListSpace* sp, MemRegion span,
2402     bool past_remark, CMSBitMap* bit_map) :
2403     OopClosure(), _collector(collector), _sp(sp), _span(span),
2404     _past_remark(past_remark), _bit_map(bit_map) { }
2405 
2406   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2407   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2408 };
2409 
2410 void CompactibleFreeListSpace::verify(bool ignored) const {
2411   assert_lock_strong(&_freelistLock);
2412   verify_objects_initialized();
2413   MemRegion span = _collector->_span;
2414   bool past_remark = (_collector->abstract_state() ==
2415                       CMSCollector::Sweeping);
2416 
2417   ResourceMark rm;
2418   HandleMark  hm;
2419 
2420   // Check integrity of CFL data structures
2421   _promoInfo.verify();
2422   _dictionary->verify();
2423   if (FLSVerifyIndexTable) {
2424     verifyIndexedFreeLists();
2425   }
2426   // Check integrity of all objects and free blocks in space
2427   {
2428     VerifyAllBlksClosure cl(this, span);
2429     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2430   }
2431   // Check that all references in the heap to FLS
2432   // are to valid objects in FLS or that references in
2433   // FLS are to valid objects elsewhere in the heap
2434   if (FLSVerifyAllHeapReferences)
2435   {
2436     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2437       _collector->markBitMap());
2438     CollectedHeap* ch = Universe::heap();
2439     ch->oop_iterate(&cl);              // all oops in generations
2440     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
2441   }
2442 
2443   if (VerifyObjectStartArray) {
2444     // Verify the block offset table
2445     _bt.verify();
2446   }
2447 }
2448 
2449 #ifndef PRODUCT
2450 void CompactibleFreeListSpace::verifyFreeLists() const {
2451   if (FLSVerifyLists) {
2452     _dictionary->verify();
2453     verifyIndexedFreeLists();
2454   } else {
2455     if (FLSVerifyDictionary) {
2456       _dictionary->verify();
2457     }
2458     if (FLSVerifyIndexTable) {
2459       verifyIndexedFreeLists();
2460     }
2461   }
2462 }
2463 #endif
2464 
2465 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2466   size_t i = 0;
2467   for (; i < MinChunkSize; i++) {
2468     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2469   }
2470   for (; i < IndexSetSize; i++) {
2471     verifyIndexedFreeList(i);
2472   }
2473 }
2474 
2475 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2476   FreeChunk* fc   =  _indexedFreeList[size].head();
2477   FreeChunk* tail =  _indexedFreeList[size].tail();
2478   size_t    num = _indexedFreeList[size].count();
2479   size_t      n = 0;
2480   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
2481   for (; fc != NULL; fc = fc->next(), n++) {
2482     guarantee(fc->size() == size, "Size inconsistency");
2483     guarantee(fc->isFree(), "!free?");
2484     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2485     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2486   }
2487   guarantee(n == num, "Incorrect count");
2488 }
2489 
2490 #ifndef PRODUCT
2491 void CompactibleFreeListSpace::checkFreeListConsistency() const {
2492   assert(_dictionary->minSize() <= IndexSetSize,
2493     "Some sizes can't be allocated without recourse to"
2494     " linear allocation buffers");
2495   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
2496     "else MIN_TREE_CHUNK_SIZE is wrong");
2497   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
2498          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
2499   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
2500       "Some for-loops may be incorrectly initialized");
2501   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
2502       "For-loops that iterate over IndexSet with stride 2 may be wrong");
2503 }
2504 #endif
2505 
2506 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2507   assert_lock_strong(&_freelistLock);
2508   FreeList total;
2509   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2510   FreeList::print_labels_on(gclog_or_tty, "size");
2511   size_t totalFree = 0;
2512   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2513     const FreeList *fl = &_indexedFreeList[i];
2514     totalFree += fl->count() * fl->size();
2515     if (i % (40*IndexSetStride) == 0) {
2516       FreeList::print_labels_on(gclog_or_tty, "size");
2517     }
2518     fl->print_on(gclog_or_tty);
2519     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
2520     total.set_surplus(    total.surplus()     + fl->surplus()    );
2521     total.set_desired(    total.desired()     + fl->desired()    );
2522     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
2523     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
2524     total.set_count(      total.count()       + fl->count()      );
2525     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
2526     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
2527     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
2528     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
2529   }
2530   total.print_on(gclog_or_tty, "TOTAL");
2531   gclog_or_tty->print_cr("Total free in indexed lists "
2532                          SIZE_FORMAT " words", totalFree);
2533   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2534     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
2535             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
2536     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2537   _dictionary->printDictCensus();
2538 }
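
     // Worked example of the summary line, with illustrative numbers: if
     // the previous sweep left 1000 chunks on the indexed lists and this
     // cycle recorded 300 births and 200 deaths (split plus coalesce),
     // growth = (300 - 200)/1000 = 0.1; with desired = 1200 and
     // count = 1000, deficit = (1200 - 1000)/1200, or about 0.167.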
2539 
2540 ///////////////////////////////////////////////////////////////////////////
2541 // CFLS_LAB
2542 ///////////////////////////////////////////////////////////////////////////
2543 
2544 #define VECTOR_257(x)                                                                                  \
2545   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2546   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2547      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2548      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2549      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2550      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2551      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2552      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2553      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2554      x }
2555 
2556 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2557 // OldPLABSize, whose static default is different; if overridden at the
2558 // command-line, this will get reinitialized via a call to
2559 // modify_initialization() below.
2560 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2561   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2562 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2563 int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2564 
2565 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2566   _cfls(cfls)
2567 {
2568   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2569   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2570        i < CompactibleFreeListSpace::IndexSetSize;
2571        i += CompactibleFreeListSpace::IndexSetStride) {
2572     _indexedFreeList[i].set_size(i);
2573     _num_blocks[i] = 0;
2574   }
2575 }
2576 
2577 static bool _CFLS_LAB_modified = false;
2578 
2579 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2580   assert(!_CFLS_LAB_modified, "Call only once");
2581   _CFLS_LAB_modified = true;
2582   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2583        i < CompactibleFreeListSpace::IndexSetSize;
2584        i += CompactibleFreeListSpace::IndexSetStride) {
2585     _blocks_to_claim[i].modify(n, wt, true /* force */);
2586   }
2587 }
2588 
2589 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2590   FreeChunk* res;
2591   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2592   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2593     // This locking manages sync with other large object allocations.
2594     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2595                     Mutex::_no_safepoint_check_flag);
2596     res = _cfls->getChunkFromDictionaryExact(word_sz);
2597     if (res == NULL) return NULL;
2598   } else {
2599     FreeList* fl = &_indexedFreeList[word_sz];
2600     if (fl->count() == 0) {
2601       // Attempt to refill this local free list.
2602       get_from_global_pool(word_sz, fl);
2603       // If it didn't work, give up.
2604       if (fl->count() == 0) return NULL;
2605     }
2606     res = fl->getChunkAtHead();
2607     assert(res != NULL, "Why was count non-zero?");
2608   }
2609   res->markNotFree();
2610   assert(!res->isFree(), "shouldn't be marked free");
2611   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2612   // mangle a just allocated object with a distinct pattern.
2613   debug_only(res->mangleAllocated(word_sz));
2614   return (HeapWord*)res;
2615 }
2616 
2617 // Get a chunk of blocks of the right size and update related
2618 // book-keeping stats
2619 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
2620   // Get the #blocks we want to claim
2621   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2622   assert(n_blks > 0, "Error");
2623   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2624   // In some cases, when the application has a phase change,
2625   // there may be a sudden and sharp shift in the object survival
2626   // profile, and updating the counts at the end of a scavenge
2627   // may not be quick enough, giving rise to large scavenge pauses
2628   // during these phase changes. It is beneficial to detect such
2629   // changes on-the-fly during a scavenge and avoid such a phase-change
2630   // pothole. The following code is a heuristic attempt to do that.
2631   // It is protected by a product flag until we have gained
2632   // enough experience with this heuristic and fine-tuned its behaviour.
2633   // WARNING: This might increase fragmentation if we overreact to
2634   // small spikes, so some kind of historical smoothing based on
2635   // previous experience with the greater reactivity might be useful.
2636   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2637   // default.
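       // Illustrative numbers (hypothetical): with n_blks == 16 and a tolerance
       // threshold (CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks) of
       // 1024, a LAB that has already taken 3000 blocks of this size gets
       // multiple == 2, so n_blks below grows by CMSOldPLABReactivityFactor*2*16,
       // subject to the CMSOldPLABMax cap.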
2638   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2639     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2640     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2641     n_blks = MIN2(n_blks, CMSOldPLABMax);
2642   }
2643   assert(n_blks > 0, "Error");
2644   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2645   // Update stats table entry for this block size
2646   _num_blocks[word_sz] += fl->count();
2647 }
2648 
2649 void CFLS_LAB::compute_desired_plab_size() {
2650   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2651        i < CompactibleFreeListSpace::IndexSetSize;
2652        i += CompactibleFreeListSpace::IndexSetStride) {
2653     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2654            "Counter inconsistency");
2655     if (_global_num_workers[i] > 0) {
2656       // Need to smooth wrt historical average
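           // The sampled value is blocks consumed per worker per refill during
           // the last scavenge, clamped to [CMSOldPLABMin, CMSOldPLABMax]; the
           // weighted average is what get_from_global_pool() reads next cycle.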
2657       if (ResizeOldPLAB) {
2658         _blocks_to_claim[i].sample(
2659           MAX2((size_t)CMSOldPLABMin,
2660           MIN2((size_t)CMSOldPLABMax,
2661                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2662       }
2663       // Reset counters for next round
2664       _global_num_workers[i] = 0;
2665       _global_num_blocks[i] = 0;
2666       if (PrintOldPLAB) {
2667         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2668       }
2669     }
2670   }
2671 }
2672 
2673 void CFLS_LAB::retire(int tid) {
2674   // We run this single threaded with the world stopped;
2675   // so no need for locks and such.
2676 #define CFLS_LAB_PARALLEL_ACCESS 0
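     // With CFLS_LAB_PARALLEL_ACCESS == 0 (the setting here), retire() runs in
     // the VM thread with the world stopped, walks the lists in the usual
     // ascending order, and takes no per-list locks; compiling it to 1 switches
     // to a descending walk guarded by _indexedFreeListParLocks[i].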
2677   NOT_PRODUCT(Thread* t = Thread::current();)
2678   assert(Thread::current()->is_VM_thread(), "Error");
2679   assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
2680          "Would access an uninitialized slot below");
2681 #if CFLS_LAB_PARALLEL_ACCESS
2682   for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
2683        i > 0;
2684        i -= CompactibleFreeListSpace::IndexSetStride) {
2685 #else // CFLS_LAB_PARALLEL_ACCESS
2686   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2687        i < CompactibleFreeListSpace::IndexSetSize;
2688        i += CompactibleFreeListSpace::IndexSetStride) {
2689 #endif // !CFLS_LAB_PARALLEL_ACCESS
2690     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2691            "Can't retire more than what we obtained");
2692     if (_num_blocks[i] > 0) {
2693       size_t num_retire =  _indexedFreeList[i].count();
2694       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2695       {
2696 #if CFLS_LAB_PARALLEL_ACCESS
2697         MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2698                         Mutex::_no_safepoint_check_flag);
2699 #endif // CFLS_LAB_PARALLEL_ACCESS
2700         // Update globals stats for num_blocks used
2701         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2702         _global_num_workers[i]++;
2703         assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
2704         if (num_retire > 0) {
2705           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2706           // Reset this list.
2707           _indexedFreeList[i] = FreeList();
2708           _indexedFreeList[i].set_size(i);
2709         }
2710       }
2711       if (PrintOldPLAB) {
2712         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2713                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2714       }
2715       // Reset stats for next round
2716       _num_blocks[i]         = 0;
2717     }
2718   }
2719 }
2720 
2721 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
2722   assert(fl->count() == 0, "Precondition.");
2723   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2724          "Precondition");
2725 
2726   // We'll try all multiples of word_sz in the indexed set, starting with
2727   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2728   // then try getting a big chunk and splitting it.
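       // Illustrative (hypothetical) example: for word_sz == 2 and n == 500 we
       // first try the 2-word list; if CMSSplitIndexedFreeListBlocks, we then
       // try 4-word chunks split 2 ways, 6-word chunks split 3 ways, and so on
       // while k*word_sz stays below IndexSetSize, before falling back to
       // carving blocks out of a dictionary chunk.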
2729   {
2730     bool found;
2731     int  k;
2732     size_t cur_sz;
2733     for (k = 1, cur_sz = k * word_sz, found = false;
2734          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2735          (CMSSplitIndexedFreeListBlocks || k <= 1);
2736          k++, cur_sz = k * word_sz) {
2737       FreeList fl_for_cur_sz;  // Empty.
2738       fl_for_cur_sz.set_size(cur_sz);
2739       {
2740         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2741                         Mutex::_no_safepoint_check_flag);
2742         FreeList* gfl = &_indexedFreeList[cur_sz];
2743         if (gfl->count() != 0) {
2744           // nn is the number of chunks of size cur_sz that
2745           // we'd need to split k-ways each, in order to create
2746           // "n" chunks of size word_sz each.
2747           const size_t nn = MAX2(n/k, (size_t)1);
2748           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2749           found = true;
2750           if (k > 1) {
2751             // Update split death stats for the cur_sz-size blocks list:
2752             // we increment the split death count by the number of blocks
2753             // we just took from the cur_sz-size blocks list and which
2754             // we will be splitting below.
2755             ssize_t deaths = gfl->splitDeaths() +
2756                              fl_for_cur_sz.count();
2757             gfl->set_splitDeaths(deaths);
2758           }
2759         }
2760       }
2761       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2762       if (found) {
2763         if (k == 1) {
2764           fl->prepend(&fl_for_cur_sz);
2765         } else {
2766           // Divide each block on fl_for_cur_sz up k ways.
2767           FreeChunk* fc;
2768           while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
2769             // Must do this in reverse order, so that anybody attempting to
2770             // access the main chunk sees it as a single free block until we
2771             // change it.
2772             size_t fc_size = fc->size();
2773             assert(fc->isFree(), "Error");
2774             for (int i = k-1; i >= 0; i--) {
2775               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2776               assert((i != 0) ||
2777                         ((fc == ffc) && ffc->isFree() &&
2778                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2779                         "Counting error");
2780               ffc->setSize(word_sz);
2781               ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2782               ffc->linkNext(NULL);
2783               // Above must occur before BOT is updated below.
2784               OrderAccess::storestore();
2785               // splitting from the right, fc_size == i * word_sz
2786               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2787               fc_size -= word_sz;
2788               assert(fc_size == i*word_sz, "Error");
2789               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2790               _bt.verify_single_block((HeapWord*)fc, fc_size);
2791               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2792               // Push this on "fl".
2793               fl->returnChunkAtHead(ffc);
2794             }
2795             // TRAP
2796             assert(fl->tail()->next() == NULL, "List invariant.");
2797           }
2798         }
2799         // Update birth stats for this block size.
2800         size_t num = fl->count();
2801         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2802                         Mutex::_no_safepoint_check_flag);
2803         ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
2804         _indexedFreeList[word_sz].set_splitBirths(births);
2805         return;
2806       }
2807     }
2808   }
2809   // Otherwise, we'll split a block from the dictionary.
2810   FreeChunk* fc = NULL;
2811   FreeChunk* rem_fc = NULL;
2812   size_t rem;
2813   {
2814     MutexLockerEx x(parDictionaryAllocLock(),
2815                     Mutex::_no_safepoint_check_flag);
2816     while (n > 0) {
2817       fc = dictionary()->getChunk(MAX2(n * word_sz,
2818                                   _dictionary->minSize()),
2819                                   FreeBlockDictionary::atLeast);
2820       if (fc != NULL) {
2821         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2822         dictionary()->dictCensusUpdate(fc->size(),
2823                                        true /*split*/,
2824                                        false /*birth*/);
2825         break;
2826       } else {
2827         n--;
2828       }
2829     }
2830     if (fc == NULL) return;
2831     // Otherwise, split up that block.
2832     assert((ssize_t)n >= 1, "Control point invariant");
2833     assert(fc->isFree(), "Error: should be a free block");
2834     _bt.verify_single_block((HeapWord*)fc, fc->size());
2835     const size_t nn = fc->size() / word_sz;
2836     n = MIN2(nn, n);
2837     assert((ssize_t)n >= 1, "Control point invariant");
2838     rem = fc->size() - n * word_sz;
2839     // If there is a remainder, and it's too small, allocate one fewer.
2840     if (rem > 0 && rem < MinChunkSize) {
2841       n--; rem += word_sz;
2842     }
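         // Hypothetical numbers: fc->size() == 103, word_sz == 10, n == 12
         // gives nn == 10, n == 10 and rem == 3; if 3 < MinChunkSize we back
         // off to n == 9 and rem == 13 so the remainder stays a valid chunk.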
2843     // Note that at this point we may have n == 0.
2844     assert((ssize_t)n >= 0, "Control point invariant");
2845 
2846     // If n is 0, the chunk fc that was found is not large
2847     // enough to leave a viable remainder.  We are unable to
2848     // allocate even one block.  Return fc to the
2849     // dictionary and return, leaving "fl" empty.
2850     if (n == 0) {
2851       returnChunkToDictionary(fc);
2852       assert(fl->count() == 0, "We never allocated any blocks");
2853       return;
2854     }
2855 
2856     // First return the remainder, if any.
2857     // Note that we hold the lock until we decide if we're going to give
2858     // back the remainder to the dictionary, since a concurrent allocation
2859     // may otherwise see the heap as empty.  (We're willing to take that
2860     // hit if the block is a small block.)
2861     if (rem > 0) {
2862       size_t prefix_size = n * word_sz;
2863       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2864       rem_fc->setSize(rem);
2865       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2866       rem_fc->linkNext(NULL);
2867       // Above must occur before BOT is updated below.
2868       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2869       OrderAccess::storestore();
2870       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2871       assert(fc->isFree(), "Error");
2872       fc->setSize(prefix_size);
2873       if (rem >= IndexSetSize) {
2874         returnChunkToDictionary(rem_fc);
2875         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
2876         rem_fc = NULL;
2877       }
2878       // Otherwise, return it to the small list below.
2879     }
2880   }
2881   if (rem_fc != NULL) {
2882     MutexLockerEx x(_indexedFreeListParLocks[rem],
2883                     Mutex::_no_safepoint_check_flag);
2884     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2885     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
2886     smallSplitBirth(rem);
2887   }
2888   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2889   // Now do the splitting up.
2890   // Must do this in reverse order, so that anybody attempting to
2891   // access the main chunk sees it as a single free block until we
2892   // change it.
2893   size_t fc_size = n * word_sz;
2894   // All but first chunk in this loop
2895   for (ssize_t i = n-1; i > 0; i--) {
2896     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2897     ffc->setSize(word_sz);
2898     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2899     ffc->linkNext(NULL);
2900     // Above must occur before BOT is updated below.
2901     OrderAccess::storestore();
2902     // splitting from the right, fc_size == i * word_sz after the decrement below
2903     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2904     fc_size -= word_sz;
2905     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2906     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2907     _bt.verify_single_block((HeapWord*)fc, fc_size);
2908     // Push this on "fl".
2909     fl->returnChunkAtHead(ffc);
2910   }
2911   // First chunk
2912   assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
2913   // The blocks above should show their new sizes before the first block below
2914   fc->setSize(word_sz);
2915   fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
2916   fc->linkNext(NULL);
2917   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2918   _bt.verify_single_block((HeapWord*)fc, fc->size());
2919   fl->returnChunkAtHead(fc);
2920 
2921   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2922   {
2923     // Update the stats for this block size.
2924     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2925                     Mutex::_no_safepoint_check_flag);
2926     const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
2927     _indexedFreeList[word_sz].set_splitBirths(births);
2928     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2929     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2930   }
2931 
2932   // TRAP
2933   assert(fl->tail()->next() == NULL, "List invariant.");
2934 }
2935 
2936 // Set up the space's par_seq_tasks structure for work claiming
2937 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2938 // XXX Need to suitably abstract and generalize this and the next
2939 // method into one.
2940 void
2941 CompactibleFreeListSpace::
2942 initialize_sequential_subtasks_for_rescan(int n_threads) {
2943   // The "size" of each task is fixed according to rescan_task_size.
2944   assert(n_threads > 0, "Unexpected n_threads argument");
2945   const size_t task_size = rescan_task_size();
2946   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
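       // Hypothetical numbers: a used region of 10,000 words with a task size
       // of 4,096 words yields n_tasks == 3; the last task covers the shorter
       // tail of the region.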
2947   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2948   assert(n_tasks == 0 ||
2949          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2950           (used_region().start() + n_tasks*task_size >= used_region().end())),
2951          "n_tasks calculation incorrect");
2952   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2953   assert(!pst->valid(), "Clobbering existing data?");
2954   // Sets the condition for completion of the subtask (how many threads
2955   // need to finish in order to be done).
2956   pst->set_n_threads(n_threads);
2957   pst->set_n_tasks((int)n_tasks);
2958 }
2959 
2960 // Set up the space's par_seq_tasks structure for work claiming
2961 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2962 void
2963 CompactibleFreeListSpace::
2964 initialize_sequential_subtasks_for_marking(int n_threads,
2965                                            HeapWord* low) {
2966   // The "size" of each task is fixed according to rescan_task_size.
2967   assert(n_threads > 0, "Unexpected n_threads argument");
2968   const size_t task_size = marking_task_size();
2969   assert(task_size > CardTableModRefBS::card_size_in_words &&
2970          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2971          "Otherwise arithmetic below would be incorrect");
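       // A card-multiple task size keeps every task boundary on a card boundary
       // once the span start is card-aligned below, which is what lets us use
       // block_offset_careful() on span boundaries.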
2972   MemRegion span = _gen->reserved();
2973   if (low != NULL) {
2974     if (span.contains(low)) {
2975       // Align low down to  a card boundary so that
2976       // we can use block_offset_careful() on span boundaries.
2977       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2978                                  CardTableModRefBS::card_size);
2979       // Clip span prefix at aligned_low
2980       span = span.intersection(MemRegion(aligned_low, span.end()));
2981     } else if (low > span.end()) {
2982       span = MemRegion(low, low);  // Null region
2983     } // else use entire span
2984   }
2985   assert(span.is_empty() ||
2986          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2987         "span should start at a card boundary");
2988   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2989   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2990   assert(n_tasks == 0 ||
2991          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2992           (span.start() + n_tasks*task_size >= span.end())),
2993          "n_tasks calculation incorrect");
2994   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2995   assert(!pst->valid(), "Clobbering existing data?");
2996   // Sets the condition for completion of the subtask (how many threads
2997   // need to finish in order to be done).
2998   pst->set_n_threads(n_threads);
2999   pst->set_n_tasks((int)n_tasks);
3000 }