1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/liveRange.hpp"
  34 #include "gc/shared/space.inline.hpp"
  35 #include "gc/shared/spaceDecorator.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
  52 // Highest ranked free list lock rank.
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
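     // Illustration of the computation above (the concrete values here are
     // assumptions, not normative): on a 64-bit VM with
     // MinObjAlignmentInBytes == 8 and, say, sizeof(FreeChunk) == 24 bytes,
     // min_chunk_size_in_bytes is 24, MinChunkSize is 24 / 8 == 3 heap
     // words, IndexSetStart is 3 and IndexSetStride is MinObjAlignment.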
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  77   _bt(bs, mr),
  78   // Free list locks are in the range of values taken by _lockRank,
  79   // which currently is [_leaf+2, _leaf+3].
  80   // Note: this requires that CFLspace c'tors are called serially in
  81   // the order in which the locks are acquired in the program text.
  82   // This is true today.
  83   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  84                 Monitor::_safepoint_check_sometimes),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true,
  87                           Monitor::_safepoint_check_never),
  88   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  89                     CMSRescanMultiple),
  90   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  91                     CMSConcMarkMultiple),
  92   _collector(NULL),
  93   _preconsumptionDirtyCardClosure(NULL)
  94 {
  95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  96          "FreeChunk is larger than expected");
  97   _bt.set_space(this);
  98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  99 
 100   _dictionary = new AFLBinaryTreeDictionary(mr);
 101 
 102   assert(_dictionary != NULL, "CMS dictionary initialization");
 103   // The indexed free lists are initially all empty and are lazily
 104   // filled in on demand. Initialize the array elements to NULL.
 105   initializeIndexedFreeListArray();
 106 
 107   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 108                              SmallForLinearAlloc);
 109 
 110   // CMSIndexedFreeListReplenish should be at least 1
 111   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 112   _promoInfo.setSpace(this);
 113   if (UseCMSBestFit) {
 114     _fitStrategy = FreeBlockBestFitFirst;
 115   } else {
 116     _fitStrategy = FreeBlockStrategyNone;
 117   }
 118   check_free_list_consistency();
 119 
 120   // Initialize locks for parallel case.
 121   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 122     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 123                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 124     DEBUG_ONLY(
 125       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 126     )
 127   }
 128   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 129 }
 130 
 131 // Like CompactibleSpace forward() but always calls cross_threshold() to
 132 // update the block offset table.  Removed initialize_threshold call because
 133 // CFLS does not use a block offset array for contiguous spaces.
 134 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 135                                     CompactPoint* cp, HeapWord* compact_top) {
 136   // q is alive
 137   // First check if we should switch compaction space
 138   assert(this == cp->space, "'this' should be current compaction space.");
 139   size_t compaction_max_size = pointer_delta(end(), compact_top);
 140   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 141     "virtual adjustObjectSize_v() method is not correct");
 142   size_t adjusted_size = adjustObjectSize(size);
 143   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 144          "no small fragments allowed");
 145   assert(minimum_free_block_size() == MinChunkSize,
 146          "for de-virtualized reference below");
 147   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 148   if (adjusted_size + MinChunkSize > compaction_max_size &&
 149       adjusted_size != compaction_max_size) {
 150     do {
 151       // switch to next compaction space
 152       cp->space->set_compaction_top(compact_top);
 153       cp->space = cp->space->next_compaction_space();
 154       if (cp->space == NULL) {
 155         cp->gen = GenCollectedHeap::heap()->young_gen();
 156         assert(cp->gen != NULL, "compaction must succeed");
 157         cp->space = cp->gen->first_compaction_space();
 158         assert(cp->space != NULL, "generation must have a first compaction space");
 159       }
 160       compact_top = cp->space->bottom();
 161       cp->space->set_compaction_top(compact_top);
 162       // The correct adjusted_size may not be the same as that for this method
 163       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 164       // Use the virtual method here; above it was avoided to save the virtual
 165       // dispatch.
 166       adjusted_size = cp->space->adjust_object_size_v(size);
 167       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 168       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 169     } while (adjusted_size > compaction_max_size);
 170   }
 171 
 172   // store the forwarding pointer into the mark word
 173   if ((HeapWord*)q != compact_top) {
 174     q->forward_to(oop(compact_top));
 175     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 176   } else {
 177     // if the object isn't moving we can just set the mark to the default
 178     // mark and handle it specially later on.
 179     q->init_mark();
 180     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 181   }
 182 
 183   compact_top += adjusted_size;
 184 
 185   // we need to update the offset table so that the beginnings of objects can be
 186   // found during scavenge.  Note that we are updating the offset table based on
 187   // where the object will be once the compaction phase finishes.
 188 
 189   // Always call cross_threshold().  A contiguous space can restrict the call
 190   // to the case where compaction_top exceeds the current threshold, but that
 191   // shortcut does not apply to a non-contiguous space.
 192   cp->threshold =
 193     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 194   return compact_top;
 195 }
 196 
 197 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 198 // and use of single_block instead of alloc_block.  The name here is not really
 199 // appropriate - maybe a more general name could be invented for both the
 200 // contiguous and noncontiguous spaces.
 201 
 202 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 203   _bt.single_block(start, the_end);
 204   return end();
 205 }
 206 
 207 // Initialize the indexed free lists to be empty (heads and tails NULL).
 208 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 209   for (size_t i = 0; i < IndexSetSize; i++) {
 210     // Note that on platforms where objects are double word aligned,
 211     // the odd array elements are not used.  It is convenient, however,
 212     // to map directly from the object size to the array element.
 213     _indexedFreeList[i].reset(IndexSetSize);
 214     _indexedFreeList[i].set_size(i);
 215     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 216     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 217     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 219   }
 220 }
 221 
 222 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 223   for (size_t i = 1; i < IndexSetSize; i++) {
 224     assert(_indexedFreeList[i].size() == (size_t) i,
 225       "Indexed free list sizes are incorrect");
 226     _indexedFreeList[i].reset(IndexSetSize);
 227     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 228     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 229     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 230     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 231   }
 232 }
 233 
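     // Reset the space so that the region mr becomes its single free chunk:
     // the indexed free lists and the dictionary are emptied, the block
     // offset table is reinitialized, and (if mr is non-empty) mr is
     // returned either to the dictionary or to the indexed list matching
     // its word size. Promotion info and the small linear allocation block
     // are cleared as well.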
 234 void CompactibleFreeListSpace::reset(MemRegion mr) {
 235   resetIndexedFreeListArray();
 236   dictionary()->reset();
 237   if (BlockOffsetArrayUseUnallocatedBlock) {
 238     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 239     // Everything's allocated until proven otherwise.
 240     _bt.set_unallocated_block(end());
 241   }
 242   if (!mr.is_empty()) {
 243     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 244     _bt.single_block(mr.start(), mr.word_size());
 245     FreeChunk* fc = (FreeChunk*) mr.start();
 246     fc->set_size(mr.word_size());
 247     if (mr.word_size() >= IndexSetSize ) {
 248       returnChunkToDictionary(fc);
 249     } else {
 250       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 251       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 252     }
 253     coalBirth(mr.word_size());
 254   }
 255   _promoInfo.reset();
 256   _smallLinearAllocBlock._ptr = NULL;
 257   _smallLinearAllocBlock._word_size = 0;
 258 }
 259 
 260 void CompactibleFreeListSpace::reset_after_compaction() {
 261   // Reset the space to the new reality - one free chunk.
 262   MemRegion mr(compaction_top(), end());
 263   reset(mr);
 264   // Now refill the linear allocation block(s) if possible.
 265   refillLinearAllocBlocksIfNeeded();
 266 }
 267 
 268 // Walks the entire dictionary, returning a coterminal
 269 // chunk, if it exists. Use with caution since it involves
 270 // a potentially complete walk of a potentially large tree.
 271 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 272 
 273   assert_lock_strong(&_freelistLock);
 274 
 275   return dictionary()->find_chunk_ends_at(end());
 276 }
 277 
 278 
 279 #ifndef PRODUCT
 280 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 281   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 282     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 283   }
 284 }
 285 
 286 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 287   size_t sum = 0;
 288   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 289     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 290   }
 291   return sum;
 292 }
 293 
 294 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 295   size_t count = 0;
 296   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 297     debug_only(
 298       ssize_t total_list_count = 0;
 299       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 300          fc = fc->next()) {
 301         total_list_count++;
 302       }
 303       assert(total_list_count ==  _indexedFreeList[i].count(),
 304         "Count in list is incorrect");
 305     )
 306     count += _indexedFreeList[i].count();
 307   }
 308   return count;
 309 }
 310 
 311 size_t CompactibleFreeListSpace::totalCount() {
 312   size_t num = totalCountInIndexedFreeLists();
 313   num +=  dictionary()->total_count();
 314   if (_smallLinearAllocBlock._word_size != 0) {
 315     num++;
 316   }
 317   return num;
 318 }
 319 #endif
 320 
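     // A block is treated as free if the FreeChunk header at p says so.
     // With concurrent allocation this answer can become stale immediately;
     // callers that need a stable answer must hold the appropriate free list
     // lock (see block_size() below for the lock-free protocol used on the
     // unlocked paths).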
 321 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 322   FreeChunk* fc = (FreeChunk*) p;
 323   return fc->is_free();
 324 }
 325 
 326 size_t CompactibleFreeListSpace::used() const {
 327   return capacity() - free();
 328 }
 329 
 330 size_t CompactibleFreeListSpace::free() const {
 331   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 332   // if you do this while the structures are in flux you
 333   // may get an approximate answer only; for instance
 334   // because there is concurrent allocation either
 335   // directly by mutators or for promotion during a GC.
 336   // It's "MT-safe", however, in the sense that you are guaranteed
 337   // not to crash and burn, for instance, because of walking
 338   // pointers that could disappear as you were walking them.
 339   // The approximation is because the various components
 340   // that are read below are not read atomically (and,
 341   // further, the computation of totalSizeInIndexedFreeLists()
 342   // is itself non-atomic). The normal use of
 343   // this is during a resize operation at the end of GC
 344   // and at that time you are guaranteed to get the
 345   // correct actual value. However, for instance, this is
 346   // also read completely asynchronously by the "perf-sampler"
 347   // that supports jvmstat, and you are apt to see the values
 348   // flicker in such cases.
 349   assert(_dictionary != NULL, "No _dictionary?");
 350   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 351           totalSizeInIndexedFreeLists() +
 352           _smallLinearAllocBlock._word_size) * HeapWordSize;
 353 }
 354 
 355 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 356   assert(_dictionary != NULL, "No _dictionary?");
 357   assert_locked();
 358   size_t res = _dictionary->max_chunk_size();
 359   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 360                        (size_t) SmallForLinearAlloc - 1));
 361   // XXX The following loop could potentially be pretty slow;
 362   // should one, in the rare cases when the res calculated above
 363   // is less than IndexSetSize,
 364   // pessimistically just return that res? My reasoning was that
 365   // those cases will be so rare that the extra time spent doesn't
 366   // really matter.
 367   // Note: do not change the loop test i >= res + IndexSetStride
 368   // to i > res below, because i is unsigned and res may be zero.
 369   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 370        i -= IndexSetStride) {
 371     if (_indexedFreeList[i].head() != NULL) {
 372       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 373       return i;
 374     }
 375   }
 376   return res;
 377 }
 378 
 379 void LinearAllocBlock::print_on(outputStream* st) const {
 380   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 381             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 382             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 383 }
 384 
 385 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 386   st->print_cr("COMPACTIBLE FREELIST SPACE");
 387   st->print_cr(" Space:");
 388   Space::print_on(st);
 389 
 390   st->print_cr("promoInfo:");
 391   _promoInfo.print_on(st);
 392 
 393   st->print_cr("_smallLinearAllocBlock");
 394   _smallLinearAllocBlock.print_on(st);
 395 
 396   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 397 
 398   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 399 }
 400 
 401 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 402 const {
 403   reportIndexedFreeListStatistics();
 404   st->print_cr("Layout of Indexed Freelists");
 405   st->print_cr("---------------------------");
 406   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 407   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 408     _indexedFreeList[i].print_on(st);
 409     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 410          fc = fc->next()) {
 411       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 412                    p2i(fc), p2i((HeapWord*)fc + i),
 413                    fc->cantCoalesce() ? "\t CC" : "");
 414     }
 415   }
 416 }
 417 
 418 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 419 const {
 420   _promoInfo.print_on(st);
 421 }
 422 
 423 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 424 const {
 425   _dictionary->report_statistics();
 426   st->print_cr("Layout of Freelists in Tree");
 427   st->print_cr("---------------------------");
 428   _dictionary->print_free_lists(st);
 429 }
 430 
 431 class BlkPrintingClosure: public BlkClosure {
 432   const CMSCollector*             _collector;
 433   const CompactibleFreeListSpace* _sp;
 434   const CMSBitMap*                _live_bit_map;
 435   const bool                      _post_remark;
 436   outputStream*                   _st;
 437 public:
 438   BlkPrintingClosure(const CMSCollector* collector,
 439                      const CompactibleFreeListSpace* sp,
 440                      const CMSBitMap* live_bit_map,
 441                      outputStream* st):
 442     _collector(collector),
 443     _sp(sp),
 444     _live_bit_map(live_bit_map),
 445     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 446     _st(st) { }
 447   size_t do_blk(HeapWord* addr);
 448 };
 449 
 450 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 451   size_t sz = _sp->block_size_no_stall(addr, _collector);
 452   assert(sz != 0, "Should always be able to compute a size");
 453   if (_sp->block_is_obj(addr)) {
 454     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 455     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 456       p2i(addr),
 457       dead ? "dead" : "live",
 458       sz,
 459       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 460     if (CMSPrintObjectsInDump && !dead) {
 461       oop(addr)->print_on(_st);
 462       _st->print_cr("--------------------------------------");
 463     }
 464   } else { // free block
 465     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 466       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 467     if (CMSPrintChunksInDump) {
 468       ((FreeChunk*)addr)->print_on(_st);
 469       _st->print_cr("--------------------------------------");
 470     }
 471   }
 472   return sz;
 473 }
 474 
 475 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 476   outputStream* st) {
 477   st->print_cr("\n=========================");
 478   st->print_cr("Block layout in CMS Heap:");
 479   st->print_cr("=========================");
 480   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 481   blk_iterate(&bpcl);
 482 
 483   st->print_cr("\n=======================================");
 484   st->print_cr("Order & Layout of Promotion Info Blocks");
 485   st->print_cr("=======================================");
 486   print_promo_info_blocks(st);
 487 
 488   st->print_cr("\n===========================");
 489   st->print_cr("Order of Indexed Free Lists");
 490   st->print_cr("=========================");
 491   print_indexed_free_lists(st);
 492 
 493   st->print_cr("\n=================================");
 494   st->print_cr("Order of Free Lists in Dictionary");
 495   st->print_cr("=================================");
 496   print_dictionary_free_lists(st);
 497 }
 498 
 499 
 500 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 501   assert_lock_strong(&_freelistLock);
 502   assert(PrintFLSStatistics != 0, "Reporting error");
 503   _dictionary->report_statistics();
 504   if (PrintFLSStatistics > 1) {
 505     reportIndexedFreeListStatistics();
 506     size_t total_size = totalSizeInIndexedFreeLists() +
 507                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 508     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 509   }
 510 }
 511 
 512 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 513   assert_lock_strong(&_freelistLock);
 514   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 515                       "--------------------------------\n");
 516   size_t total_size = totalSizeInIndexedFreeLists();
 517   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 518   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 519   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 520   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 521   if (free_blocks != 0) {
 522     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 523   }
 524 }
 525 
 526 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 527   size_t res = 0;
 528   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 529     debug_only(
 530       ssize_t recount = 0;
 531       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 532          fc = fc->next()) {
 533         recount += 1;
 534       }
 535       assert(recount == _indexedFreeList[i].count(),
 536         "Incorrect count in list");
 537     )
 538     res += _indexedFreeList[i].count();
 539   }
 540   return res;
 541 }
 542 
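     // Walk the indexed free lists from the largest size downward and return
     // the first size whose list is non-empty; returns 0 if all indexed
     // lists are empty.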
 543 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 544   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 545     if (_indexedFreeList[i].head() != NULL) {
 546       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 547       return (size_t)i;
 548     }
 549   }
 550   return 0;
 551 }
 552 
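     // Change the end of the space. The block offset table is resized in
     // either direction; on expansion the newly covered words
     // [prevEnd, value) are added to the free lists, coalescing with a
     // preceding free block when possible, while shrinking only checks that
     // the new end does not descend below the unallocated block.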
 553 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 554   HeapWord* prevEnd = end();
 555   assert(prevEnd != value, "unnecessary set_end call");
 556   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 557         "New end is below unallocated block");
 558   _end = value;
 559   if (prevEnd != NULL) {
 560     // Resize the underlying block offset table.
 561     _bt.resize(pointer_delta(value, bottom()));
 562     if (value <= prevEnd) {
 563       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 564              "New end is below unallocated block");
 565     } else {
 566       // Now, take this new chunk and add it to the free blocks.
 567       // Note that the BOT has not yet been updated for this block.
 568       size_t newFcSize = pointer_delta(value, prevEnd);
 569       // Add the block to the free lists, if possible coalescing it
 570       // with the last free block, and update the BOT and census data.
 571       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 572     }
 573   }
 574 }
 575 
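     // A dirty-card-to-oop closure specialized for CompactibleFreeListSpace.
     // Its block walk (see the macro below) skips free blocks, objects
     // allocated since the last save-marks, and objects known to be dead,
     // and it dispatches to either the parallel or the serial walking code
     // depending on the _parallel flag.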
 576 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 577   CompactibleFreeListSpace* _cfls;
 578   CMSCollector* _collector;
 579   bool _parallel;
 580 protected:
 581   // Override.
 582 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 583   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 584                                        HeapWord* bottom, HeapWord* top, \
 585                                        ClosureType* cl);                \
 586       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 587                                        HeapWord* bottom, HeapWord* top, \
 588                                        ClosureType* cl);                \
 589     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 590                                        HeapWord* bottom, HeapWord* top, \
 591                                        ClosureType* cl)
 592   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 593   walk_mem_region_with_cl_DECL(FilteringClosure);
 594 
 595 public:
 596   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 597                       CMSCollector* collector,
 598                       ExtendedOopClosure* cl,
 599                       CardTableModRefBS::PrecisionStyle precision,
 600                       HeapWord* boundary,
 601                       bool parallel) :
 602     Filtering_DCTOC(sp, cl, precision, boundary),
 603     _cfls(sp), _collector(collector), _parallel(parallel) {}
 604 };
 605 
 606 // We de-virtualize the block-related calls below, since we know that our
 607 // space is a CompactibleFreeListSpace.
 608 
 609 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 610 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 611                                                  HeapWord* bottom,              \
 612                                                  HeapWord* top,                 \
 613                                                  ClosureType* cl) {             \
 614    if (_parallel) {                                                             \
 615      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 616    } else {                                                                     \
 617      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 618    }                                                                            \
 619 }                                                                               \
 620 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 621                                                       HeapWord* bottom,         \
 622                                                       HeapWord* top,            \
 623                                                       ClosureType* cl) {        \
 624   /* Skip parts that are before "mr", in case "block_start" sent us             \
 625      back too far. */                                                           \
 626   HeapWord* mr_start = mr.start();                                              \
 627   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 628   HeapWord* next = bottom + bot_size;                                           \
 629   while (next < mr_start) {                                                     \
 630     bottom = next;                                                              \
 631     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 632     next = bottom + bot_size;                                                   \
 633   }                                                                             \
 634                                                                                 \
 635   while (bottom < top) {                                                        \
 636     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 637         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 638                     oop(bottom)) &&                                             \
 639         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 640       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 641       bottom += _cfls->adjustObjectSize(word_sz);                               \
 642     } else {                                                                    \
 643       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 644     }                                                                           \
 645   }                                                                             \
 646 }                                                                               \
 647 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 648                                                         HeapWord* bottom,       \
 649                                                         HeapWord* top,          \
 650                                                         ClosureType* cl) {      \
 651   /* Skip parts that are before "mr", in case "block_start" sent us             \
 652      back too far. */                                                           \
 653   HeapWord* mr_start = mr.start();                                              \
 654   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 655   HeapWord* next = bottom + bot_size;                                           \
 656   while (next < mr_start) {                                                     \
 657     bottom = next;                                                              \
 658     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 659     next = bottom + bot_size;                                                   \
 660   }                                                                             \
 661                                                                                 \
 662   while (bottom < top) {                                                        \
 663     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 664         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 665                     oop(bottom)) &&                                             \
 666         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 667       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 668       bottom += _cfls->adjustObjectSize(word_sz);                               \
 669     } else {                                                                    \
 670       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 671     }                                                                           \
 672   }                                                                             \
 673 }
 674 
 675 // (There are only two of these, rather than N, because the split is due
 676 // only to the introduction of the FilteringClosure, a local part of the
 677 // impl of this abstraction.)
 678 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 679 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 680 
 681 DirtyCardToOopClosure*
 682 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 683                                       CardTableModRefBS::PrecisionStyle precision,
 684                                       HeapWord* boundary,
 685                                       bool parallel) {
 686   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
 687 }
 688 
 689 
 690 // Note on locking for the space iteration functions:
 691 // since the collector's iteration activities are concurrent with
 692 // allocation activities by mutators, absent a suitable mutual exclusion
 693 // mechanism the iterators may go awry. For instance a block being iterated
 694 // may suddenly be allocated or divided up and part of it allocated and
 695 // so on.
 696 
 697 // Apply the given closure to each block in the space.
 698 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 699   assert_lock_strong(freelistLock());
 700   HeapWord *cur, *limit;
 701   for (cur = bottom(), limit = end(); cur < limit;
 702        cur += cl->do_blk_careful(cur));
 703 }
 704 
 705 // Apply the given closure to each block in the space.
 706 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 707   assert_lock_strong(freelistLock());
 708   HeapWord *cur, *limit;
 709   for (cur = bottom(), limit = end(); cur < limit;
 710        cur += cl->do_blk(cur));
 711 }
 712 
 713 // Apply the given closure to each oop in the space.
 714 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 715   assert_lock_strong(freelistLock());
 716   HeapWord *cur, *limit;
 717   size_t curSize;
 718   for (cur = bottom(), limit = end(); cur < limit;
 719        cur += curSize) {
 720     curSize = block_size(cur);
 721     if (block_is_obj(cur)) {
 722       oop(cur)->oop_iterate(cl);
 723     }
 724   }
 725 }
 726 
 727 // NOTE: In the following methods, in order to safely be able to
 728 // apply the closure to an object, we need to be sure that the
 729 // object has been initialized. We are guaranteed that an object
 730 // is initialized if we are holding the Heap_lock with the
 731 // world stopped.
 732 void CompactibleFreeListSpace::verify_objects_initialized() const {
 733   if (is_init_completed()) {
 734     assert_locked_or_safepoint(Heap_lock);
 735     if (Universe::is_fully_initialized()) {
 736       guarantee(SafepointSynchronize::is_at_safepoint(),
 737                 "Required for objects to be initialized");
 738     }
 739   } // else make a concession at vm start-up
 740 }
 741 
 742 // Apply the given closure to each object in the space
 743 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 744   assert_lock_strong(freelistLock());
 745   NOT_PRODUCT(verify_objects_initialized());
 746   HeapWord *cur, *limit;
 747   size_t curSize;
 748   for (cur = bottom(), limit = end(); cur < limit;
 749        cur += curSize) {
 750     curSize = block_size(cur);
 751     if (block_is_obj(cur)) {
 752       blk->do_object(oop(cur));
 753     }
 754   }
 755 }
 756 
 757 // Apply the given closure to each live object in the space.
 758 //   The use of CompactibleFreeListSpace
 759 // by ConcurrentMarkSweepGeneration for concurrent GCs allows
 760 // objects in the space to contain references to objects that are no
 761 // longer valid.  For example, an object may reference another object
 762 // that has already been swept up (collected).  This method uses
 763 // obj_is_alive() to determine whether it is safe to apply the closure to
 764 // an object.  See obj_is_alive() for details on how the liveness of an
 765 // object is decided.
 766 
 767 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 768   assert_lock_strong(freelistLock());
 769   NOT_PRODUCT(verify_objects_initialized());
 770   HeapWord *cur, *limit;
 771   size_t curSize;
 772   for (cur = bottom(), limit = end(); cur < limit;
 773        cur += curSize) {
 774     curSize = block_size(cur);
 775     if (block_is_obj(cur) && obj_is_alive(cur)) {
 776       blk->do_object(oop(cur));
 777     }
 778   }
 779 }
 780 
 781 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 782                                                   UpwardsObjectClosure* cl) {
 783   assert_locked(freelistLock());
 784   NOT_PRODUCT(verify_objects_initialized());
 785   assert(!mr.is_empty(), "Should be non-empty");
 786   // We use MemRegion(bottom(), end()) rather than used_region() below
 787   // because the two are not necessarily equal for some kinds of
 788   // spaces, in particular, certain kinds of free list spaces.
 789   // We could use the more complicated but more precise:
 790   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 791   // but the slight imprecision seems acceptable in the assertion check.
 792   assert(MemRegion(bottom(), end()).contains(mr),
 793          "Should be within used space");
 794   HeapWord* prev = cl->previous();   // max address from last time
 795   if (prev >= mr.end()) { // nothing to do
 796     return;
 797   }
 798   // This assert will not work when we go from cms space to perm
 799   // space, and use same closure. Easy fix deferred for later. XXX YSR
 800   // assert(prev == NULL || contains(prev), "Should be within space");
 801 
 802   bool last_was_obj_array = false;
 803   HeapWord *blk_start_addr, *region_start_addr;
 804   if (prev > mr.start()) {
 805     region_start_addr = prev;
 806     blk_start_addr    = prev;
 807     // The previous invocation may have pushed "prev" beyond the
 808     // last allocated block, yet there may still be blocks
 809     // in this region due to a particular coalescing policy.
 810     // Relax the assertion so that the case where the unallocated
 811     // block is maintained and "prev" is beyond the unallocated
 812     // block does not cause the assertion to fire.
 813     assert((BlockOffsetArrayUseUnallocatedBlock &&
 814             (!is_in(prev))) ||
 815            (blk_start_addr == block_start(region_start_addr)), "invariant");
 816   } else {
 817     region_start_addr = mr.start();
 818     blk_start_addr    = block_start(region_start_addr);
 819   }
 820   HeapWord* region_end_addr = mr.end();
 821   MemRegion derived_mr(region_start_addr, region_end_addr);
 822   while (blk_start_addr < region_end_addr) {
 823     const size_t size = block_size(blk_start_addr);
 824     if (block_is_obj(blk_start_addr)) {
 825       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 826     } else {
 827       last_was_obj_array = false;
 828     }
 829     blk_start_addr += size;
 830   }
 831   if (!last_was_obj_array) {
 832     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 833            "Should be within (closed) used space");
 834     assert(blk_start_addr > prev, "Invariant");
 835     cl->set_previous(blk_start_addr); // min address for next time
 836   }
 837 }
 838 
 839 // Callers of this iterator beware: The closure application should
 840 // be robust in the face of uninitialized objects and should (always)
 841 // return a correct size so that the next addr + size below gives us a
 842 // valid block boundary. [See for instance,
 843 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 844 // in ConcurrentMarkSweepGeneration.cpp.]
 845 HeapWord*
 846 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 847   ObjectClosureCareful* cl) {
 848   assert_lock_strong(freelistLock());
 849   // Can't use used_region() below because it may not necessarily
 850   // be the same as [bottom(),end()); although we could
 851   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 852   // that appears too cumbersome, so we just do the simpler check
 853   // in the assertion below.
 854   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 855          "mr should be non-empty and within used space");
 856   HeapWord *addr, *end;
 857   size_t size;
 858   for (addr = block_start_careful(mr.start()), end  = mr.end();
 859        addr < end; addr += size) {
 860     FreeChunk* fc = (FreeChunk*)addr;
 861     if (fc->is_free()) {
 862       // Since we hold the free list lock, which protects direct
 863       // allocation in this generation by mutators, a free object
 864       // will remain free throughout this iteration code.
 865       size = fc->size();
 866     } else {
 867       // Note that the object need not necessarily be initialized,
 868       // because (for instance) the free list lock does NOT protect
 869       // object initialization. The closure application below must
 870       // therefore be correct in the face of uninitialized objects.
 871       size = cl->do_object_careful_m(oop(addr), mr);
 872       if (size == 0) {
 873         // An unparsable object found. Signal early termination.
 874         return addr;
 875       }
 876     }
 877   }
 878   return NULL;
 879 }
 880 
 881 
 882 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 883   NOT_PRODUCT(verify_objects_initialized());
 884   return _bt.block_start(p);
 885 }
 886 
 887 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 888   return _bt.block_start_careful(p);
 889 }
 890 
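     // Return the size, in heap words, of the block starting at p. A block
     // may be concurrently converted between a free chunk and an allocated
     // object, so the loop below re-reads the header until it observes a
     // consistent view (free bit plus chunk size, or klass plus object
     // size), using acquire ordering on weakly ordered processors.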
 891 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 892   NOT_PRODUCT(verify_objects_initialized());
 893   // This must be volatile, or else there is a danger that the compiler
 894   // will compile the code below into a sometimes-infinite loop, by keeping
 895   // the value read the first time in a register.
 896   while (true) {
 897     // We must do this until we get a consistent view of the object.
 898     if (FreeChunk::indicatesFreeChunk(p)) {
 899       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 900       size_t res = fc->size();
 901 
 902       // Bugfix for systems with weak memory model (PPC64/IA64). The
 903       // block's free bit was set and we have read the size of the
 904       // block. Acquire and check the free bit again. If the block is
 905       // still free, the read size is correct.
 906       OrderAccess::acquire();
 907 
 908       // If the object is still a free chunk, return the size, else it
 909       // has been allocated so try again.
 910       if (FreeChunk::indicatesFreeChunk(p)) {
 911         assert(res != 0, "Block size should not be 0");
 912         return res;
 913       }
 914     } else {
 915       // must read from what 'p' points to in each loop.
 916       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 917       if (k != NULL) {
 918         assert(k->is_klass(), "Should really be klass oop.");
 919         oop o = (oop)p;
 920         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 921 
 922         // Bugfix for systems with weak memory model (PPC64/IA64).
 923         // The object o may be an array. Acquire to make sure that the array
 924         // size (third word) is consistent.
 925         OrderAccess::acquire();
 926 
 927         size_t res = o->size_given_klass(k);
 928         res = adjustObjectSize(res);
 929         assert(res != 0, "Block size should not be 0");
 930         return res;
 931       }
 932     }
 933   }
 934 }
 935 
 936 // TODO: Now that is_parsable is gone, we should combine these two functions.
 937 // A variant of the above that uses the Printezis bits for
 938 // unparsable but allocated objects. This avoids any possible
 939 // stalls waiting for mutators to initialize objects, and is
 940 // thus potentially faster than the variant above. However,
 941 // this variant may return a zero size for a block that is
 942 // under mutation and for which a consistent size cannot be
 943 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 944 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 945                                                      const CMSCollector* c)
 946 const {
 947   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 948   // This must be volatile, or else there is a danger that the compiler
 949   // will compile the code below into a sometimes-infinite loop, by keeping
 950   // the value read the first time in a register.
 951   DEBUG_ONLY(uint loops = 0;)
 952   while (true) {
 953     // We must do this until we get a consistent view of the object.
 954     if (FreeChunk::indicatesFreeChunk(p)) {
 955       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 956       size_t res = fc->size();
 957 
 958       // Bugfix for systems with weak memory model (PPC64/IA64). The
 959       // free bit of the block was set and we have read the size of
 960       // the block. Acquire and check the free bit again. If the
 961       // block is still free, the read size is correct.
 962       OrderAccess::acquire();
 963 
 964       if (FreeChunk::indicatesFreeChunk(p)) {
 965         assert(res != 0, "Block size should not be 0");
 966         assert(loops == 0, "Should be 0");
 967         return res;
 968       }
 969     } else {
 970       // must read from what 'p' points to in each loop.
 971       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 972       // We trust the size of any object that has a non-NULL
 973       // klass and (for those in the perm gen) is parsable
 974       // -- irrespective of its conc_safe-ty.
 975       if (k != NULL) {
 976         assert(k->is_klass(), "Should really be klass oop.");
 977         oop o = (oop)p;
 978         assert(o->is_oop(), "Should be an oop");
 979 
 980         // Bugfix for systems with weak memory model (PPC64/IA64).
 981         // The object o may be an array. Acquire to make sure that the array
 982         // size (third word) is consistent.
 983         OrderAccess::acquire();
 984 
 985         size_t res = o->size_given_klass(k);
 986         res = adjustObjectSize(res);
 987         assert(res != 0, "Block size should not be 0");
 988         return res;
 989       } else {
 990         // May return 0 if P-bits not present.
 991         return c->block_size_if_printezis_bits(p);
 992       }
 993     }
 994     assert(loops == 0, "Can loop at most once");
 995     DEBUG_ONLY(loops++;)
 996   }
 997 }
 998 
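     // Variant of block_size() for paths on which the block is not subject
     // to concurrent (parallel) mutation, so a single read of the FreeChunk
     // header, or of the object's size, suffices.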
 999 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1000   NOT_PRODUCT(verify_objects_initialized());
1001   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1002   FreeChunk* fc = (FreeChunk*)p;
1003   if (fc->is_free()) {
1004     return fc->size();
1005   } else {
1006     // Ignore mark word because this may be a recently promoted
1007     // object whose mark word is used to chain together grey
1008     // objects (the last one would have a null value).
1009     assert(oop(p)->is_oop(true), "Should be an oop");
1010     return adjustObjectSize(oop(p)->size());
1011   }
1012 }
1013 
1014 // This implementation assumes that the property of "being an object" is
1015 // stable.  But being a free chunk may not be (because of parallel
1016 // promotion).
1017 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1018   FreeChunk* fc = (FreeChunk*)p;
1019   assert(is_in_reserved(p), "Should be in space");
1020   if (FreeChunk::indicatesFreeChunk(p)) return false;
1021   Klass* k = oop(p)->klass_or_null();
1022   if (k != NULL) {
1023     // Ignore mark word because it may have been used to
1024     // chain together promoted objects (the last one
1025     // would have a null value).
1026     assert(oop(p)->is_oop(true), "Should be an oop");
1027     return true;
1028   } else {
1029     return false;  // Was not an object at the start of collection.
1030   }
1031 }
1032 
1033 // Check if the object is alive. This fact is checked either by consulting
1034 // the main marking bitmap in the sweeping phase or, if it's a permanent
1035 // generation and we're not in the sweeping phase, by checking the
1036 // perm_gen_verify_bit_map where we store the "deadness" information if
1037 // we did not sweep the perm gen in the most recent previous GC cycle.
1038 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1039   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1040          "Else races are possible");
1041   assert(block_is_obj(p), "The address should point to an object");
1042 
1043   // If we're sweeping, we use object liveness information from the main bit map
1044   // for both perm gen and old gen.
1045   // We don't need to lock the bitmap (live_map or dead_map below), because
1046   // EITHER we are in the middle of the sweeping phase, and the
1047   // main marking bit map (live_map below) is locked,
1048   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1049   // is stable, because it's mutated only in the sweeping phase.
1050   // NOTE: This method is also used by jmap where, if class unloading is
1051   // off, the results can return "false" for legitimate perm objects,
1052   // when we are not in the midst of a sweeping phase, which can result
1053   // in jmap not reporting certain perm gen objects. This will be moot
1054   // if/when the perm gen goes away in the future.
1055   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1056     CMSBitMap* live_map = _collector->markBitMap();
1057     return live_map->par_isMarked((HeapWord*) p);
1058   }
1059   return true;
1060 }
1061 
1062 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1063   FreeChunk* fc = (FreeChunk*)p;
1064   assert(is_in_reserved(p), "Should be in space");
1065   assert(_bt.block_start(p) == p, "Should be a block boundary");
1066   if (!fc->is_free()) {
1067     // Ignore mark word because it may have been used to
1068     // chain together promoted objects (the last one
1069     // would have a null value).
1070     assert(oop(p)->is_oop(true), "Should be an oop");
1071     return true;
1072   }
1073   return false;
1074 }
1075 
1076 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1077 // approximate answer if you don't hold the freelistlock when you call this.
1078 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1079   size_t size = 0;
1080   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1081     debug_only(
1082       // We may be calling here without the lock in which case we
1083       // won't do this modest sanity check.
1084       if (freelistLock()->owned_by_self()) {
1085         size_t total_list_size = 0;
1086         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1087           fc = fc->next()) {
1088           total_list_size += i;
1089         }
1090         assert(total_list_size == i * _indexedFreeList[i].count(),
1091                "Count in list is incorrect");
1092       }
1093     )
1094     size += i * _indexedFreeList[i].count();
1095   }
1096   return size;
1097 }
1098 
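     // Allocation entry point that may be called by multiple threads: it
     // simply takes the free list lock (without a safepoint check) and
     // delegates to allocate().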
1099 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1100   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1101   return allocate(size);
1102 }
1103 
1104 HeapWord*
1105 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1106   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1107 }
1108 
1109 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1110   assert_lock_strong(freelistLock());
1111   HeapWord* res = NULL;
1112   assert(size == adjustObjectSize(size),
1113          "use adjustObjectSize() before calling into allocate()");
1114 
1115   res = allocate_adaptive_freelists(size);
1116 
1117   if (res != NULL) {
1118     // check that res does lie in this space!
1119     assert(is_in_reserved(res), "Not in this space!");
1120     assert(is_aligned((void*)res), "alignment check");
1121 
1122     FreeChunk* fc = (FreeChunk*)res;
1123     fc->markNotFree();
1124     assert(!fc->is_free(), "shouldn't be marked free");
1125     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1126     // Verify that the block offset table shows this to
1127     // be a single block, but not one which is unallocated.
1128     _bt.verify_single_block(res, size);
1129     _bt.verify_not_unallocated(res, size);
1130     // mangle a just allocated object with a distinct pattern.
1131     debug_only(fc->mangleAllocated(size));
1132   }
1133 
1134   return res;
1135 }
1136 
1137 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1138   assert_lock_strong(freelistLock());
1139   HeapWord* res = NULL;
1140   assert(size == adjustObjectSize(size),
1141          "use adjustObjectSize() before calling into allocate()");
1142 
1143   // Strategy
1144   //   if small
1145   //     exact size from the small object indexed list
1146   //     small or large linear allocation block (linAB) as appropriate
1147   //     take from lists of greater sized chunks
1148   //   else
1149   //     dictionary
1150   //     small or large linear allocation block if it has the space
1151   // Try allocating exact size from indexTable first
1152   if (size < IndexSetSize) {
1153     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1154     if (res != NULL) {
1155       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1156         "Not removed from free list");
1157       // no block offset table adjustment is necessary on blocks in
1158       // the indexed lists.
1159 
1160     // Try allocating from the small LinAB
1161     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1162         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1163         // if successful, the above also adjusts block offset table
1164         // Note that this call will refill the LinAB to
1165         // satisfy the request.  This is different than
1166         // evm.
1167         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1168     } else {
1169       // Raid the exact free lists larger than size, even if they are not
1170       // overpopulated.
1171       res = (HeapWord*) getChunkFromGreater(size);
1172     }
1173   } else {
1174     // Big objects get allocated directly from the dictionary.
1175     res = (HeapWord*) getChunkFromDictionaryExact(size);
1176     if (res == NULL) {
1177       // Try hard not to fail since an allocation failure will likely
1178       // trigger a synchronous GC.  Try to get the space from the
1179       // allocation blocks.
1180       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1181     }
1182   }
1183 
1184   return res;
1185 }
1186 
1187 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1188 // when promoting obj.
1189 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1190   // Depending on the object size, expansion may require refilling either a
1191   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1192   // is added because the dictionary may over-allocate to avoid fragmentation.
1193   size_t space = obj_size;
1194   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1195   return space;
1196 }
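     // Worked example for the estimate above (the numbers are illustrative
     // assumptions only): promoting a 10-word object when
     // _promoInfo.refillSize() happens to be 1000 words and MinChunkSize is
     // 3 words gives 10 + 1000 + 2 * 3 == 1016 words of worst-case
     // expansion.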
1197 
1198 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1199   FreeChunk* ret;
1200 
1201   assert(numWords >= MinChunkSize, "Size is less than minimum");
1202   assert(linearAllocationWouldFail() || bestFitFirst(),
1203     "Should not be here");
1204 
1205   size_t i;
1206   size_t currSize = numWords + MinChunkSize;
1207   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1208   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1209     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1210     if (fl->head()) {
1211       ret = getFromListGreater(fl, numWords);
1212       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1213       return ret;
1214     }
1215   }
1216 
1217   currSize = MAX2((size_t)SmallForDictionary,
1218                   (size_t)(numWords + MinChunkSize));
1219 
1220   /* Try to get a chunk that satisfies the request, while avoiding
1221      fragmentation that can't be handled. */
1222   {
1223     ret =  dictionary()->get_chunk(currSize);
1224     if (ret != NULL) {
1225       assert(ret->size() - numWords >= MinChunkSize,
1226              "Chunk is too small");
1227       _bt.allocated((HeapWord*)ret, ret->size());
1228       /* Carve returned chunk. */
1229       (void) splitChunkAndReturnRemainder(ret, numWords);
1230       /* Label this as no longer a free chunk. */
1231       assert(ret->is_free(), "This chunk should be free");
1232       ret->link_prev(NULL);
1233     }
1234     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1235     return ret;
1236   }
1237   ShouldNotReachHere();
1238 }
1239 
1240 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1241   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1242   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1243 }
1244 
1245 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1246   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1247          (_smallLinearAllocBlock._word_size == fc->size()),
1248          "Linear allocation block shows incorrect size");
1249   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1250           (_smallLinearAllocBlock._word_size == fc->size()));
1251 }
1252 
1253 // Check if the purported free chunk is present either as a linear
1254 // allocation block, the size-indexed table of (smaller) free blocks,
1255 // or the larger free blocks kept in the binary tree dictionary.
1256 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1257   if (verify_chunk_is_linear_alloc_block(fc)) {
1258     return true;
1259   } else if (fc->size() < IndexSetSize) {
1260     return verifyChunkInIndexedFreeLists(fc);
1261   } else {
1262     return dictionary()->verify_chunk_in_free_list(fc);
1263   }
1264 }
1265 
1266 #ifndef PRODUCT
1267 void CompactibleFreeListSpace::assert_locked() const {
1268   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1269 }
1270 
1271 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1272   CMSLockVerifier::assert_locked(lock);
1273 }
1274 #endif
1275 
1276 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1277   // In the parallel case, the main thread holds the free list lock
1278   // on behalf of the parallel threads.
1279   FreeChunk* fc;
1280   {
1281     // If GC is parallel, this might be called by several threads.
1282     // This should be rare enough that the locking overhead won't affect
1283     // the sequential code.
1284     MutexLockerEx x(parDictionaryAllocLock(),
1285                     Mutex::_no_safepoint_check_flag);
1286     fc = getChunkFromDictionary(size);
1287   }
1288   if (fc != NULL) {
1289     fc->dontCoalesce();
1290     assert(fc->is_free(), "Should be free, but not coalescable");
1291     // Verify that the block offset table shows this to
1292     // be a single block, but not one which is unallocated.
1293     _bt.verify_single_block((HeapWord*)fc, fc->size());
1294     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1295   }
1296   return fc;
1297 }
1298 
1299 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1300   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1301   assert_locked();
1302 
1303   // if we are tracking promotions, then first ensure space for
1304   // promotion (including spooling space for saving header if necessary).
1305   // then allocate and copy, then track promoted info if needed.
1306   // When tracking (see PromotionInfo::track()), the mark word may
1307   // be displaced and in this case restoration of the mark word
1308   // occurs in the (oop_since_save_marks_)iterate phase.
1309   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1310     return NULL;
1311   }
1312   // Call the allocate(size_t, bool) form directly to avoid the
1313   // additional call through the allocate(size_t) form.  Having
1314   // the compiler inline the call is problematic because allocate(size_t)
1315   // is a virtual method.
1316   HeapWord* res = allocate(adjustObjectSize(obj_size));
1317   if (res != NULL) {
1318     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1319     // if we should be tracking promotions, do so.
1320     if (_promoInfo.tracking()) {
1321         _promoInfo.track((PromotedObject*)res);
1322     }
1323   }
1324   return oop(res);
1325 }
1326 
1327 HeapWord*
1328 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1329   assert_locked();
1330   assert(size >= MinChunkSize, "minimum chunk size");
1331   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1332     "maximum from smallLinearAllocBlock");
1333   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1334 }
1335 
1336 HeapWord*
1337 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1338                                                        size_t size) {
1339   assert_locked();
1340   assert(size >= MinChunkSize, "too small");
1341   HeapWord* res = NULL;
1342   // Try to do linear allocation from blk, making sure that it is not empty.
1343   if (blk->_word_size == 0) {
1344     // We have probably been unable to fill this either in the prologue or
1345     // when it was exhausted at the last linear allocation. Bail out until
1346     // next time.
1347     assert(blk->_ptr == NULL, "consistency check");
1348     return NULL;
1349   }
1350   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1351   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1352   if (res != NULL) return res;
1353 
1354   // about to exhaust this linear allocation block
1355   if (blk->_word_size == size) { // exactly satisfied
1356     res = blk->_ptr;
1357     _bt.allocated(res, blk->_word_size);
1358   } else if (size + MinChunkSize <= blk->_refillSize) {
1359     size_t sz = blk->_word_size;
1360     // Update _unallocated_block if the size is such that the chunk would be
1361     // returned to the indexed free list.  All other chunks in the indexed
1362     // free lists are allocated from the dictionary so that _unallocated_block
1363     // has already been adjusted for them.  Do it here so that the adjustment
1364     // is made for all chunks added back to the indexed free lists.
1365     if (sz < SmallForDictionary) {
1366       _bt.allocated(blk->_ptr, sz);
1367     }
1368     // Return the chunk that isn't big enough, and then refill below.
1369     addChunkToFreeLists(blk->_ptr, sz);
1370     split_birth(sz);
1371     // Don't keep statistics on adding back chunk from a LinAB.
1372   } else {
1373     // A refilled block would not satisfy the request.
1374     return NULL;
1375   }
1376 
1377   blk->_ptr = NULL; blk->_word_size = 0;
1378   refillLinearAllocBlock(blk);
1379   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1380          "block was replenished");
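       // If res was set above, the request was satisfied exactly by the old
       // block's remaining contents; otherwise carve the request out of the
       // freshly refilled block, if the refill succeeded.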
1381   if (res != NULL) {
1382     split_birth(size);
1383     repairLinearAllocBlock(blk);
1384   } else if (blk->_ptr != NULL) {
1385     res = blk->_ptr;
1386     size_t blk_size = blk->_word_size;
1387     blk->_word_size -= size;
1388     blk->_ptr  += size;
1389     split_birth(size);
1390     repairLinearAllocBlock(blk);
1391     // Update BOT last so that other (parallel) GC threads see a consistent
1392     // view of the BOT and free blocks.
1393     // Above must occur before BOT is updated below.
1394     OrderAccess::storestore();
1395     _bt.split_block(res, blk_size, size);  // adjust block offset table
1396   }
1397   return res;
1398 }
1399 
1400 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1401                                         LinearAllocBlock* blk,
1402                                         size_t size) {
1403   assert_locked();
1404   assert(size >= MinChunkSize, "too small");
1405 
1406   HeapWord* res = NULL;
1407   // This is the common case.  Keep it simple.
1408   if (blk->_word_size >= size + MinChunkSize) {
1409     assert(blk->_ptr != NULL, "consistency check");
1410     res = blk->_ptr;
1411     // Note that the BOT is up-to-date for the linAB before allocation.  It
1412     // indicates the start of the linAB.  The split_block() updates the
1413     // BOT for the linAB after the allocation (indicates the start of the
1414     // next chunk to be allocated).
1415     size_t blk_size = blk->_word_size;
1416     blk->_word_size -= size;
1417     blk->_ptr  += size;
1418     split_birth(size);
1419     repairLinearAllocBlock(blk);
1420     // Update BOT last so that other (parallel) GC threads see a consistent
1421     // view of the BOT and free blocks.
1422     // Above must occur before BOT is updated below.
1423     OrderAccess::storestore();
1424     _bt.split_block(res, blk_size, size);  // adjust block offset table
1425     _bt.allocated(res, size);
1426   }
1427   return res;
1428 }
1429 
1430 FreeChunk*
1431 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1432   assert_locked();
1433   assert(size < SmallForDictionary, "just checking");
1434   FreeChunk* res;
1435   res = _indexedFreeList[size].get_chunk_at_head();
1436   if (res == NULL) {
1437     res = getChunkFromIndexedFreeListHelper(size);
1438   }
1439   _bt.verify_not_unallocated((HeapWord*) res, size);
1440   assert(res == NULL || res->size() == size, "Incorrect block size");
1441   return res;
1442 }
1443 
1444 FreeChunk*
1445 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1446   bool replenish) {
1447   assert_locked();
1448   FreeChunk* fc = NULL;
1449   if (size < SmallForDictionary) {
1450     assert(_indexedFreeList[size].head() == NULL ||
1451       _indexedFreeList[size].surplus() <= 0,
1452       "List for this size should be empty or under populated");
1453     // Try best fit in exact lists before replenishing the list
1454     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1455       // Replenish list.
1456       //
1457       // Things tried that failed.
1458       //   Tried allocating out of the two LinAB's first before
1459       // replenishing lists.
1460       //   Tried small linAB of size 256 (size in indexed list)
1461       // and replenishing indexed lists from the small linAB.
1462       //
1463       FreeChunk* newFc = NULL;
1464       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
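           // The plan: obtain one chunk of replenish_size words and, when
           // replenishing, carve it into blocks of the requested size, keeping
           // one for the caller and adding the rest to the indexed free list.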
1465       if (replenish_size < SmallForDictionary) {
1466         // Do not replenish from an underpopulated size.
1467         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1468             _indexedFreeList[replenish_size].head() != NULL) {
1469           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1470         } else if (bestFitFirst()) {
1471           newFc = bestFitSmall(replenish_size);
1472         }
1473       }
1474       if (newFc == NULL && replenish_size > size) {
1475         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1476         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1477       }
1478       // Note: The split-death stats update for the block obtained above
1479       // is recorded below, precisely when we know we are actually going
1480       // to split it into more than one piece.
1481       if (newFc != NULL) {
1482         if  (replenish || CMSReplenishIntermediate) {
1483           // Replenish this list and return one block to caller.
1484           size_t i;
1485           FreeChunk *curFc, *nextFc;
1486           size_t num_blk = newFc->size() / size;
1487           assert(num_blk >= 1, "Smaller than requested?");
1488           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1489           if (num_blk > 1) {
1490             // we are sure we will be splitting the block just obtained
1491             // into multiple pieces; record the split-death of the original
1492             splitDeath(replenish_size);
1493           }
1494           // carve up and link blocks 0, ..., num_blk - 2
1495           // The last chunk is not added to the lists but is returned as the
1496           // free chunk.
1497           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1498                i = 0;
1499                i < (num_blk - 1);
1500                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1501                i++) {
1502             curFc->set_size(size);
1503             // Don't record this as a return in order to try and
1504             // determine the "returns" from a GC.
1505             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1506             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1507             _bt.mark_block((HeapWord*)curFc, size);
1508             split_birth(size);
1509             // Don't record the initial population of the indexed list
1510             // as a split birth.
1511           }
1512 
1513           // check that the arithmetic was OK above
1514           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1515             "inconsistency in carving newFc");
1516           curFc->set_size(size);
1517           _bt.mark_block((HeapWord*)curFc, size);
1518           split_birth(size);
1519           fc = curFc;
1520         } else {
1521           // Return entire block to caller
1522           fc = newFc;
1523         }
1524       }
1525     }
1526   } else {
1527     // Get a free chunk from the free chunk dictionary to be returned to
1528     // replenish the indexed free list.
1529     fc = getChunkFromDictionaryExact(size);
1530   }
1531   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1532   return fc;
1533 }
1534 
1535 FreeChunk*
1536 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1537   assert_locked();
1538   FreeChunk* fc = _dictionary->get_chunk(size,
1539                                          FreeBlockDictionary<FreeChunk>::atLeast);
1540   if (fc == NULL) {
1541     return NULL;
1542   }
1543   _bt.allocated((HeapWord*)fc, fc->size());
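       // If splitting would leave a remainder of at least MinChunkSize, carve
       // off the excess and return it to the free lists; otherwise hand back
       // the (slightly oversized) chunk as-is.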
1544   if (fc->size() >= size + MinChunkSize) {
1545     fc = splitChunkAndReturnRemainder(fc, size);
1546   }
1547   assert(fc->size() >= size, "chunk too small");
1548   assert(fc->size() < size + MinChunkSize, "chunk too big");
1549   _bt.verify_single_block((HeapWord*)fc, fc->size());
1550   return fc;
1551 }
1552 
1553 FreeChunk*
1554 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1555   assert_locked();
1556   FreeChunk* fc = _dictionary->get_chunk(size,
1557                                          FreeBlockDictionary<FreeChunk>::atLeast);
1558   if (fc == NULL) {
1559     return fc;
1560   }
1561   _bt.allocated((HeapWord*)fc, fc->size());
1562   if (fc->size() == size) {
1563     _bt.verify_single_block((HeapWord*)fc, size);
1564     return fc;
1565   }
1566   assert(fc->size() > size, "get_chunk() guarantee");
1567   if (fc->size() < size + MinChunkSize) {
1568     // Return the chunk to the dictionary and go get a bigger one.
1569     returnChunkToDictionary(fc);
1570     fc = _dictionary->get_chunk(size + MinChunkSize,
1571                                 FreeBlockDictionary<FreeChunk>::atLeast);
1572     if (fc == NULL) {
1573       return NULL;
1574     }
1575     _bt.allocated((HeapWord*)fc, fc->size());
1576   }
1577   assert(fc->size() >= size + MinChunkSize, "tautology");
1578   fc = splitChunkAndReturnRemainder(fc, size);
1579   assert(fc->size() == size, "chunk is wrong size");
1580   _bt.verify_single_block((HeapWord*)fc, size);
1581   return fc;
1582 }
1583 
1584 void
1585 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1586   assert_locked();
1587 
1588   size_t size = chunk->size();
1589   _bt.verify_single_block((HeapWord*)chunk, size);
1590   // adjust _unallocated_block downward, as necessary
1591   _bt.freed((HeapWord*)chunk, size);
1592   _dictionary->return_chunk(chunk);
1593 #ifndef PRODUCT
1594   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1595     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1596     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1597     tl->verify_stats();
1598   }
1599 #endif // PRODUCT
1600 }
1601 
1602 void
1603 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1604   assert_locked();
1605   size_t size = fc->size();
1606   _bt.verify_single_block((HeapWord*) fc, size);
1607   _bt.verify_not_unallocated((HeapWord*) fc, size);
1608   _indexedFreeList[size].return_chunk_at_tail(fc);
1609 #ifndef PRODUCT
1610   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1611      _indexedFreeList[size].verify_stats();
1612   }
1613 #endif // PRODUCT
1614 }
1615 
1616 // Add chunk to end of last block -- if it's the largest
1617 // block -- and update BOT and census data. We would
1618 // of course have preferred to coalesce it with the
1619 // last block, but it's currently less expensive to find the
1620 // largest block than it is to find the last.
1621 void
1622 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1623   HeapWord* chunk, size_t     size) {
1624   // check that the chunk does lie in this space!
1625   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1626   // One of the parallel gc task threads may be here
1627   // whilst others are allocating.
1628   Mutex* lock = &_parDictionaryAllocLock;
1629   FreeChunk* ec;
1630   {
1631     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1632     ec = dictionary()->find_largest_dict();  // get largest block
1633     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1634       // It's a coterminal block - we can coalesce.
1635       size_t old_size = ec->size();
1636       coalDeath(old_size);
1637       removeChunkFromDictionary(ec);
1638       size += old_size;
1639     } else {
1640       ec = (FreeChunk*)chunk;
1641     }
1642   }
1643   ec->set_size(size);
1644   debug_only(ec->mangleFreed(size));
1645   if (size < SmallForDictionary) {
1646     lock = _indexedFreeListParLocks[size];
1647   }
1648   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1649   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1650   // record the birth under the lock since the recording involves
1651   // manipulation of the list on which the chunk lives and
1652   // if the chunk is allocated and is the last on the list,
1653   // the list can go away.
1654   coalBirth(size);
1655 }
1656 
1657 void
1658 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1659                                               size_t     size) {
1660   // check that the chunk does lie in this space!
1661   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1662   assert_locked();
1663   _bt.verify_single_block(chunk, size);
1664 
1665   FreeChunk* fc = (FreeChunk*) chunk;
1666   fc->set_size(size);
1667   debug_only(fc->mangleFreed(size));
1668   if (size < SmallForDictionary) {
1669     returnChunkToFreeList(fc);
1670   } else {
1671     returnChunkToDictionary(fc);
1672   }
1673 }
1674 
1675 void
1676 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1677   size_t size, bool coalesced) {
1678   assert_locked();
1679   assert(chunk != NULL, "null chunk");
1680   if (coalesced) {
1681     // repair BOT
1682     _bt.single_block(chunk, size);
1683   }
1684   addChunkToFreeLists(chunk, size);
1685 }
1686 
1687 // We _must_ find the purported chunk on our free lists;
1688 // we assert if we don't.
1689 void
1690 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1691   size_t size = fc->size();
1692   assert_locked();
1693   debug_only(verifyFreeLists());
1694   if (size < SmallForDictionary) {
1695     removeChunkFromIndexedFreeList(fc);
1696   } else {
1697     removeChunkFromDictionary(fc);
1698   }
1699   _bt.verify_single_block((HeapWord*)fc, size);
1700   debug_only(verifyFreeLists());
1701 }
1702 
1703 void
1704 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1705   size_t size = fc->size();
1706   assert_locked();
1707   assert(fc != NULL, "null chunk");
1708   _bt.verify_single_block((HeapWord*)fc, size);
1709   _dictionary->remove_chunk(fc);
1710   // adjust _unallocated_block upward, as necessary
1711   _bt.allocated((HeapWord*)fc, size);
1712 }
1713 
1714 void
1715 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1716   assert_locked();
1717   size_t size = fc->size();
1718   _bt.verify_single_block((HeapWord*)fc, size);
1719   NOT_PRODUCT(
1720     if (FLSVerifyIndexTable) {
1721       verifyIndexedFreeList(size);
1722     }
1723   )
1724   _indexedFreeList[size].remove_chunk(fc);
1725   NOT_PRODUCT(
1726     if (FLSVerifyIndexTable) {
1727       verifyIndexedFreeList(size);
1728     }
1729   )
1730 }
1731 
1732 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1733   /* A hint is the next larger size that has a surplus.
1734      Start search at a size large enough to guarantee that
1735      the excess is >= MinChunkSize. */
1736   size_t start = align_object_size(numWords + MinChunkSize);
1737   if (start < IndexSetSize) {
1738     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1739     size_t    hint = _indexedFreeList[start].hint();
1740     while (hint < IndexSetSize) {
1741       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1742       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1743       if (fl->surplus() > 0 && fl->head() != NULL) {
1744         // Found a list with surplus, reset original hint
1745         // and split out a free chunk which is returned.
1746         _indexedFreeList[start].set_hint(hint);
1747         FreeChunk* res = getFromListGreater(fl, numWords);
1748         assert(res == NULL || res->is_free(),
1749           "Should be returning a free chunk");
1750         return res;
1751       }
1752       hint = fl->hint(); /* keep looking */
1753     }
1754     /* None found. */
1755     it[start].set_hint(IndexSetSize);
1756   }
1757   return NULL;
1758 }
1759 
1760 /* Requires fl->size >= numWords + MinChunkSize */
1761 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1762   size_t numWords) {
1763   FreeChunk *curr = fl->head();
1764   size_t oldNumWords = curr->size();
1765   assert(numWords >= MinChunkSize, "Word size is too small");
1766   assert(curr != NULL, "List is empty");
1767   assert(oldNumWords >= numWords + MinChunkSize,
1768         "Size of chunks in the list is too small");
1769 
1770   fl->remove_chunk(curr);
1771   // recorded indirectly by splitChunkAndReturnRemainder -
1772   // smallSplit(oldNumWords, numWords);
1773   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1774   // Does anything have to be done for the remainder in terms of
1775   // fixing the card table?
1776   assert(new_chunk == NULL || new_chunk->is_free(),
1777     "Should be returning a free chunk");
1778   return new_chunk;
1779 }
1780 
1781 FreeChunk*
1782 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1783   size_t new_size) {
1784   assert_locked();
1785   size_t size = chunk->size();
1786   assert(size > new_size, "Split from a smaller block?");
1787   assert(is_aligned(chunk), "alignment problem");
1788   assert(size == adjustObjectSize(size), "alignment problem");
1789   size_t rem_sz = size - new_size;
1790   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1791   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
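       // Carve the remainder (rem_sz words) off the tail of the chunk; the
       // front new_size words stay with 'chunk', which is returned to the
       // caller, while 'ffc' goes back onto a free list or the dictionary.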
1792   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1793   assert(is_aligned(ffc), "alignment problem");
1794   ffc->set_size(rem_sz);
1795   ffc->link_next(NULL);
1796   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1797   // Above must occur before BOT is updated below.
1798   // adjust block offset table
1799   OrderAccess::storestore();
1800   assert(chunk->is_free() && ffc->is_free(), "Error");
1801   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1802   if (rem_sz < SmallForDictionary) {
1803     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1804     bool is_par = Thread::current()->is_GC_task_thread();
1805     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1806     returnChunkToFreeList(ffc);
1807     split(size, rem_sz);
1808     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1809   } else {
1810     returnChunkToDictionary(ffc);
1811     split(size, rem_sz);
1812   }
1813   chunk->set_size(new_size);
1814   return chunk;
1815 }
1816 
1817 void
1818 CompactibleFreeListSpace::sweep_completed() {
1819   // Now that space is probably plentiful, refill linear
1820   // allocation blocks as needed.
1821   refillLinearAllocBlocksIfNeeded();
1822 }
1823 
1824 void
1825 CompactibleFreeListSpace::gc_prologue() {
1826   assert_locked();
1827   if (PrintFLSStatistics != 0) {
1828     gclog_or_tty->print("Before GC:\n");
1829     reportFreeListStatistics();
1830   }
1831   refillLinearAllocBlocksIfNeeded();
1832 }
1833 
1834 void
1835 CompactibleFreeListSpace::gc_epilogue() {
1836   assert_locked();
1837   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1838   _promoInfo.stopTrackingPromotions();
1839   repairLinearAllocationBlocks();
1840   // Print Space's stats
1841   if (PrintFLSStatistics != 0) {
1842     gclog_or_tty->print("After GC:\n");
1843     reportFreeListStatistics();
1844   }
1845 }
1846 
1847 // Iteration support, mostly delegated from a CMS generation
1848 
1849 void CompactibleFreeListSpace::save_marks() {
1850   assert(Thread::current()->is_VM_thread(),
1851          "Global variable should only be set when single-threaded");
1852   // Mark the "end" of the used space at the time of this call;
1853   // note, however, that promoted objects from this point
1854   // on are tracked in the _promoInfo below.
1855   set_saved_mark_word(unallocated_block());
1856 #ifdef ASSERT
1857   // Check the sanity of save_marks() etc.
1858   MemRegion ur    = used_region();
1859   MemRegion urasm = used_region_at_save_marks();
1860   assert(ur.contains(urasm),
1861          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1862          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1863          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1864 #endif
1865   // inform allocator that promotions should be tracked.
1866   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1867   _promoInfo.startTrackingPromotions();
1868 }
1869 
1870 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1871   assert(_promoInfo.tracking(), "No preceding save_marks?");
1872   return _promoInfo.noPromotions();
1873 }
1874 
1875 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1876                                                                             \
1877 void CompactibleFreeListSpace::                                             \
1878 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1879   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1880   /*                                                                        \
1881    * This also restores any displaced headers and removes the elements from \
1882    * the iteration set as they are processed, so that we have a clean slate \
1883    * at the end of the iteration. Note, thus, that if new objects are       \
1884    * promoted as a result of the iteration they are iterated over as well.  \
1885    */                                                                       \
1886   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1887 }
1888 
1889 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1890 
1891 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1892   return _smallLinearAllocBlock._word_size == 0;
1893 }
1894 
1895 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1896   // Fix up linear allocation blocks to look like free blocks
1897   repairLinearAllocBlock(&_smallLinearAllocBlock);
1898 }
1899 
1900 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1901   assert_locked();
1902   if (blk->_ptr != NULL) {
1903     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1904            "Minimum block size requirement");
1905     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1906     fc->set_size(blk->_word_size);
1907     fc->link_prev(NULL);   // mark as free
1908     fc->dontCoalesce();
1909     assert(fc->is_free(), "just marked it free");
1910     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1911   }
1912 }
1913 
1914 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1915   assert_locked();
1916   if (_smallLinearAllocBlock._ptr == NULL) {
1917     assert(_smallLinearAllocBlock._word_size == 0,
1918       "Size of linAB should be zero if the ptr is NULL");
1919     // Reset the linAB refill and allocation size limit.
1920     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1921   }
1922   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1923 }
1924 
1925 void
1926 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1927   assert_locked();
1928   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1929          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1930          "blk invariant");
1931   if (blk->_ptr == NULL) {
1932     refillLinearAllocBlock(blk);
1933   }
1934   if (PrintMiscellaneous && Verbose) {
1935     if (blk->_word_size == 0) {
1936       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
1937     }
1938   }
1939 }
1940 
1941 void
1942 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1943   assert_locked();
1944   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1945          "linear allocation block should be empty");
1946   FreeChunk* fc;
1947   if (blk->_refillSize < SmallForDictionary &&
1948       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1949     // A linAB's strategy might be to use small sizes to reduce
1950     // fragmentation but still get the benefits of allocation from a
1951     // linAB.
1952   } else {
1953     fc = getChunkFromDictionary(blk->_refillSize);
1954   }
1955   if (fc != NULL) {
1956     blk->_ptr  = (HeapWord*)fc;
1957     blk->_word_size = fc->size();
1958     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1959   }
1960 }
1961 
1962 // Support for compaction
1963 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1964   scan_and_forward(this, cp);
1965   // prepare_for_compaction() uses the space between live objects
1966   // so that later phases can skip dead space quickly.  As a result,
1967   // verification of the free lists doesn't work after this point.
1968 }
1969 
1970 void CompactibleFreeListSpace::adjust_pointers() {
1971   // In other versions of adjust_pointers(), a bail out
1972   // based on the amount of live data in the generation
1973   // (i.e., if 0, bail out) may be used.
1974   // Cannot test used() == 0 here because the free lists have already
1975   // been mangled by the compaction.
1976 
1977   scan_and_adjust_pointers(this);
1978   // See note about verification in prepare_for_compaction().
1979 }
1980 
1981 void CompactibleFreeListSpace::compact() {
1982   scan_and_compact(this);
1983 }
1984 
1985 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1986 // where fbs is free block sizes
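     // For example, a single free block yields 0.0 (no fragmentation), while
     // n equally sized free blocks yield 1 - 1/n, which approaches 1.0 as the
     // free space is split into ever more pieces.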
1987 double CompactibleFreeListSpace::flsFrag() const {
1988   size_t itabFree = totalSizeInIndexedFreeLists();
1989   double frag = 0.0;
1990   size_t i;
1991 
1992   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1993     double sz  = i;
1994     frag      += _indexedFreeList[i].count() * (sz * sz);
1995   }
1996 
1997   double totFree = itabFree +
1998                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1999   if (totFree > 0) {
2000     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2001             (totFree * totFree));
2002     frag = (double)1.0  - frag;
2003   } else {
2004     assert(frag == 0.0, "Follows from totFree == 0");
2005   }
2006   return frag;
2007 }
2008 
2009 void CompactibleFreeListSpace::beginSweepFLCensus(
2010   float inter_sweep_current,
2011   float inter_sweep_estimate,
2012   float intra_sweep_estimate) {
2013   assert_locked();
2014   size_t i;
2015   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2016     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2017     if (PrintFLSStatistics > 1) {
2018       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2019     }
2020     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2021     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2022     fl->set_before_sweep(fl->count());
2023     fl->set_bfr_surp(fl->surplus());
2024   }
2025   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2026                                     inter_sweep_current,
2027                                     inter_sweep_estimate,
2028                                     intra_sweep_estimate);
2029 }
2030 
2031 void CompactibleFreeListSpace::setFLSurplus() {
2032   assert_locked();
2033   size_t i;
2034   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2035     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2036     fl->set_surplus(fl->count() -
2037                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2038   }
2039 }
2040 
2041 void CompactibleFreeListSpace::setFLHints() {
2042   assert_locked();
2043   size_t i;
2044   size_t h = IndexSetSize;
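       // Walk the sizes from largest to smallest; each list's hint becomes the
       // nearest larger size that currently has a surplus, or IndexSetSize if
       // no such size exists.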
2045   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2046     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2047     fl->set_hint(h);
2048     if (fl->surplus() > 0) {
2049       h = i;
2050     }
2051   }
2052 }
2053 
2054 void CompactibleFreeListSpace::clearFLCensus() {
2055   assert_locked();
2056   size_t i;
2057   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2058     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2059     fl->set_prev_sweep(fl->count());
2060     fl->set_coal_births(0);
2061     fl->set_coal_deaths(0);
2062     fl->set_split_births(0);
2063     fl->set_split_deaths(0);
2064   }
2065 }
2066 
2067 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2068   if (PrintFLSStatistics > 0) {
2069     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2070     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2071                            p2i(largestAddr));
2072   }
2073   setFLSurplus();
2074   setFLHints();
2075   if (PrintGC && PrintFLSCensus > 0) {
2076     printFLCensus(sweep_count);
2077   }
2078   clearFLCensus();
2079   assert_locked();
2080   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2081 }
2082 
2083 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2084   if (size < SmallForDictionary) {
2085     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2086     return (fl->coal_desired() < 0) ||
2087            ((int)fl->count() > fl->coal_desired());
2088   } else {
2089     return dictionary()->coal_dict_over_populated(size);
2090   }
2091 }
2092 
2093 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2094   assert(size < SmallForDictionary, "Size too large for indexed list");
2095   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2096   fl->increment_coal_births();
2097   fl->increment_surplus();
2098 }
2099 
2100 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2101   assert(size < SmallForDictionary, "Size too large for indexed list");
2102   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2103   fl->increment_coal_deaths();
2104   fl->decrement_surplus();
2105 }
2106 
2107 void CompactibleFreeListSpace::coalBirth(size_t size) {
2108   if (size  < SmallForDictionary) {
2109     smallCoalBirth(size);
2110   } else {
2111     dictionary()->dict_census_update(size,
2112                                    false /* split */,
2113                                    true /* birth */);
2114   }
2115 }
2116 
2117 void CompactibleFreeListSpace::coalDeath(size_t size) {
2118   if (size < SmallForDictionary) {
2119     smallCoalDeath(size);
2120   } else {
2121     dictionary()->dict_census_update(size,
2122                                    false /* split */,
2123                                    false /* birth */);
2124   }
2125 }
2126 
2127 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2128   assert(size < SmallForDictionary, "Size too large for indexed list");
2129   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2130   fl->increment_split_births();
2131   fl->increment_surplus();
2132 }
2133 
2134 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2135   assert(size < SmallForDictionary, "Size too large for indexed list");
2136   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2137   fl->increment_split_deaths();
2138   fl->decrement_surplus();
2139 }
2140 
2141 void CompactibleFreeListSpace::split_birth(size_t size) {
2142   if (size  < SmallForDictionary) {
2143     smallSplitBirth(size);
2144   } else {
2145     dictionary()->dict_census_update(size,
2146                                    true /* split */,
2147                                    true /* birth */);
2148   }
2149 }
2150 
2151 void CompactibleFreeListSpace::splitDeath(size_t size) {
2152   if (size  < SmallForDictionary) {
2153     smallSplitDeath(size);
2154   } else {
2155     dictionary()->dict_census_update(size,
2156                                    true /* split */,
2157                                    false /* birth */);
2158   }
2159 }
2160 
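     // Census bookkeeping for splitting a chunk of size 'from' into two pieces
     // of sizes 'to1' and 'from - to1': one split death and two split births.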
2161 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2162   size_t to2 = from - to1;
2163   splitDeath(from);
2164   split_birth(to1);
2165   split_birth(to2);
2166 }
2167 
2168 void CompactibleFreeListSpace::print() const {
2169   print_on(tty);
2170 }
2171 
2172 void CompactibleFreeListSpace::prepare_for_verify() {
2173   assert_locked();
2174   repairLinearAllocationBlocks();
2175   // Verify that the SpoolBlocks look like free blocks of
2176   // appropriate sizes... To be done ...
2177 }
2178 
2179 class VerifyAllBlksClosure: public BlkClosure {
2180  private:
2181   const CompactibleFreeListSpace* _sp;
2182   const MemRegion                 _span;
2183   HeapWord*                       _last_addr;
2184   size_t                          _last_size;
2185   bool                            _last_was_obj;
2186   bool                            _last_was_live;
2187 
2188  public:
2189   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2190     MemRegion span) :  _sp(sp), _span(span),
2191                        _last_addr(NULL), _last_size(0),
2192                        _last_was_obj(false), _last_was_live(false) { }
2193 
2194   virtual size_t do_blk(HeapWord* addr) {
2195     size_t res;
2196     bool   was_obj  = false;
2197     bool   was_live = false;
2198     if (_sp->block_is_obj(addr)) {
2199       was_obj = true;
2200       oop p = oop(addr);
2201       guarantee(p->is_oop(), "Should be an oop");
2202       res = _sp->adjustObjectSize(p->size());
2203       if (_sp->obj_is_alive(addr)) {
2204         was_live = true;
2205         p->verify();
2206       }
2207     } else {
2208       FreeChunk* fc = (FreeChunk*)addr;
2209       res = fc->size();
2210       if (FLSVerifyLists && !fc->cantCoalesce()) {
2211         guarantee(_sp->verify_chunk_in_free_list(fc),
2212                   "Chunk should be on a free list");
2213       }
2214     }
2215     if (res == 0) {
2216       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2217       gclog_or_tty->print_cr(
2218         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2219         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2220         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2221         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2222       _sp->print_on(gclog_or_tty);
2223       guarantee(false, "Seppuku!");
2224     }
2225     _last_addr = addr;
2226     _last_size = res;
2227     _last_was_obj  = was_obj;
2228     _last_was_live = was_live;
2229     return res;
2230   }
2231 };
2232 
2233 class VerifyAllOopsClosure: public OopClosure {
2234  private:
2235   const CMSCollector*             _collector;
2236   const CompactibleFreeListSpace* _sp;
2237   const MemRegion                 _span;
2238   const bool                      _past_remark;
2239   const CMSBitMap*                _bit_map;
2240 
2241  protected:
2242   void do_oop(void* p, oop obj) {
2243     if (_span.contains(obj)) { // the interior oop points into CMS heap
2244       if (!_span.contains(p)) { // reference from outside CMS heap
2245         // Should be a valid object; the first disjunct below allows
2246         // us to sidestep an assertion in block_is_obj() that insists
2247         // that p be in _sp. Note that several generations (and spaces)
2248         // are spanned by _span (CMS heap) above.
2249         guarantee(!_sp->is_in_reserved(obj) ||
2250                   _sp->block_is_obj((HeapWord*)obj),
2251                   "Should be an object");
2252         guarantee(obj->is_oop(), "Should be an oop");
2253         obj->verify();
2254         if (_past_remark) {
2255           // Remark has been completed, the object should be marked
2256           _bit_map->isMarked((HeapWord*)obj);
2257         }
2258       } else { // reference within CMS heap
2259         if (_past_remark) {
2260           // Remark has been completed -- so the referent should have
2261           // been marked, if referring object is.
2262           if (_bit_map->isMarked(_collector->block_start(p))) {
2263             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2264           }
2265         }
2266       }
2267     } else if (_sp->is_in_reserved(p)) {
2268       // the reference is from FLS, and points out of FLS
2269       guarantee(obj->is_oop(), "Should be an oop");
2270       obj->verify();
2271     }
2272   }
2273 
2274   template <class T> void do_oop_work(T* p) {
2275     T heap_oop = oopDesc::load_heap_oop(p);
2276     if (!oopDesc::is_null(heap_oop)) {
2277       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2278       do_oop(p, obj);
2279     }
2280   }
2281 
2282  public:
2283   VerifyAllOopsClosure(const CMSCollector* collector,
2284     const CompactibleFreeListSpace* sp, MemRegion span,
2285     bool past_remark, CMSBitMap* bit_map) :
2286     _collector(collector), _sp(sp), _span(span),
2287     _past_remark(past_remark), _bit_map(bit_map) { }
2288 
2289   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2290   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2291 };
2292 
2293 void CompactibleFreeListSpace::verify() const {
2294   assert_lock_strong(&_freelistLock);
2295   verify_objects_initialized();
2296   MemRegion span = _collector->_span;
2297   bool past_remark = (_collector->abstract_state() ==
2298                       CMSCollector::Sweeping);
2299 
2300   ResourceMark rm;
2301   HandleMark  hm;
2302 
2303   // Check integrity of CFL data structures
2304   _promoInfo.verify();
2305   _dictionary->verify();
2306   if (FLSVerifyIndexTable) {
2307     verifyIndexedFreeLists();
2308   }
2309   // Check integrity of all objects and free blocks in space
2310   {
2311     VerifyAllBlksClosure cl(this, span);
2312     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2313   }
2314   // Check that all references in the heap to FLS
2315   // are to valid objects in FLS or that references in
2316   // FLS are to valid objects elsewhere in the heap
2317   if (FLSVerifyAllHeapReferences)
2318   {
2319     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2320       _collector->markBitMap());
2321 
2322     // Iterate over all oops in the heap. Uses the _no_header version
2323     // since we are not interested in following the klass pointers.
2324     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2325   }
2326 
2327   if (VerifyObjectStartArray) {
2328     // Verify the block offset table
2329     _bt.verify();
2330   }
2331 }
2332 
2333 #ifndef PRODUCT
2334 void CompactibleFreeListSpace::verifyFreeLists() const {
2335   if (FLSVerifyLists) {
2336     _dictionary->verify();
2337     verifyIndexedFreeLists();
2338   } else {
2339     if (FLSVerifyDictionary) {
2340       _dictionary->verify();
2341     }
2342     if (FLSVerifyIndexTable) {
2343       verifyIndexedFreeLists();
2344     }
2345   }
2346 }
2347 #endif
2348 
2349 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2350   size_t i = 0;
2351   for (; i < IndexSetStart; i++) {
2352     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2353   }
2354   for (; i < IndexSetSize; i++) {
2355     verifyIndexedFreeList(i);
2356   }
2357 }
2358 
2359 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2360   FreeChunk* fc   =  _indexedFreeList[size].head();
2361   FreeChunk* tail =  _indexedFreeList[size].tail();
2362   size_t    num = _indexedFreeList[size].count();
2363   size_t      n = 0;
2364   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2365             "Slot should have been empty");
2366   for (; fc != NULL; fc = fc->next(), n++) {
2367     guarantee(fc->size() == size, "Size inconsistency");
2368     guarantee(fc->is_free(), "!free?");
2369     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2370     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2371   }
2372   guarantee(n == num, "Incorrect count");
2373 }
2374 
2375 #ifndef PRODUCT
2376 void CompactibleFreeListSpace::check_free_list_consistency() const {
2377   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2378     "Some sizes can't be allocated without recourse to"
2379     " linear allocation buffers");
2380   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2381     "else MIN_TREE_CHUNK_SIZE is wrong");
2382   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2383   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2384 }
2385 #endif
2386 
2387 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2388   assert_lock_strong(&_freelistLock);
2389   AdaptiveFreeList<FreeChunk> total;
2390   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2391   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2392   size_t total_free = 0;
2393   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2394     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2395     total_free += fl->count() * fl->size();
2396     if (i % (40*IndexSetStride) == 0) {
2397       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2398     }
2399     fl->print_on(gclog_or_tty);
2400     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2401     total.set_surplus(    total.surplus()     + fl->surplus()    );
2402     total.set_desired(    total.desired()     + fl->desired()    );
2403     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2404     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2405     total.set_count(      total.count()       + fl->count()      );
2406     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2407     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2408     total.set_split_births(total.split_births() + fl->split_births());
2409     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2410   }
2411   total.print_on(gclog_or_tty, "TOTAL");
2412   gclog_or_tty->print_cr("Total free in indexed lists "
2413                          SIZE_FORMAT " words", total_free);
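       // growth:  net births (split + coalesce) since the previous sweep,
       //          relative to the population at the previous sweep.
       // deficit: shortfall of the current count relative to the desired
       //          count, as a fraction of the desired count.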
2414   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2415     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2416             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2417     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2418   _dictionary->print_dict_census();
2419 }
2420 
2421 ///////////////////////////////////////////////////////////////////////////
2422 // CFLS_LAB
2423 ///////////////////////////////////////////////////////////////////////////
2424 
2425 #define VECTOR_257(x)                                                                                  \
2426   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2427   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2428      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2429      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2430      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2431      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2432      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2433      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2434      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2435      x }
2436 
2437 // Initialize with default setting for CMS, _not_
2438 // generic OldPLABSize, whose static default is different; if overridden at the
2439 // command-line, this will get reinitialized via a call to
2440 // modify_initialization() below.
2441 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2442   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
2443 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2444 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2445 
2446 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2447   _cfls(cfls)
2448 {
2449   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2450   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2451        i < CompactibleFreeListSpace::IndexSetSize;
2452        i += CompactibleFreeListSpace::IndexSetStride) {
2453     _indexedFreeList[i].set_size(i);
2454     _num_blocks[i] = 0;
2455   }
2456 }
2457 
2458 static bool _CFLS_LAB_modified = false;
2459 
2460 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2461   assert(!_CFLS_LAB_modified, "Call only once");
2462   _CFLS_LAB_modified = true;
2463   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2464        i < CompactibleFreeListSpace::IndexSetSize;
2465        i += CompactibleFreeListSpace::IndexSetStride) {
2466     _blocks_to_claim[i].modify(n, wt, true /* force */);
2467   }
2468 }
2469 
2470 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2471   FreeChunk* res;
2472   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2473   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2474     // This locking manages sync with other large object allocations.
2475     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2476                     Mutex::_no_safepoint_check_flag);
2477     res = _cfls->getChunkFromDictionaryExact(word_sz);
2478     if (res == NULL) return NULL;
2479   } else {
2480     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2481     if (fl->count() == 0) {
2482       // Attempt to refill this local free list.
2483       get_from_global_pool(word_sz, fl);
2484       // If it didn't work, give up.
2485       if (fl->count() == 0) return NULL;
2486     }
2487     res = fl->get_chunk_at_head();
2488     assert(res != NULL, "Why was count non-zero?");
2489   }
2490   res->markNotFree();
2491   assert(!res->is_free(), "shouldn't be marked free");
2492   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2493   // mangle a just allocated object with a distinct pattern.
2494   debug_only(res->mangleAllocated(word_sz));
2495   return (HeapWord*)res;
2496 }
2497 
2498 // Get a chunk of blocks of the right size and update related
2499 // book-keeping stats
2500 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2501   // Get the #blocks we want to claim
2502   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2503   assert(n_blks > 0, "Error");
2504   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2505   // In some cases, when the application has a phase change,
2506   // there may be a sudden and sharp shift in the object survival
2507   // profile, and updating the counts at the end of a scavenge
2508   // may not be quick enough, giving rise to large scavenge pauses
2509   // during these phase changes. It is beneficial to detect such
2510   // changes on-the-fly during a scavenge and avoid such a phase-change
2511   // pothole. The following code is a heuristic attempt to do that.
2512   // It is protected by a product flag until we have gained
2513   // enough experience with this heuristic and fine-tuned its behavior.
2514   // WARNING: This might increase fragmentation if we overreact to
2515   // small spikes, so some kind of historical smoothing based on
2516   // previous experience with the greater reactivity might be useful.
2517   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2518   // default.
2519   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2520     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2521     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2522     n_blks = MIN2(n_blks, CMSOldPLABMax);
2523   }
2524   assert(n_blks > 0, "Error");
2525   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2526   // Update stats table entry for this block size
2527   _num_blocks[word_sz] += fl->count();
2528 }
2529 
2530 void CFLS_LAB::compute_desired_plab_size() {
2531   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2532        i < CompactibleFreeListSpace::IndexSetSize;
2533        i += CompactibleFreeListSpace::IndexSetStride) {
2534     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2535            "Counter inconsistency");
2536     if (_global_num_workers[i] > 0) {
2537       // Need to smooth wrt historical average
2538       if (ResizeOldPLAB) {
2539         _blocks_to_claim[i].sample(
2540           MAX2(CMSOldPLABMin,
2541           MIN2(CMSOldPLABMax,
2542                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2543       }
2544       // Reset counters for next round
2545       _global_num_workers[i] = 0;
2546       _global_num_blocks[i] = 0;
2547       if (PrintOldPLAB) {
2548         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2549                                i, (size_t)_blocks_to_claim[i].average());
2550       }
2551     }
2552   }
2553 }
2554 
2555 // If this is changed in the future to allow parallel
2556 // access, one would need to take the FL locks and,
2557 // depending on how it is used, stagger access from
2558 // parallel threads to reduce contention.
2559 void CFLS_LAB::retire(int tid) {
2560   // We run this single-threaded with the world stopped,
2561   // so there is no need for locks and such.
2562   NOT_PRODUCT(Thread* t = Thread::current();)
2563   assert(Thread::current()->is_VM_thread(), "Error");
2564   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2565        i < CompactibleFreeListSpace::IndexSetSize;
2566        i += CompactibleFreeListSpace::IndexSetStride) {
2567     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2568            "Can't retire more than what we obtained");
2569     if (_num_blocks[i] > 0) {
2570       size_t num_retire =  _indexedFreeList[i].count();
2571       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2572       {
2573         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2574         //                Mutex::_no_safepoint_check_flag);
2575 
2576         // Update global stats for num_blocks used
2577         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2578         _global_num_workers[i]++;
2579         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
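             // For example, a worker that obtained _num_blocks[i] == 100 blocks
             // of this size during the scavenge and retires with num_retire == 12
             // still unused on its local list contributes 100 - 12 == 88 used
             // blocks to _global_num_blocks[i]; the 12 leftovers are prepended
             // back onto the shared free list below.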
2580         if (num_retire > 0) {
2581           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2582           // Reset this list.
2583           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2584           _indexedFreeList[i].set_size(i);
2585         }
2586       }
2587       if (PrintOldPLAB) {
2588         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2589                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2590       }
2591       // Reset stats for next round
2592       _num_blocks[i]         = 0;
2593     }
2594   }
2595 }
2596 
2597 // Used by par_get_chunk_of_blocks() for the chunks from the
2598 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2599 // of "word_sz" and, if found, splits it into "word_sz"-sized chunks and
2600 // adds them to the free list "fl".  "n" is the maximum number of chunks to
2601 // be added to "fl".
2602 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2603 
2604   // We'll try all multiples of word_sz in the indexed set, starting with
2605   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples.
2606   // (Splitting a big dictionary chunk is the caller's fallback.)
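       // For example (hypothetical sizes): for word_sz == 8 and a request for
       // n == 6 blocks, k == 1 first checks the size-8 list; if that is empty and
       // CMSSplitIndexedFreeListBlocks is enabled, k == 2 checks the size-16 list,
       // and so on while k * word_sz stays below IndexSetSize.  If the size-24
       // list (k == 3) is the first non-empty one, nn == MAX2(6/3, 1) == 2 chunks
       // are taken from it and each is split 3 ways below, yielding up to the 6
       // requested blocks of size 8 on "fl".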
2607   {
2608     bool found;
2609     int  k;
2610     size_t cur_sz;
2611     for (k = 1, cur_sz = k * word_sz, found = false;
2612          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2613          (CMSSplitIndexedFreeListBlocks || k <= 1);
2614          k++, cur_sz = k * word_sz) {
2615       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2616       fl_for_cur_sz.set_size(cur_sz);
2617       {
2618         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2619                         Mutex::_no_safepoint_check_flag);
2620         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2621         if (gfl->count() != 0) {
2622           // nn is the number of chunks of size cur_sz that
2623           // we'd need to split k-ways each, in order to create
2624           // "n" chunks of size word_sz each.
2625           const size_t nn = MAX2(n/k, (size_t)1);
2626           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2627           found = true;
2628           if (k > 1) {
2629             // Update split death stats for the cur_sz-size blocks list:
2630             // we increment the split death count by the number of blocks
2631             // we just took from the cur_sz-size blocks list and which
2632             // we will be splitting below.
2633             ssize_t deaths = gfl->split_deaths() +
2634                              fl_for_cur_sz.count();
2635             gfl->set_split_deaths(deaths);
2636           }
2637         }
2638       }
2639       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2640       if (found) {
2641         if (k == 1) {
2642           fl->prepend(&fl_for_cur_sz);
2643         } else {
2644           // Divide each block on fl_for_cur_sz up k ways.
2645           FreeChunk* fc;
2646           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2647             // Must do this in reverse order, so that anybody attempting to
2648             // access the main chunk sees it as a single free block until we
2649             // change it.
2650             size_t fc_size = fc->size();
2651             assert(fc->is_free(), "Error");
2652             for (int i = k-1; i >= 0; i--) {
2653               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2654               assert((i != 0) ||
2655                         ((fc == ffc) && ffc->is_free() &&
2656                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2657                         "Counting error");
2658               ffc->set_size(word_sz);
2659               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2660               ffc->link_next(NULL);
2661               // Above must occur before BOT is updated below.
2662               OrderAccess::storestore();
2663               // Splitting from the right: after the decrement below, fc_size == i * word_sz.
2664               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2665               fc_size -= word_sz;
2666               assert(fc_size == i*word_sz, "Error");
2667               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2668               _bt.verify_single_block((HeapWord*)fc, fc_size);
2669               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2670               // Push this on "fl".
2671               fl->return_chunk_at_head(ffc);
2672             }
2673             // TRAP
2674             assert(fl->tail()->next() == NULL, "List invariant.");
2675           }
2676         }
2677         // Update birth stats for this block size.
2678         size_t num = fl->count();
2679         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2680                         Mutex::_no_safepoint_check_flag);
2681         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2682         _indexedFreeList[word_sz].set_split_births(births);
2683         return true;
2684       }
2685     }
2686     return found;
2687   }
2688 }
2689 
2690 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2691 
2692   FreeChunk* fc = NULL;
2693   FreeChunk* rem_fc = NULL;
2694   size_t rem;
2695   {
2696     MutexLockerEx x(parDictionaryAllocLock(),
2697                     Mutex::_no_safepoint_check_flag);
2698     while (n > 0) {
2699       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2700                                   FreeBlockDictionary<FreeChunk>::atLeast);
2701       if (fc != NULL) {
2702         break;
2703       } else {
2704         n--;
2705       }
2706     }
2707     if (fc == NULL) return NULL;
2708     // Otherwise, split up that block.
2709     assert((ssize_t)n >= 1, "Control point invariant");
2710     assert(fc->is_free(), "Error: should be a free block");
2711     _bt.verify_single_block((HeapWord*)fc, fc->size());
2712     const size_t nn = fc->size() / word_sz;
2713     n = MIN2(nn, n);
2714     assert((ssize_t)n >= 1, "Control point invariant");
2715     rem = fc->size() - n * word_sz;
2716     // If there is a remainder, and it's too small, allocate one fewer.
2717     if (rem > 0 && rem < MinChunkSize) {
2718       n--; rem += word_sz;
2719     }
2720     // Note that at this point we may have n == 0.
2721     assert((ssize_t)n >= 0, "Control point invariant");
2722 
2723     // If n is 0, the chunk fc that was found is not large
2724     // enough to leave a viable remainder.  We are unable to
2725     // allocate even one block.  Return fc to the
2726     // dictionary and return NULL; the caller leaves its "fl" empty.
2727     if (n == 0) {
2728       returnChunkToDictionary(fc);
2729       return NULL;
2730     }
2731 
2732     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2733     dictionary()->dict_census_update(fc->size(),
2734                                      true /*split*/,
2735                                      false /*birth*/);
2736 
2737     // First return the remainder, if any.
2738     // Note that we hold the lock until we decide if we're going to give
2739     // back the remainder to the dictionary, since a concurrent allocation
2740     // may otherwise see the heap as empty.  (We're willing to take that
2741     // hit if the block is a small block.)
2742     if (rem > 0) {
2743       size_t prefix_size = n * word_sz;
2744       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2745       rem_fc->set_size(rem);
2746       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2747       rem_fc->link_next(NULL);
2748       // Above must occur before BOT is updated below.
2749       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2750       OrderAccess::storestore();
2751       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2752       assert(fc->is_free(), "Error");
2753       fc->set_size(prefix_size);
2754       if (rem >= IndexSetSize) {
2755         returnChunkToDictionary(rem_fc);
2756         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2757         rem_fc = NULL;
2758       }
2759       // Otherwise, return it to the small list below.
2760     }
2761   }
2762   if (rem_fc != NULL) {
2763     MutexLockerEx x(_indexedFreeListParLocks[rem],
2764                     Mutex::_no_safepoint_check_flag);
2765     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2766     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2767     smallSplitBirth(rem);
2768   }
2769   assert(n * word_sz == fc->size(),
2770          "Chunk size " SIZE_FORMAT " is not exactly splittable into "
2771          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2772          fc->size(), n, word_sz);
2773   return fc;
2774 }
2775 
2776 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2777 
2778   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2779 
2780   if (fc == NULL) {
2781     return;
2782   }
2783 
2784   size_t n = fc->size() / word_sz;
2785 
2786   assert((ssize_t)n > 0, "Consistency");
2787   // Now do the splitting up.
2788   // Must do this in reverse order, so that anybody attempting to
2789   // access the main chunk sees it as a single free block until we
2790   // change it.
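       // For example, with n == 4 the loop below first carves off the block at
       // fc + 3*word_sz, then fc + 2*word_sz, then fc + word_sz; only after the
       // loop is fc itself shrunk to word_sz, so until that final step a
       // concurrent observer still sees fc as a single free block of size
       // n * word_sz.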
2791   size_t fc_size = n * word_sz;
2792   // All but first chunk in this loop
2793   for (ssize_t i = n-1; i > 0; i--) {
2794     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2795     ffc->set_size(word_sz);
2796     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2797     ffc->link_next(NULL);
2798     // Above must occur before BOT is updated below.
2799     OrderAccess::storestore();
2800     // Splitting from the right: after the decrement below, fc_size == i * word_sz.
2801     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2802     fc_size -= word_sz;
2803     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2804     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2805     _bt.verify_single_block((HeapWord*)fc, fc_size);
2806     // Push this on "fl".
2807     fl->return_chunk_at_head(ffc);
2808   }
2809   // First chunk
2810   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2811   // The blocks above should show their new sizes before the first block (fc) below changes size.
2812   fc->set_size(word_sz);
2813   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2814   fc->link_next(NULL);
2815   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2816   _bt.verify_single_block((HeapWord*)fc, fc->size());
2817   fl->return_chunk_at_head(fc);
2818 
2819   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2820   {
2821     // Update the stats for this block size.
2822     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2823                     Mutex::_no_safepoint_check_flag);
2824     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2825     _indexedFreeList[word_sz].set_split_births(births);
2826     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2827     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2828   }
2829 
2830   // TRAP
2831   assert(fl->tail()->next() == NULL, "List invariant.");
2832 }
2833 
2834 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2835   assert(fl->count() == 0, "Precondition.");
2836   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2837          "Precondition");
2838 
2839   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2840     // Got it
2841     return;
2842   }
2843 
2844   // Otherwise, we'll split a block from the dictionary.
2845   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2846 }
2847 
2848 // Set up the space's par_seq_tasks structure for work claiming
2849 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2850 // XXX Need to suitably abstract and generalize this and the next
2851 // method into one.
2852 void
2853 CompactibleFreeListSpace::
2854 initialize_sequential_subtasks_for_rescan(int n_threads) {
2855   // The "size" of each task is fixed according to rescan_task_size.
2856   assert(n_threads > 0, "Unexpected n_threads argument");
2857   const size_t task_size = rescan_task_size();
2858   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
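       // This is a ceiling division: e.g., if used_region().word_size() were
       // 10 * task_size + 1 words, we would get n_tasks == 11, with the last
       // task covering only the final word.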
2859   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2860   assert(n_tasks == 0 ||
2861          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2862           (used_region().start() + n_tasks*task_size >= used_region().end())),
2863          "n_tasks calculation incorrect");
2864   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2865   assert(!pst->valid(), "Clobbering existing data?");
2866   // Sets the condition for completion of the subtask (how many threads
2867   // need to finish in order to be done).
2868   pst->set_n_threads(n_threads);
2869   pst->set_n_tasks((int)n_tasks);
2870 }
2871 
2872 // Set up the space's par_seq_tasks structure for work claiming
2873 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2874 void
2875 CompactibleFreeListSpace::
2876 initialize_sequential_subtasks_for_marking(int n_threads,
2877                                            HeapWord* low) {
2878   // The "size" of each task is fixed according to marking_task_size.
2879   assert(n_threads > 0, "Unexpected n_threads argument");
2880   const size_t task_size = marking_task_size();
2881   assert(task_size > CardTableModRefBS::card_size_in_words &&
2882          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2883          "Otherwise arithmetic below would be incorrect");
2884   MemRegion span = _old_gen->reserved();
2885   if (low != NULL) {
2886     if (span.contains(low)) {
2887       // Align low down to a card boundary so that
2888       // we can use block_offset_careful() on span boundaries.
2889       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2890                                  CardTableModRefBS::card_size);
2891       // Clip span prefix at aligned_low
2892       span = span.intersection(MemRegion(aligned_low, span.end()));
2893     } else if (low > span.end()) {
2894       span = MemRegion(low, low);  // Null region
2895     } // else use entire span
2896   }
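       // For example, assuming 512-byte cards and a card-aligned span start:
       // a "low" that lies 1300 bytes into the span is rounded back to the
       // 1024-byte offset and the prefix below it is dropped; a "low" beyond
       // span.end() leaves an empty span, so no marking tasks are created below.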
2897   assert(span.is_empty() ||
2898          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2899         "span should start at a card boundary");
2900   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2901   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2902   assert(n_tasks == 0 ||
2903          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2904           (span.start() + n_tasks*task_size >= span.end())),
2905          "n_tasks calculation incorrect");
2906   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2907   assert(!pst->valid(), "Clobbering existing data?");
2908   // Sets the condition for completion of the subtask (how many threads
2909   // need to finish in order to be done).
2910   pst->set_n_threads(n_threads);
2911   pst->set_n_tasks((int)n_tasks);
2912 }