1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/space.inline.hpp"
  34 #include "gc/shared/spaceDecorator.hpp"
  35 #include "memory/allocation.inline.hpp"
  36 #include "memory/resourceArea.hpp"
  37 #include "memory/universe.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 #include "runtime/vmThread.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 /////////////////////////////////////////////////////////////////////////
  48 //// CompactibleFreeListSpace
  49 /////////////////////////////////////////////////////////////////////////
  50 
  51 // highest ranked  free list lock rank
  52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  53 
  54 // Defaults are 0 so things will break badly if incorrectly initialized.
  55 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  57 
  58 size_t MinChunkSize = 0;
  59 
  60 void CompactibleFreeListSpace::set_cms_values() {
  61   // Set CMS global values
  62   assert(MinChunkSize == 0, "already set");
  63 
  64   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  65   // for chunks to contain a FreeChunk.
  66   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  67   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  68 
  69   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  70   IndexSetStart  = MinChunkSize;
  71   IndexSetStride = MinObjAlignment;
  72 }
  73 
  74 // Constructor
  75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  76   _bt(bs, mr),
  77   // free list locks are in the range of values taken by _lockRank
  78   // This range currently is [_leaf+2, _leaf+3]
  79   // Note: this requires that CFLspace c'tors
  80   // are called serially in the order in which the locks are
  81   // are acquired in the program text. This is true today.
  82   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  83                 Monitor::_safepoint_check_sometimes),
  84   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  85                           "CompactibleFreeListSpace._dict_par_lock", true,
  86                           Monitor::_safepoint_check_never),
  87   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  88                     CMSRescanMultiple),
  89   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSConcMarkMultiple),
  91   _collector(NULL),
  92   _preconsumptionDirtyCardClosure(NULL)
  93 {
  94   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  95          "FreeChunk is larger than expected");
  96   _bt.set_space(this);
  97   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  98 
  99   _dictionary = new AFLBinaryTreeDictionary(mr);
 100 
 101   assert(_dictionary != NULL, "CMS dictionary initialization");
 102   // The indexed free lists are initially all empty and are lazily
 103   // filled in on demand. Initialize the array elements to NULL.
 104   initializeIndexedFreeListArray();
 105 
 106   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 107                              SmallForLinearAlloc);
 108 
 109   // CMSIndexedFreeListReplenish should be at least 1
 110   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 111   _promoInfo.setSpace(this);
 112   if (UseCMSBestFit) {
 113     _fitStrategy = FreeBlockBestFitFirst;
 114   } else {
 115     _fitStrategy = FreeBlockStrategyNone;
 116   }
 117   check_free_list_consistency();
 118 
 119   // Initialize locks for parallel case.
 120   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 121     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 122                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 123     DEBUG_ONLY(
 124       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 125     )
 126   }
 127   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 128 }
 129 
 130 // Like CompactibleSpace forward() but always calls cross_threshold() to
 131 // update the block offset table.  Removed initialize_threshold call because
 132 // CFLS does not use a block offset array for contiguous spaces.
 133 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 134                                     CompactPoint* cp, HeapWord* compact_top) {
 135   // q is alive
 136   // First check if we should switch compaction space
 137   assert(this == cp->space, "'this' should be current compaction space.");
 138   size_t compaction_max_size = pointer_delta(end(), compact_top);
 139   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 140     "virtual adjustObjectSize_v() method is not correct");
 141   size_t adjusted_size = adjustObjectSize(size);
 142   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 143          "no small fragments allowed");
 144   assert(minimum_free_block_size() == MinChunkSize,
 145          "for de-virtualized reference below");
 146   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
 147   if (adjusted_size + MinChunkSize > compaction_max_size &&
 148       adjusted_size != compaction_max_size) {
 149     do {
 150       // switch to next compaction space
 151       cp->space->set_compaction_top(compact_top);
 152       cp->space = cp->space->next_compaction_space();
 153       if (cp->space == NULL) {
 154         cp->gen = GenCollectedHeap::heap()->young_gen();
 155         assert(cp->gen != NULL, "compaction must succeed");
 156         cp->space = cp->gen->first_compaction_space();
 157         assert(cp->space != NULL, "generation must have a first compaction space");
 158       }
 159       compact_top = cp->space->bottom();
 160       cp->space->set_compaction_top(compact_top);
 161       // The correct adjusted_size may not be the same as that for this method
 162       // (i.e., cp->space may no longer be "this" so adjust the size again.
 163       // Use the virtual method which is not used above to save the virtual
 164       // dispatch.
 165       adjusted_size = cp->space->adjust_object_size_v(size);
 166       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 167       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 168     } while (adjusted_size > compaction_max_size);
 169   }
 170 
 171   // store the forwarding pointer into the mark word
 172   if ((HeapWord*)q != compact_top) {
 173     q->forward_to(oop(compact_top));
 174     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 175   } else {
 176     // if the object isn't moving we can just set the mark to the default
 177     // mark and handle it specially later on.
 178     q->init_mark();
 179     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 180   }
 181 
 182   compact_top += adjusted_size;
 183 
 184   // we need to update the offset table so that the beginnings of objects can be
 185   // found during scavenge.  Note that we are updating the offset table based on
 186   // where the object will be once the compaction phase finishes.
 187 
 188   // Always call cross_threshold().  A contiguous space can only call it when
 189   // the compaction_top exceeds the current threshold but not for an
 190   // non-contiguous space.
 191   cp->threshold =
 192     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 193   return compact_top;
 194 }
 195 
 196 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 197 // and use of single_block instead of alloc_block.  The name here is not really
 198 // appropriate - maybe a more general name could be invented for both the
 199 // contiguous and noncontiguous spaces.
 200 
 201 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 202   _bt.single_block(start, the_end);
 203   return end();
 204 }
 205 
 206 // Initialize them to NULL.
 207 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 208   for (size_t i = 0; i < IndexSetSize; i++) {
 209     // Note that on platforms where objects are double word aligned,
 210     // the odd array elements are not used.  It is convenient, however,
 211     // to map directly from the object size to the array element.
 212     _indexedFreeList[i].reset(IndexSetSize);
 213     _indexedFreeList[i].set_size(i);
 214     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 215     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 216     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 217     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 218   }
 219 }
 220 
 221 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 222   return adjustObjectSize(oop(addr)->size());
 223 }
 224 
 225 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 226   for (size_t i = 1; i < IndexSetSize; i++) {
 227     assert(_indexedFreeList[i].size() == (size_t) i,
 228       "Indexed free list sizes are incorrect");
 229     _indexedFreeList[i].reset(IndexSetSize);
 230     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 231     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 232     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 233     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 234   }
 235 }
 236 
 237 void CompactibleFreeListSpace::reset(MemRegion mr) {
 238   resetIndexedFreeListArray();
 239   dictionary()->reset();
 240   if (BlockOffsetArrayUseUnallocatedBlock) {
 241     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 242     // Everything's allocated until proven otherwise.
 243     _bt.set_unallocated_block(end());
 244   }
 245   if (!mr.is_empty()) {
 246     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 247     _bt.single_block(mr.start(), mr.word_size());
 248     FreeChunk* fc = (FreeChunk*) mr.start();
 249     fc->set_size(mr.word_size());
 250     if (mr.word_size() >= IndexSetSize ) {
 251       returnChunkToDictionary(fc);
 252     } else {
 253       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 254       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 255     }
 256     coalBirth(mr.word_size());
 257   }
 258   _promoInfo.reset();
 259   _smallLinearAllocBlock._ptr = NULL;
 260   _smallLinearAllocBlock._word_size = 0;
 261 }
 262 
 263 void CompactibleFreeListSpace::reset_after_compaction() {
 264   // Reset the space to the new reality - one free chunk.
 265   MemRegion mr(compaction_top(), end());
 266   reset(mr);
 267   // Now refill the linear allocation block(s) if possible.
 268   refillLinearAllocBlocksIfNeeded();
 269 }
 270 
 271 // Walks the entire dictionary, returning a coterminal
 272 // chunk, if it exists. Use with caution since it involves
 273 // a potentially complete walk of a potentially large tree.
 274 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 275 
 276   assert_lock_strong(&_freelistLock);
 277 
 278   return dictionary()->find_chunk_ends_at(end());
 279 }
 280 
 281 
 282 #ifndef PRODUCT
 283 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 284   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 285     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 286   }
 287 }
 288 
 289 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 290   size_t sum = 0;
 291   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 292     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 293   }
 294   return sum;
 295 }
 296 
 297 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 298   size_t count = 0;
 299   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 300     debug_only(
 301       ssize_t total_list_count = 0;
 302       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 303          fc = fc->next()) {
 304         total_list_count++;
 305       }
 306       assert(total_list_count ==  _indexedFreeList[i].count(),
 307         "Count in list is incorrect");
 308     )
 309     count += _indexedFreeList[i].count();
 310   }
 311   return count;
 312 }
 313 
 314 size_t CompactibleFreeListSpace::totalCount() {
 315   size_t num = totalCountInIndexedFreeLists();
 316   num +=  dictionary()->total_count();
 317   if (_smallLinearAllocBlock._word_size != 0) {
 318     num++;
 319   }
 320   return num;
 321 }
 322 #endif
 323 
 324 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 325   FreeChunk* fc = (FreeChunk*) p;
 326   return fc->is_free();
 327 }
 328 
 329 size_t CompactibleFreeListSpace::used() const {
 330   return capacity() - free();
 331 }
 332 
 333 size_t CompactibleFreeListSpace::free() const {
 334   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 335   // if you do this while the structures are in flux you
 336   // may get an approximate answer only; for instance
 337   // because there is concurrent allocation either
 338   // directly by mutators or for promotion during a GC.
 339   // It's "MT-safe", however, in the sense that you are guaranteed
 340   // not to crash and burn, for instance, because of walking
 341   // pointers that could disappear as you were walking them.
 342   // The approximation is because the various components
 343   // that are read below are not read atomically (and
 344   // further the computation of totalSizeInIndexedFreeLists()
 345   // is itself a non-atomic computation. The normal use of
 346   // this is during a resize operation at the end of GC
 347   // and at that time you are guaranteed to get the
 348   // correct actual value. However, for instance, this is
 349   // also read completely asynchronously by the "perf-sampler"
 350   // that supports jvmstat, and you are apt to see the values
 351   // flicker in such cases.
 352   assert(_dictionary != NULL, "No _dictionary?");
 353   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 354           totalSizeInIndexedFreeLists() +
 355           _smallLinearAllocBlock._word_size) * HeapWordSize;
 356 }
 357 
 358 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 359   assert(_dictionary != NULL, "No _dictionary?");
 360   assert_locked();
 361   size_t res = _dictionary->max_chunk_size();
 362   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 363                        (size_t) SmallForLinearAlloc - 1));
 364   // XXX the following could potentially be pretty slow;
 365   // should one, pessimistically for the rare cases when res
 366   // calculated above is less than IndexSetSize,
 367   // just return res calculated above? My reasoning was that
 368   // those cases will be so rare that the extra time spent doesn't
 369   // really matter....
 370   // Note: do not change the loop test i >= res + IndexSetStride
 371   // to i > res below, because i is unsigned and res may be zero.
 372   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 373        i -= IndexSetStride) {
 374     if (_indexedFreeList[i].head() != NULL) {
 375       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 376       return i;
 377     }
 378   }
 379   return res;
 380 }
 381 
 382 void LinearAllocBlock::print_on(outputStream* st) const {
 383   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 384             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 385             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 386 }
 387 
 388 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 389   st->print_cr("COMPACTIBLE FREELIST SPACE");
 390   st->print_cr(" Space:");
 391   Space::print_on(st);
 392 
 393   st->print_cr("promoInfo:");
 394   _promoInfo.print_on(st);
 395 
 396   st->print_cr("_smallLinearAllocBlock");
 397   _smallLinearAllocBlock.print_on(st);
 398 
 399   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 400 
 401   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 402 }
 403 
 404 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 405 const {
 406   reportIndexedFreeListStatistics(st);
 407   st->print_cr("Layout of Indexed Freelists");
 408   st->print_cr("---------------------------");
 409   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 410   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 411     _indexedFreeList[i].print_on(st);
 412     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 413       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 414                    p2i(fc), p2i((HeapWord*)fc + i),
 415                    fc->cantCoalesce() ? "\t CC" : "");
 416     }
 417   }
 418 }
 419 
 420 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 421 const {
 422   _promoInfo.print_on(st);
 423 }
 424 
 425 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 426 const {
 427   _dictionary->report_statistics(st);
 428   st->print_cr("Layout of Freelists in Tree");
 429   st->print_cr("---------------------------");
 430   _dictionary->print_free_lists(st);
 431 }
 432 
 433 class BlkPrintingClosure: public BlkClosure {
 434   const CMSCollector*             _collector;
 435   const CompactibleFreeListSpace* _sp;
 436   const CMSBitMap*                _live_bit_map;
 437   const bool                      _post_remark;
 438   outputStream*                   _st;
 439 public:
 440   BlkPrintingClosure(const CMSCollector* collector,
 441                      const CompactibleFreeListSpace* sp,
 442                      const CMSBitMap* live_bit_map,
 443                      outputStream* st):
 444     _collector(collector),
 445     _sp(sp),
 446     _live_bit_map(live_bit_map),
 447     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 448     _st(st) { }
 449   size_t do_blk(HeapWord* addr);
 450 };
 451 
 452 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 453   size_t sz = _sp->block_size_no_stall(addr, _collector);
 454   assert(sz != 0, "Should always be able to compute a size");
 455   if (_sp->block_is_obj(addr)) {
 456     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 457     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 458       p2i(addr),
 459       dead ? "dead" : "live",
 460       sz,
 461       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 462     if (CMSPrintObjectsInDump && !dead) {
 463       oop(addr)->print_on(_st);
 464       _st->print_cr("--------------------------------------");
 465     }
 466   } else { // free block
 467     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 468       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 469     if (CMSPrintChunksInDump) {
 470       ((FreeChunk*)addr)->print_on(_st);
 471       _st->print_cr("--------------------------------------");
 472     }
 473   }
 474   return sz;
 475 }
 476 
 477 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 478   st->print_cr("=========================");
 479   st->print_cr("Block layout in CMS Heap:");
 480   st->print_cr("=========================");
 481   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 482   blk_iterate(&bpcl);
 483 
 484   st->print_cr("=======================================");
 485   st->print_cr("Order & Layout of Promotion Info Blocks");
 486   st->print_cr("=======================================");
 487   print_promo_info_blocks(st);
 488 
 489   st->print_cr("===========================");
 490   st->print_cr("Order of Indexed Free Lists");
 491   st->print_cr("=========================");
 492   print_indexed_free_lists(st);
 493 
 494   st->print_cr("=================================");
 495   st->print_cr("Order of Free Lists in Dictionary");
 496   st->print_cr("=================================");
 497   print_dictionary_free_lists(st);
 498 }
 499 
 500 
 501 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 502   assert_lock_strong(&_freelistLock);
 503   LogHandle(gc, freelist, stats) log;
 504   if (!log.is_debug()) {
 505     return;
 506   }
 507   log.debug("%s", title);
 508   _dictionary->report_statistics(log.debug_stream());
 509   if (log.is_trace()) {
 510     ResourceMark rm;
 511     reportIndexedFreeListStatistics(log.trace_stream());
 512     size_t total_size = totalSizeInIndexedFreeLists() +
 513                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 514     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 515   }
 516 }
 517 
 518 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 519   assert_lock_strong(&_freelistLock);
 520   st->print_cr("Statistics for IndexedFreeLists:");
 521   st->print_cr("--------------------------------");
 522   size_t total_size = totalSizeInIndexedFreeLists();
 523   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 524   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 525   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 526   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 527   if (free_blocks != 0) {
 528     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 529   }
 530 }
 531 
 532 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 533   size_t res = 0;
 534   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 535     debug_only(
 536       ssize_t recount = 0;
 537       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 538          fc = fc->next()) {
 539         recount += 1;
 540       }
 541       assert(recount == _indexedFreeList[i].count(),
 542         "Incorrect count in list");
 543     )
 544     res += _indexedFreeList[i].count();
 545   }
 546   return res;
 547 }
 548 
 549 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 550   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 551     if (_indexedFreeList[i].head() != NULL) {
 552       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 553       return (size_t)i;
 554     }
 555   }
 556   return 0;
 557 }
 558 
 559 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 560   HeapWord* prevEnd = end();
 561   assert(prevEnd != value, "unnecessary set_end call");
 562   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 563         "New end is below unallocated block");
 564   _end = value;
 565   if (prevEnd != NULL) {
 566     // Resize the underlying block offset table.
 567     _bt.resize(pointer_delta(value, bottom()));
 568     if (value <= prevEnd) {
 569       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 570              "New end is below unallocated block");
 571     } else {
 572       // Now, take this new chunk and add it to the free blocks.
 573       // Note that the BOT has not yet been updated for this block.
 574       size_t newFcSize = pointer_delta(value, prevEnd);
 575       // Add the block to the free lists, if possible coalescing it
 576       // with the last free block, and update the BOT and census data.
 577       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 578     }
 579   }
 580 }
 581 
 582 class FreeListSpaceDCTOC : public FilteringDCTOC {
 583   CompactibleFreeListSpace* _cfls;
 584   CMSCollector* _collector;
 585   bool _parallel;
 586 protected:
 587   // Override.
 588 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 589   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 590                                        HeapWord* bottom, HeapWord* top, \
 591                                        ClosureType* cl);                \
 592       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 593                                        HeapWord* bottom, HeapWord* top, \
 594                                        ClosureType* cl);                \
 595     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 596                                        HeapWord* bottom, HeapWord* top, \
 597                                        ClosureType* cl)
 598   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 599   walk_mem_region_with_cl_DECL(FilteringClosure);
 600 
 601 public:
 602   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 603                      CMSCollector* collector,
 604                      ExtendedOopClosure* cl,
 605                      CardTableModRefBS::PrecisionStyle precision,
 606                      HeapWord* boundary,
 607                      bool parallel) :
 608     FilteringDCTOC(sp, cl, precision, boundary),
 609     _cfls(sp), _collector(collector), _parallel(parallel) {}
 610 };
 611 
 612 // We de-virtualize the block-related calls below, since we know that our
 613 // space is a CompactibleFreeListSpace.
 614 
 615 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 616 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 617                                                  HeapWord* bottom,              \
 618                                                  HeapWord* top,                 \
 619                                                  ClosureType* cl) {             \
 620    if (_parallel) {                                                             \
 621      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 622    } else {                                                                     \
 623      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 624    }                                                                            \
 625 }                                                                               \
 626 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 627                                                      HeapWord* bottom,          \
 628                                                      HeapWord* top,             \
 629                                                      ClosureType* cl) {         \
 630   /* Skip parts that are before "mr", in case "block_start" sent us             \
 631      back too far. */                                                           \
 632   HeapWord* mr_start = mr.start();                                              \
 633   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 634   HeapWord* next = bottom + bot_size;                                           \
 635   while (next < mr_start) {                                                     \
 636     bottom = next;                                                              \
 637     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 638     next = bottom + bot_size;                                                   \
 639   }                                                                             \
 640                                                                                 \
 641   while (bottom < top) {                                                        \
 642     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 643         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 644                     oop(bottom)) &&                                             \
 645         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 646       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 647       bottom += _cfls->adjustObjectSize(word_sz);                               \
 648     } else {                                                                    \
 649       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 650     }                                                                           \
 651   }                                                                             \
 652 }                                                                               \
 653 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 654                                                        HeapWord* bottom,        \
 655                                                        HeapWord* top,           \
 656                                                        ClosureType* cl) {       \
 657   /* Skip parts that are before "mr", in case "block_start" sent us             \
 658      back too far. */                                                           \
 659   HeapWord* mr_start = mr.start();                                              \
 660   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 661   HeapWord* next = bottom + bot_size;                                           \
 662   while (next < mr_start) {                                                     \
 663     bottom = next;                                                              \
 664     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 665     next = bottom + bot_size;                                                   \
 666   }                                                                             \
 667                                                                                 \
 668   while (bottom < top) {                                                        \
 669     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 670         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 671                     oop(bottom)) &&                                             \
 672         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 673       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 674       bottom += _cfls->adjustObjectSize(word_sz);                               \
 675     } else {                                                                    \
 676       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 677     }                                                                           \
 678   }                                                                             \
 679 }
 680 
 681 // (There are only two of these, rather than N, because the split is due
 682 // only to the introduction of the FilteringClosure, a local part of the
 683 // impl of this abstraction.)
 684 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 685 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 686 
 687 DirtyCardToOopClosure*
 688 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 689                                       CardTableModRefBS::PrecisionStyle precision,
 690                                       HeapWord* boundary,
 691                                       bool parallel) {
 692   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 693 }
 694 
 695 
 696 // Note on locking for the space iteration functions:
 697 // since the collector's iteration activities are concurrent with
 698 // allocation activities by mutators, absent a suitable mutual exclusion
 699 // mechanism the iterators may go awry. For instance a block being iterated
 700 // may suddenly be allocated or divided up and part of it allocated and
 701 // so on.
 702 
 703 // Apply the given closure to each block in the space.
 704 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 705   assert_lock_strong(freelistLock());
 706   HeapWord *cur, *limit;
 707   for (cur = bottom(), limit = end(); cur < limit;
 708        cur += cl->do_blk_careful(cur));
 709 }
 710 
 711 // Apply the given closure to each block in the space.
 712 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 713   assert_lock_strong(freelistLock());
 714   HeapWord *cur, *limit;
 715   for (cur = bottom(), limit = end(); cur < limit;
 716        cur += cl->do_blk(cur));
 717 }
 718 
 719 // Apply the given closure to each oop in the space.
 720 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 721   assert_lock_strong(freelistLock());
 722   HeapWord *cur, *limit;
 723   size_t curSize;
 724   for (cur = bottom(), limit = end(); cur < limit;
 725        cur += curSize) {
 726     curSize = block_size(cur);
 727     if (block_is_obj(cur)) {
 728       oop(cur)->oop_iterate(cl);
 729     }
 730   }
 731 }
 732 
 733 // NOTE: In the following methods, in order to safely be able to
 734 // apply the closure to an object, we need to be sure that the
 735 // object has been initialized. We are guaranteed that an object
 736 // is initialized if we are holding the Heap_lock with the
 737 // world stopped.
 738 void CompactibleFreeListSpace::verify_objects_initialized() const {
 739   if (is_init_completed()) {
 740     assert_locked_or_safepoint(Heap_lock);
 741     if (Universe::is_fully_initialized()) {
 742       guarantee(SafepointSynchronize::is_at_safepoint(),
 743                 "Required for objects to be initialized");
 744     }
 745   } // else make a concession at vm start-up
 746 }
 747 
 748 // Apply the given closure to each object in the space
 749 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 750   assert_lock_strong(freelistLock());
 751   NOT_PRODUCT(verify_objects_initialized());
 752   HeapWord *cur, *limit;
 753   size_t curSize;
 754   for (cur = bottom(), limit = end(); cur < limit;
 755        cur += curSize) {
 756     curSize = block_size(cur);
 757     if (block_is_obj(cur)) {
 758       blk->do_object(oop(cur));
 759     }
 760   }
 761 }
 762 
 763 // Apply the given closure to each live object in the space
 764 //   The usage of CompactibleFreeListSpace
 765 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
 766 // objects in the space with references to objects that are no longer
 767 // valid.  For example, an object may reference another object
 768 // that has already been sweep up (collected).  This method uses
 769 // obj_is_alive() to determine whether it is safe to apply the closure to
 770 // an object.  See obj_is_alive() for details on how liveness of an
 771 // object is decided.
 772 
 773 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 774   assert_lock_strong(freelistLock());
 775   NOT_PRODUCT(verify_objects_initialized());
 776   HeapWord *cur, *limit;
 777   size_t curSize;
 778   for (cur = bottom(), limit = end(); cur < limit;
 779        cur += curSize) {
 780     curSize = block_size(cur);
 781     if (block_is_obj(cur) && obj_is_alive(cur)) {
 782       blk->do_object(oop(cur));
 783     }
 784   }
 785 }
 786 
 787 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 788                                                   UpwardsObjectClosure* cl) {
 789   assert_locked(freelistLock());
 790   NOT_PRODUCT(verify_objects_initialized());
 791   assert(!mr.is_empty(), "Should be non-empty");
 792   // We use MemRegion(bottom(), end()) rather than used_region() below
 793   // because the two are not necessarily equal for some kinds of
 794   // spaces, in particular, certain kinds of free list spaces.
 795   // We could use the more complicated but more precise:
 796   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 797   // but the slight imprecision seems acceptable in the assertion check.
 798   assert(MemRegion(bottom(), end()).contains(mr),
 799          "Should be within used space");
 800   HeapWord* prev = cl->previous();   // max address from last time
 801   if (prev >= mr.end()) { // nothing to do
 802     return;
 803   }
 804   // This assert will not work when we go from cms space to perm
 805   // space, and use same closure. Easy fix deferred for later. XXX YSR
 806   // assert(prev == NULL || contains(prev), "Should be within space");
 807 
 808   bool last_was_obj_array = false;
 809   HeapWord *blk_start_addr, *region_start_addr;
 810   if (prev > mr.start()) {
 811     region_start_addr = prev;
 812     blk_start_addr    = prev;
 813     // The previous invocation may have pushed "prev" beyond the
 814     // last allocated block yet there may be still be blocks
 815     // in this region due to a particular coalescing policy.
 816     // Relax the assertion so that the case where the unallocated
 817     // block is maintained and "prev" is beyond the unallocated
 818     // block does not cause the assertion to fire.
 819     assert((BlockOffsetArrayUseUnallocatedBlock &&
 820             (!is_in(prev))) ||
 821            (blk_start_addr == block_start(region_start_addr)), "invariant");
 822   } else {
 823     region_start_addr = mr.start();
 824     blk_start_addr    = block_start(region_start_addr);
 825   }
 826   HeapWord* region_end_addr = mr.end();
 827   MemRegion derived_mr(region_start_addr, region_end_addr);
 828   while (blk_start_addr < region_end_addr) {
 829     const size_t size = block_size(blk_start_addr);
 830     if (block_is_obj(blk_start_addr)) {
 831       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 832     } else {
 833       last_was_obj_array = false;
 834     }
 835     blk_start_addr += size;
 836   }
 837   if (!last_was_obj_array) {
 838     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 839            "Should be within (closed) used space");
 840     assert(blk_start_addr > prev, "Invariant");
 841     cl->set_previous(blk_start_addr); // min address for next time
 842   }
 843 }
 844 
 845 // Callers of this iterator beware: The closure application should
 846 // be robust in the face of uninitialized objects and should (always)
 847 // return a correct size so that the next addr + size below gives us a
 848 // valid block boundary. [See for instance,
 849 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 850 // in ConcurrentMarkSweepGeneration.cpp.]
 851 HeapWord*
 852 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 853   ObjectClosureCareful* cl) {
 854   assert_lock_strong(freelistLock());
 855   // Can't use used_region() below because it may not necessarily
 856   // be the same as [bottom(),end()); although we could
 857   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 858   // that appears too cumbersome, so we just do the simpler check
 859   // in the assertion below.
 860   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 861          "mr should be non-empty and within used space");
 862   HeapWord *addr, *end;
 863   size_t size;
 864   for (addr = block_start_careful(mr.start()), end  = mr.end();
 865        addr < end; addr += size) {
 866     FreeChunk* fc = (FreeChunk*)addr;
 867     if (fc->is_free()) {
 868       // Since we hold the free list lock, which protects direct
 869       // allocation in this generation by mutators, a free object
 870       // will remain free throughout this iteration code.
 871       size = fc->size();
 872     } else {
 873       // Note that the object need not necessarily be initialized,
 874       // because (for instance) the free list lock does NOT protect
 875       // object initialization. The closure application below must
 876       // therefore be correct in the face of uninitialized objects.
 877       size = cl->do_object_careful_m(oop(addr), mr);
 878       if (size == 0) {
 879         // An unparsable object found. Signal early termination.
 880         return addr;
 881       }
 882     }
 883   }
 884   return NULL;
 885 }
 886 
 887 
 888 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 889   NOT_PRODUCT(verify_objects_initialized());
 890   return _bt.block_start(p);
 891 }
 892 
 893 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 894   return _bt.block_start_careful(p);
 895 }
 896 
 897 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 898   NOT_PRODUCT(verify_objects_initialized());
 899   // This must be volatile, or else there is a danger that the compiler
 900   // will compile the code below into a sometimes-infinite loop, by keeping
 901   // the value read the first time in a register.
 902   while (true) {
 903     // We must do this until we get a consistent view of the object.
 904     if (FreeChunk::indicatesFreeChunk(p)) {
 905       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 906       size_t res = fc->size();
 907 
 908       // Bugfix for systems with weak memory model (PPC64/IA64). The
 909       // block's free bit was set and we have read the size of the
 910       // block. Acquire and check the free bit again. If the block is
 911       // still free, the read size is correct.
 912       OrderAccess::acquire();
 913 
 914       // If the object is still a free chunk, return the size, else it
 915       // has been allocated so try again.
 916       if (FreeChunk::indicatesFreeChunk(p)) {
 917         assert(res != 0, "Block size should not be 0");
 918         return res;
 919       }
 920     } else {
 921       // must read from what 'p' points to in each loop.
 922       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 923       if (k != NULL) {
 924         assert(k->is_klass(), "Should really be klass oop.");
 925         oop o = (oop)p;
 926         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 927 
 928         // Bugfix for systems with weak memory model (PPC64/IA64).
 929         // The object o may be an array. Acquire to make sure that the array
 930         // size (third word) is consistent.
 931         OrderAccess::acquire();
 932 
 933         size_t res = o->size_given_klass(k);
 934         res = adjustObjectSize(res);
 935         assert(res != 0, "Block size should not be 0");
 936         return res;
 937       }
 938     }
 939   }
 940 }
 941 
 942 // TODO: Now that is_parsable is gone, we should combine these two functions.
 943 // A variant of the above that uses the Printezis bits for
 944 // unparsable but allocated objects. This avoids any possible
 945 // stalls waiting for mutators to initialize objects, and is
 946 // thus potentially faster than the variant above. However,
 947 // this variant may return a zero size for a block that is
 948 // under mutation and for which a consistent size cannot be
 949 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 950 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 951                                                      const CMSCollector* c)
 952 const {
 953   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 954   // This must be volatile, or else there is a danger that the compiler
 955   // will compile the code below into a sometimes-infinite loop, by keeping
 956   // the value read the first time in a register.
 957   DEBUG_ONLY(uint loops = 0;)
 958   while (true) {
 959     // We must do this until we get a consistent view of the object.
 960     if (FreeChunk::indicatesFreeChunk(p)) {
 961       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 962       size_t res = fc->size();
 963 
 964       // Bugfix for systems with weak memory model (PPC64/IA64). The
 965       // free bit of the block was set and we have read the size of
 966       // the block. Acquire and check the free bit again. If the
 967       // block is still free, the read size is correct.
 968       OrderAccess::acquire();
 969 
 970       if (FreeChunk::indicatesFreeChunk(p)) {
 971         assert(res != 0, "Block size should not be 0");
 972         assert(loops == 0, "Should be 0");
 973         return res;
 974       }
 975     } else {
 976       // must read from what 'p' points to in each loop.
 977       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 978       // We trust the size of any object that has a non-NULL
 979       // klass and (for those in the perm gen) is parsable
 980       // -- irrespective of its conc_safe-ty.
 981       if (k != NULL) {
 982         assert(k->is_klass(), "Should really be klass oop.");
 983         oop o = (oop)p;
 984         assert(o->is_oop(), "Should be an oop");
 985 
 986         // Bugfix for systems with weak memory model (PPC64/IA64).
 987         // The object o may be an array. Acquire to make sure that the array
 988         // size (third word) is consistent.
 989         OrderAccess::acquire();
 990 
 991         size_t res = o->size_given_klass(k);
 992         res = adjustObjectSize(res);
 993         assert(res != 0, "Block size should not be 0");
 994         return res;
 995       } else {
 996         // May return 0 if P-bits not present.
 997         return c->block_size_if_printezis_bits(p);
 998       }
 999     }
1000     assert(loops == 0, "Can loop at most once");
1001     DEBUG_ONLY(loops++;)
1002   }
1003 }
1004 
1005 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1006   NOT_PRODUCT(verify_objects_initialized());
1007   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1008   FreeChunk* fc = (FreeChunk*)p;
1009   if (fc->is_free()) {
1010     return fc->size();
1011   } else {
1012     // Ignore mark word because this may be a recently promoted
1013     // object whose mark word is used to chain together grey
1014     // objects (the last one would have a null value).
1015     assert(oop(p)->is_oop(true), "Should be an oop");
1016     return adjustObjectSize(oop(p)->size());
1017   }
1018 }
1019 
1020 // This implementation assumes that the property of "being an object" is
1021 // stable.  But being a free chunk may not be (because of parallel
1022 // promotion.)
1023 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1024   FreeChunk* fc = (FreeChunk*)p;
1025   assert(is_in_reserved(p), "Should be in space");
1026   if (FreeChunk::indicatesFreeChunk(p)) return false;
1027   Klass* k = oop(p)->klass_or_null();
1028   if (k != NULL) {
1029     // Ignore mark word because it may have been used to
1030     // chain together promoted objects (the last one
1031     // would have a null value).
1032     assert(oop(p)->is_oop(true), "Should be an oop");
1033     return true;
1034   } else {
1035     return false;  // Was not an object at the start of collection.
1036   }
1037 }
1038 
1039 // Check if the object is alive. This fact is checked either by consulting
1040 // the main marking bitmap in the sweeping phase or, if it's a permanent
1041 // generation and we're not in the sweeping phase, by checking the
1042 // perm_gen_verify_bit_map where we store the "deadness" information if
1043 // we did not sweep the perm gen in the most recent previous GC cycle.
1044 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1045   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1046          "Else races are possible");
1047   assert(block_is_obj(p), "The address should point to an object");
1048 
1049   // If we're sweeping, we use object liveness information from the main bit map
1050   // for both perm gen and old gen.
1051   // We don't need to lock the bitmap (live_map or dead_map below), because
1052   // EITHER we are in the middle of the sweeping phase, and the
1053   // main marking bit map (live_map below) is locked,
1054   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1055   // is stable, because it's mutated only in the sweeping phase.
1056   // NOTE: This method is also used by jmap where, if class unloading is
1057   // off, the results can return "false" for legitimate perm objects,
1058   // when we are not in the midst of a sweeping phase, which can result
1059   // in jmap not reporting certain perm gen objects. This will be moot
1060   // if/when the perm gen goes away in the future.
1061   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1062     CMSBitMap* live_map = _collector->markBitMap();
1063     return live_map->par_isMarked((HeapWord*) p);
1064   }
1065   return true;
1066 }
1067 
1068 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1069   FreeChunk* fc = (FreeChunk*)p;
1070   assert(is_in_reserved(p), "Should be in space");
1071   assert(_bt.block_start(p) == p, "Should be a block boundary");
1072   if (!fc->is_free()) {
1073     // Ignore mark word because it may have been used to
1074     // chain together promoted objects (the last one
1075     // would have a null value).
1076     assert(oop(p)->is_oop(true), "Should be an oop");
1077     return true;
1078   }
1079   return false;
1080 }
1081 
1082 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1083 // approximate answer if you don't hold the freelistlock when you call this.
1084 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1085   size_t size = 0;
1086   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1087     debug_only(
1088       // We may be calling here without the lock in which case we
1089       // won't do this modest sanity check.
1090       if (freelistLock()->owned_by_self()) {
1091         size_t total_list_size = 0;
1092         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1093           fc = fc->next()) {
1094           total_list_size += i;
1095         }
1096         assert(total_list_size == i * _indexedFreeList[i].count(),
1097                "Count in list is incorrect");
1098       }
1099     )
1100     size += i * _indexedFreeList[i].count();
1101   }
1102   return size;
1103 }
1104 
1105 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1106   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1107   return allocate(size);
1108 }
1109 
1110 HeapWord*
1111 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1112   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1113 }
1114 
1115 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1116   assert_lock_strong(freelistLock());
1117   HeapWord* res = NULL;
1118   assert(size == adjustObjectSize(size),
1119          "use adjustObjectSize() before calling into allocate()");
1120 
1121   res = allocate_adaptive_freelists(size);
1122 
1123   if (res != NULL) {
1124     // check that res does lie in this space!
1125     assert(is_in_reserved(res), "Not in this space!");
1126     assert(is_aligned((void*)res), "alignment check");
1127 
1128     FreeChunk* fc = (FreeChunk*)res;
1129     fc->markNotFree();
1130     assert(!fc->is_free(), "shouldn't be marked free");
1131     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1132     // Verify that the block offset table shows this to
1133     // be a single block, but not one which is unallocated.
1134     _bt.verify_single_block(res, size);
1135     _bt.verify_not_unallocated(res, size);
1136     // mangle a just allocated object with a distinct pattern.
1137     debug_only(fc->mangleAllocated(size));
1138   }
1139 
1140   return res;
1141 }
1142 
1143 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1144   assert_lock_strong(freelistLock());
1145   HeapWord* res = NULL;
1146   assert(size == adjustObjectSize(size),
1147          "use adjustObjectSize() before calling into allocate()");
1148 
1149   // Strategy
1150   //   if small
1151   //     exact size from small object indexed list if small
1152   //     small or large linear allocation block (linAB) as appropriate
1153   //     take from lists of greater sized chunks
1154   //   else
1155   //     dictionary
1156   //     small or large linear allocation block if it has the space
1157   // Try allocating exact size from indexTable first
1158   if (size < IndexSetSize) {
1159     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1160     if(res != NULL) {
1161       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1162         "Not removed from free list");
1163       // no block offset table adjustment is necessary on blocks in
1164       // the indexed lists.
1165 
1166     // Try allocating from the small LinAB
1167     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1168         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1169         // if successful, the above also adjusts block offset table
1170         // Note that this call will refill the LinAB to
1171         // satisfy the request.  This is different that
1172         // evm.
1173         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1174     } else {
1175       // Raid the exact free lists larger than size, even if they are not
1176       // overpopulated.
1177       res = (HeapWord*) getChunkFromGreater(size);
1178     }
1179   } else {
1180     // Big objects get allocated directly from the dictionary.
1181     res = (HeapWord*) getChunkFromDictionaryExact(size);
1182     if (res == NULL) {
1183       // Try hard not to fail since an allocation failure will likely
1184       // trigger a synchronous GC.  Try to get the space from the
1185       // allocation blocks.
1186       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1187     }
1188   }
1189 
1190   return res;
1191 }
1192 
1193 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1194 // when promoting obj.
1195 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1196   // Depending on the object size, expansion may require refilling either a
1197   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1198   // is added because the dictionary may over-allocate to avoid fragmentation.
1199   size_t space = obj_size;
1200   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1201   return space;
1202 }
1203 
1204 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1205   FreeChunk* ret;
1206 
1207   assert(numWords >= MinChunkSize, "Size is less than minimum");
1208   assert(linearAllocationWouldFail() || bestFitFirst(),
1209     "Should not be here");
1210 
1211   size_t i;
1212   size_t currSize = numWords + MinChunkSize;
1213   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1214   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1215     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1216     if (fl->head()) {
1217       ret = getFromListGreater(fl, numWords);
1218       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1219       return ret;
1220     }
1221   }
1222 
1223   currSize = MAX2((size_t)SmallForDictionary,
1224                   (size_t)(numWords + MinChunkSize));
1225 
1226   /* Try to get a chunk that satisfies request, while avoiding
1227      fragmentation that can't be handled. */
1228   {
1229     ret =  dictionary()->get_chunk(currSize);
1230     if (ret != NULL) {
1231       assert(ret->size() - numWords >= MinChunkSize,
1232              "Chunk is too small");
1233       _bt.allocated((HeapWord*)ret, ret->size());
1234       /* Carve returned chunk. */
1235       (void) splitChunkAndReturnRemainder(ret, numWords);
1236       /* Label this as no longer a free chunk. */
1237       assert(ret->is_free(), "This chunk should be free");
1238       ret->link_prev(NULL);
1239     }
1240     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1241     return ret;
1242   }
1243   ShouldNotReachHere();
1244 }
1245 
1246 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1247   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1248   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1249 }
1250 
1251 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1252   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1253          (_smallLinearAllocBlock._word_size == fc->size()),
1254          "Linear allocation block shows incorrect size");
1255   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1256           (_smallLinearAllocBlock._word_size == fc->size()));
1257 }
1258 
1259 // Check if the purported free chunk is present either as a linear
1260 // allocation block, the size-indexed table of (smaller) free blocks,
1261 // or the larger free blocks kept in the binary tree dictionary.
1262 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1263   if (verify_chunk_is_linear_alloc_block(fc)) {
1264     return true;
1265   } else if (fc->size() < IndexSetSize) {
1266     return verifyChunkInIndexedFreeLists(fc);
1267   } else {
1268     return dictionary()->verify_chunk_in_free_list(fc);
1269   }
1270 }
1271 
1272 #ifndef PRODUCT
1273 void CompactibleFreeListSpace::assert_locked() const {
1274   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1275 }
1276 
1277 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1278   CMSLockVerifier::assert_locked(lock);
1279 }
1280 #endif
1281 
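     // Carve a scratch chunk out of the dictionary, under the lock used for
     // parallel dictionary allocation.  The chunk remains marked free but is
     // made non-coalescable so that it is not swept up or merged away while
     // in use as scratch space.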
1282 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1283   // In the parallel case, the main thread holds the free list lock
1284   // on behalf of the parallel threads.
1285   FreeChunk* fc;
1286   {
1287     // If GC is parallel, this might be called by several threads.
1288     // This should be rare enough that the locking overhead won't affect
1289     // the sequential code.
1290     MutexLockerEx x(parDictionaryAllocLock(),
1291                     Mutex::_no_safepoint_check_flag);
1292     fc = getChunkFromDictionary(size);
1293   }
1294   if (fc != NULL) {
1295     fc->dontCoalesce();
1296     assert(fc->is_free(), "Should be free, but not coalescable");
1297     // Verify that the block offset table shows this to
1298     // be a single block, but not one which is unallocated.
1299     _bt.verify_single_block((HeapWord*)fc, fc->size());
1300     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1301   }
1302   return fc;
1303 }
1304 
1305 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1306   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1307   assert_locked();
1308 
1309   // if we are tracking promotions, then first ensure space for
1310   // promotion (including spooling space for saving header if necessary).
1311   // then allocate and copy, then track promoted info if needed.
1312   // When tracking (see PromotionInfo::track()), the mark word may
1313   // be displaced and in this case restoration of the mark word
1314   // occurs in the (oop_since_save_marks_)iterate phase.
1315   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1316     return NULL;
1317   }
1318   // Call the allocate(size_t, bool) form directly to avoid the
1319   // additional call through the allocate(size_t) form.  Having
1320   // the compiler inline the call is problematic because allocate(size_t)
1321   // is a virtual method.
1322   HeapWord* res = allocate(adjustObjectSize(obj_size));
1323   if (res != NULL) {
1324     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1325     // if we should be tracking promotions, do so.
1326     if (_promoInfo.tracking()) {
1327         _promoInfo.track((PromotedObject*)res);
1328     }
1329   }
1330   return oop(res);
1331 }
1332 
1333 HeapWord*
1334 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1335   assert_locked();
1336   assert(size >= MinChunkSize, "minimum chunk size");
1337   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1338     "maximum from smallLinearAllocBlock");
1339   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1340 }
1341 
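     // Allocate 'size' words from the given linear allocation block.  The common
     // case takes them from the block's current remainder; otherwise the block is
     // exhausted (consumed exactly, or its tail returned to the free lists) and
     // then refilled, with the allocation retried from the fresh block.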
1342 HeapWord*
1343 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1344                                                        size_t size) {
1345   assert_locked();
1346   assert(size >= MinChunkSize, "too small");
1347   HeapWord* res = NULL;
1348   // Try to do linear allocation from blk, making sure that the block
1349   // actually has space left.
1349   if (blk->_word_size == 0) {
1350     // We have probably been unable to fill this either in the prologue or
1351     // when it was exhausted at the last linear allocation. Bail out until
1352     // next time.
1353     assert(blk->_ptr == NULL, "consistency check");
1354     return NULL;
1355   }
1356   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1357   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1358   if (res != NULL) return res;
1359 
1360   // about to exhaust this linear allocation block
1361   if (blk->_word_size == size) { // exactly satisfied
1362     res = blk->_ptr;
1363     _bt.allocated(res, blk->_word_size);
1364   } else if (size + MinChunkSize <= blk->_refillSize) {
1365     size_t sz = blk->_word_size;
1366     // Update _unallocated_block if the size is such that the chunk would be
1367     // returned to the indexed free lists.  All other chunks in the indexed
1368     // free lists are allocated from the dictionary, so _unallocated_block
1369     // has already been adjusted for them.  Do it here so that the cost
1370     // is covered for all chunks added back to the indexed free lists.
1371     if (sz < SmallForDictionary) {
1372       _bt.allocated(blk->_ptr, sz);
1373     }
1374     // Return the chunk that isn't big enough, and then refill below.
1375     addChunkToFreeLists(blk->_ptr, sz);
1376     split_birth(sz);
1377     // Don't keep statistics on adding back chunk from a LinAB.
1378   } else {
1379     // A refilled block would not satisfy the request.
1380     return NULL;
1381   }
1382 
1383   blk->_ptr = NULL; blk->_word_size = 0;
1384   refillLinearAllocBlock(blk);
1385   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1386          "block was replenished");
1387   if (res != NULL) {
1388     split_birth(size);
1389     repairLinearAllocBlock(blk);
1390   } else if (blk->_ptr != NULL) {
1391     res = blk->_ptr;
1392     size_t blk_size = blk->_word_size;
1393     blk->_word_size -= size;
1394     blk->_ptr  += size;
1395     split_birth(size);
1396     repairLinearAllocBlock(blk);
1397     // Update BOT last so that other (parallel) GC threads see a consistent
1398     // view of the BOT and free blocks.
1399     // Above must occur before BOT is updated below.
1400     OrderAccess::storestore();
1401     _bt.split_block(res, blk_size, size);  // adjust block offset table
1402   }
1403   return res;
1404 }
1405 
1406 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1407                                         LinearAllocBlock* blk,
1408                                         size_t size) {
1409   assert_locked();
1410   assert(size >= MinChunkSize, "too small");
1411 
1412   HeapWord* res = NULL;
1413   // This is the common case.  Keep it simple.
1414   if (blk->_word_size >= size + MinChunkSize) {
1415     assert(blk->_ptr != NULL, "consistency check");
1416     res = blk->_ptr;
1417     // Note that the BOT is up-to-date for the linAB before allocation.  It
1418     // indicates the start of the linAB.  The split_block() updates the
1419     // BOT for the linAB after the allocation (indicates the start of the
1420     // next chunk to be allocated).
1421     size_t blk_size = blk->_word_size;
1422     blk->_word_size -= size;
1423     blk->_ptr  += size;
1424     split_birth(size);
1425     repairLinearAllocBlock(blk);
1426     // Update BOT last so that other (parallel) GC threads see a consistent
1427     // view of the BOT and free blocks.
1428     // Above must occur before BOT is updated below.
1429     OrderAccess::storestore();
1430     _bt.split_block(res, blk_size, size);  // adjust block offset table
1431     _bt.allocated(res, size);
1432   }
1433   return res;
1434 }
1435 
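     // Return an exact-size chunk from the indexed free list for 'size',
     // replenishing that list (from larger indexed sizes or the dictionary)
     // when it is empty.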
1436 FreeChunk*
1437 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1438   assert_locked();
1439   assert(size < SmallForDictionary, "just checking");
1440   FreeChunk* res;
1441   res = _indexedFreeList[size].get_chunk_at_head();
1442   if (res == NULL) {
1443     res = getChunkFromIndexedFreeListHelper(size);
1444   }
1445   _bt.verify_not_unallocated((HeapWord*) res, size);
1446   assert(res == NULL || res->size() == size, "Incorrect block size");
1447   return res;
1448 }
1449 
1450 FreeChunk*
1451 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1452   bool replenish) {
1453   assert_locked();
1454   FreeChunk* fc = NULL;
1455   if (size < SmallForDictionary) {
1456     assert(_indexedFreeList[size].head() == NULL ||
1457       _indexedFreeList[size].surplus() <= 0,
1458       "List for this size should be empty or underpopulated");
1459     // Try best fit in exact lists before replenishing the list
1460     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1461       // Replenish list.
1462       //
1463       // Things tried that failed.
1464       //   Tried allocating out of the two LinAB's first before
1465       // replenishing lists.
1466       //   Tried small linAB of size 256 (size in indexed list)
1467       // and replenishing indexed lists from the small linAB.
1468       //
1469       FreeChunk* newFc = NULL;
1470       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1471       if (replenish_size < SmallForDictionary) {
1472         // Do not replenish from an underpopulated size.
1473         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1474             _indexedFreeList[replenish_size].head() != NULL) {
1475           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1476         } else if (bestFitFirst()) {
1477           newFc = bestFitSmall(replenish_size);
1478         }
1479       }
1480       if (newFc == NULL && replenish_size > size) {
1481         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1482         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1483       }
1484       // Note: The split-death stats update for the block obtained above
1485       // will be recorded below, precisely when we know we are actually
1486       // going to split it into more than one piece.
1487       if (newFc != NULL) {
1488         if  (replenish || CMSReplenishIntermediate) {
1489           // Replenish this list and return one block to caller.
1490           size_t i;
1491           FreeChunk *curFc, *nextFc;
1492           size_t num_blk = newFc->size() / size;
1493           assert(num_blk >= 1, "Smaller than requested?");
1494           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1495           if (num_blk > 1) {
1496             // we are sure we will be splitting the block just obtained
1497             // into multiple pieces; record the split-death of the original
1498             splitDeath(replenish_size);
1499           }
1500           // carve up and link blocks 0, ..., num_blk - 2
1501           // The last chunk is not added to the lists but is returned as the
1502           // free chunk.
1503           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1504                i = 0;
1505                i < (num_blk - 1);
1506                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1507                i++) {
1508             curFc->set_size(size);
1509             // Don't record this as a return in order to try and
1510             // determine the "returns" from a GC.
1511             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1512             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1513             _bt.mark_block((HeapWord*)curFc, size);
1514             split_birth(size);
1515             // Don't record the initial population of the indexed list
1516             // as a split birth.
1517           }
1518 
1519           // check that the arithmetic was OK above
1520           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1521             "inconsistency in carving newFc");
1522           curFc->set_size(size);
1523           _bt.mark_block((HeapWord*)curFc, size);
1524           split_birth(size);
1525           fc = curFc;
1526         } else {
1527           // Return entire block to caller
1528           fc = newFc;
1529         }
1530       }
1531     }
1532   } else {
1533     // Get a free chunk from the free chunk dictionary to be returned to
1534     // replenish the indexed free list.
1535     fc = getChunkFromDictionaryExact(size);
1536   }
1537   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1538   return fc;
1539 }
1540 
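     // Get a chunk of at least 'size' words from the dictionary.  Anything large
     // enough to leave a remainder of at least MinChunkSize words is split, so
     // the chunk returned is always smaller than size + MinChunkSize words.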
1541 FreeChunk*
1542 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1543   assert_locked();
1544   FreeChunk* fc = _dictionary->get_chunk(size,
1545                                          FreeBlockDictionary<FreeChunk>::atLeast);
1546   if (fc == NULL) {
1547     return NULL;
1548   }
1549   _bt.allocated((HeapWord*)fc, fc->size());
1550   if (fc->size() >= size + MinChunkSize) {
1551     fc = splitChunkAndReturnRemainder(fc, size);
1552   }
1553   assert(fc->size() >= size, "chunk too small");
1554   assert(fc->size() < size + MinChunkSize, "chunk too big");
1555   _bt.verify_single_block((HeapWord*)fc, fc->size());
1556   return fc;
1557 }
1558 
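     // Like getChunkFromDictionary(), but the chunk returned is exactly 'size'
     // words: a larger chunk is split down, and a chunk too small to split
     // cleanly is returned to the dictionary in exchange for a bigger one.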
1559 FreeChunk*
1560 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1561   assert_locked();
1562   FreeChunk* fc = _dictionary->get_chunk(size,
1563                                          FreeBlockDictionary<FreeChunk>::atLeast);
1564   if (fc == NULL) {
1565     return fc;
1566   }
1567   _bt.allocated((HeapWord*)fc, fc->size());
1568   if (fc->size() == size) {
1569     _bt.verify_single_block((HeapWord*)fc, size);
1570     return fc;
1571   }
1572   assert(fc->size() > size, "get_chunk() guarantee");
1573   if (fc->size() < size + MinChunkSize) {
1574     // Return the chunk to the dictionary and go get a bigger one.
1575     returnChunkToDictionary(fc);
1576     fc = _dictionary->get_chunk(size + MinChunkSize,
1577                                 FreeBlockDictionary<FreeChunk>::atLeast);
1578     if (fc == NULL) {
1579       return NULL;
1580     }
1581     _bt.allocated((HeapWord*)fc, fc->size());
1582   }
1583   assert(fc->size() >= size + MinChunkSize, "tautology");
1584   fc = splitChunkAndReturnRemainder(fc, size);
1585   assert(fc->size() == size, "chunk is wrong size");
1586   _bt.verify_single_block((HeapWord*)fc, size);
1587   return fc;
1588 }
1589 
1590 void
1591 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1592   assert_locked();
1593 
1594   size_t size = chunk->size();
1595   _bt.verify_single_block((HeapWord*)chunk, size);
1596   // adjust _unallocated_block downward, as necessary
1597   _bt.freed((HeapWord*)chunk, size);
1598   _dictionary->return_chunk(chunk);
1599 #ifndef PRODUCT
1600   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1601     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1602     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1603     tl->verify_stats();
1604   }
1605 #endif // PRODUCT
1606 }
1607 
1608 void
1609 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1610   assert_locked();
1611   size_t size = fc->size();
1612   _bt.verify_single_block((HeapWord*) fc, size);
1613   _bt.verify_not_unallocated((HeapWord*) fc, size);
1614   _indexedFreeList[size].return_chunk_at_tail(fc);
1615 #ifndef PRODUCT
1616   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1617      _indexedFreeList[size].verify_stats();
1618   }
1619 #endif // PRODUCT
1620 }
1621 
1622 // Add a chunk at the end of the space to the free lists, coalescing it
1623 // with the largest block if that block is coterminal with the chunk,
1624 // and update BOT and census data. We would of course have preferred to
1625 // coalesce it with the last block, but it's currently less expensive to
1626 // find the largest block than it is to find the last.
1627 void
1628 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1629   HeapWord* chunk, size_t     size) {
1630   // check that the chunk does lie in this space!
1631   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1632   // One of the parallel gc task threads may be here
1633   // whilst others are allocating.
1634   Mutex* lock = &_parDictionaryAllocLock;
1635   FreeChunk* ec;
1636   {
1637     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1638     ec = dictionary()->find_largest_dict();  // get largest block
1639     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1640       // It's a coterminal block - we can coalesce.
1641       size_t old_size = ec->size();
1642       coalDeath(old_size);
1643       removeChunkFromDictionary(ec);
1644       size += old_size;
1645     } else {
1646       ec = (FreeChunk*)chunk;
1647     }
1648   }
1649   ec->set_size(size);
1650   debug_only(ec->mangleFreed(size));
1651   if (size < SmallForDictionary) {
1652     lock = _indexedFreeListParLocks[size];
1653   }
1654   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1655   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1656   // record the birth under the lock since the recording involves
1657   // manipulation of the list on which the chunk lives and
1658   // if the chunk is allocated and is the last on the list,
1659   // the list can go away.
1660   coalBirth(size);
1661 }
1662 
1663 void
1664 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1665                                               size_t     size) {
1666   // check that the chunk does lie in this space!
1667   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1668   assert_locked();
1669   _bt.verify_single_block(chunk, size);
1670 
1671   FreeChunk* fc = (FreeChunk*) chunk;
1672   fc->set_size(size);
1673   debug_only(fc->mangleFreed(size));
1674   if (size < SmallForDictionary) {
1675     returnChunkToFreeList(fc);
1676   } else {
1677     returnChunkToDictionary(fc);
1678   }
1679 }
1680 
1681 void
1682 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1683   size_t size, bool coalesced) {
1684   assert_locked();
1685   assert(chunk != NULL, "null chunk");
1686   if (coalesced) {
1687     // repair BOT
1688     _bt.single_block(chunk, size);
1689   }
1690   addChunkToFreeLists(chunk, size);
1691 }
1692 
1693 // We _must_ find the purported chunk on our free lists;
1694 // we assert if we don't.
1695 void
1696 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1697   size_t size = fc->size();
1698   assert_locked();
1699   debug_only(verifyFreeLists());
1700   if (size < SmallForDictionary) {
1701     removeChunkFromIndexedFreeList(fc);
1702   } else {
1703     removeChunkFromDictionary(fc);
1704   }
1705   _bt.verify_single_block((HeapWord*)fc, size);
1706   debug_only(verifyFreeLists());
1707 }
1708 
1709 void
1710 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1711   size_t size = fc->size();
1712   assert_locked();
1713   assert(fc != NULL, "null chunk");
1714   _bt.verify_single_block((HeapWord*)fc, size);
1715   _dictionary->remove_chunk(fc);
1716   // adjust _unallocated_block upward, as necessary
1717   _bt.allocated((HeapWord*)fc, size);
1718 }
1719 
1720 void
1721 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1722   assert_locked();
1723   size_t size = fc->size();
1724   _bt.verify_single_block((HeapWord*)fc, size);
1725   NOT_PRODUCT(
1726     if (FLSVerifyIndexTable) {
1727       verifyIndexedFreeList(size);
1728     }
1729   )
1730   _indexedFreeList[size].remove_chunk(fc);
1731   NOT_PRODUCT(
1732     if (FLSVerifyIndexTable) {
1733       verifyIndexedFreeList(size);
1734     }
1735   )
1736 }
1737 
1738 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1739   /* A hint is the next larger size that has a surplus.
1740      Start search at a size large enough to guarantee that
1741      the excess is >= MinChunkSize. */
1742   size_t start = align_object_size(numWords + MinChunkSize);
1743   if (start < IndexSetSize) {
1744     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1745     size_t    hint = _indexedFreeList[start].hint();
1746     while (hint < IndexSetSize) {
1747       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1748       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1749       if (fl->surplus() > 0 && fl->head() != NULL) {
1750         // Found a list with surplus, reset original hint
1751         // and split out a free chunk which is returned.
1752         _indexedFreeList[start].set_hint(hint);
1753         FreeChunk* res = getFromListGreater(fl, numWords);
1754         assert(res == NULL || res->is_free(),
1755           "Should be returning a free chunk");
1756         return res;
1757       }
1758       hint = fl->hint(); /* keep looking */
1759     }
1760     /* None found. */
1761     it[start].set_hint(IndexSetSize);
1762   }
1763   return NULL;
1764 }
1765 
1766 /* Requires fl->size >= numWords + MinChunkSize */
1767 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1768   size_t numWords) {
1769   FreeChunk *curr = fl->head();
1770   assert(curr != NULL, "List is empty");
1771   assert(numWords >= MinChunkSize, "Word size is too small");
1772   size_t oldNumWords = curr->size();
1773   assert(oldNumWords >= numWords + MinChunkSize,
1774         "Size of chunks in the list is too small");
1775 
1776   fl->remove_chunk(curr);
1777   // recorded indirectly by splitChunkAndReturnRemainder -
1778   // smallSplit(oldNumWords, numWords);
1779   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1780   // Does anything have to be done for the remainder in terms of
1781   // fixing the card table?
1782   assert(new_chunk == NULL || new_chunk->is_free(),
1783     "Should be returning a free chunk");
1784   return new_chunk;
1785 }
1786 
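     // Split 'chunk' in two: the trailing remainder (size - new_size words) is
     // set up as a free chunk and returned to the indexed lists or the
     // dictionary, while the leading 'new_size' words remain in 'chunk', which
     // is returned to the caller.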
1787 FreeChunk*
1788 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1789   size_t new_size) {
1790   assert_locked();
1791   size_t size = chunk->size();
1792   assert(size > new_size, "Split from a smaller block?");
1793   assert(is_aligned(chunk), "alignment problem");
1794   assert(size == adjustObjectSize(size), "alignment problem");
1795   size_t rem_sz = size - new_size;
1796   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1797   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1798   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1799   assert(is_aligned(ffc), "alignment problem");
1800   ffc->set_size(rem_sz);
1801   ffc->link_next(NULL);
1802   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1803   // Above must occur before BOT is updated below.
1804   // adjust block offset table
1805   OrderAccess::storestore();
1806   assert(chunk->is_free() && ffc->is_free(), "Error");
1807   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1808   if (rem_sz < SmallForDictionary) {
1809     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1810     bool is_par = Thread::current()->is_GC_task_thread();
1811     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1812     returnChunkToFreeList(ffc);
1813     split(size, rem_sz);
1814     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1815   } else {
1816     returnChunkToDictionary(ffc);
1817     split(size, rem_sz);
1818   }
1819   chunk->set_size(new_size);
1820   return chunk;
1821 }
1822 
1823 void
1824 CompactibleFreeListSpace::sweep_completed() {
1825   // Now that space is probably plentiful, refill linear
1826   // allocation blocks as needed.
1827   refillLinearAllocBlocksIfNeeded();
1828 }
1829 
1830 void
1831 CompactibleFreeListSpace::gc_prologue() {
1832   assert_locked();
1833   reportFreeListStatistics("Before GC:");
1834   refillLinearAllocBlocksIfNeeded();
1835 }
1836 
1837 void
1838 CompactibleFreeListSpace::gc_epilogue() {
1839   assert_locked();
1840   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1841   _promoInfo.stopTrackingPromotions();
1842   repairLinearAllocationBlocks();
1843   reportFreeListStatistics("After GC:");
1844 }
1845 
1846 // Iteration support, mostly delegated from a CMS generation
1847 
1848 void CompactibleFreeListSpace::save_marks() {
1849   assert(Thread::current()->is_VM_thread(),
1850          "Global variable should only be set when single-threaded");
1851   // Mark the "end" of the used space at the time of this call;
1852   // note, however, that promoted objects from this point
1853   // on are tracked in the _promoInfo below.
1854   set_saved_mark_word(unallocated_block());
1855 #ifdef ASSERT
1856   // Check the sanity of save_marks() etc.
1857   MemRegion ur    = used_region();
1858   MemRegion urasm = used_region_at_save_marks();
1859   assert(ur.contains(urasm),
1860          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1861          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1862          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1863 #endif
1864   // inform allocator that promotions should be tracked.
1865   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1866   _promoInfo.startTrackingPromotions();
1867 }
1868 
1869 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1870   assert(_promoInfo.tracking(), "No preceding save_marks?");
1871   return _promoInfo.noPromotions();
1872 }
1873 
1874 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1875                                                                             \
1876 void CompactibleFreeListSpace::                                             \
1877 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1878   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1879   /*                                                                        \
1880    * This also restores any displaced headers and removes the elements from \
1881    * the iteration set as they are processed, so that we have a clean slate \
1882    * at the end of the iteration. Note, thus, that if new objects are       \
1883    * promoted as a result of the iteration they are iterated over as well.  \
1884    */                                                                       \
1885   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1886 }
1887 
1888 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1889 
1890 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1891   return _smallLinearAllocBlock._word_size == 0;
1892 }
1893 
1894 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1895   // Fix up linear allocation blocks to look like free blocks
1896   repairLinearAllocBlock(&_smallLinearAllocBlock);
1897 }
1898 
1899 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1900   assert_locked();
1901   if (blk->_ptr != NULL) {
1902     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1903            "Minimum block size requirement");
1904     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1905     fc->set_size(blk->_word_size);
1906     fc->link_prev(NULL);   // mark as free
1907     fc->dontCoalesce();
1908     assert(fc->is_free(), "just marked it free");
1909     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1910   }
1911 }
1912 
1913 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1914   assert_locked();
1915   if (_smallLinearAllocBlock._ptr == NULL) {
1916     assert(_smallLinearAllocBlock._word_size == 0,
1917       "Size of linAB should be zero if the ptr is NULL");
1918     // Reset the linAB refill and allocation size limit.
1919     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1920   }
1921   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1922 }
1923 
1924 void
1925 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1926   assert_locked();
1927   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1928          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1929          "blk invariant");
1930   if (blk->_ptr == NULL) {
1931     refillLinearAllocBlock(blk);
1932   }
1933 }
1934 
1935 void
1936 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1937   assert_locked();
1938   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1939          "linear allocation block should be empty");
1940   FreeChunk* fc;
1941   if (blk->_refillSize < SmallForDictionary &&
1942       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1943     // A linAB's strategy might be to use small sizes to reduce
1944     // fragmentation but still get the benefits of allocation from a
1945     // linAB.
1946   } else {
1947     fc = getChunkFromDictionary(blk->_refillSize);
1948   }
1949   if (fc != NULL) {
1950     blk->_ptr  = (HeapWord*)fc;
1951     blk->_word_size = fc->size();
1952     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1953   }
1954 }
1955 
1956 // Support for compaction
1957 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1958   scan_and_forward(this, cp);
1959   // Prepare_for_compaction() uses the space between live objects
1960   // so that the later phases can skip dead space quickly.  As a result,
1961   // verification of the free lists doesn't work afterwards.
1962 }
1963 
1964 void CompactibleFreeListSpace::adjust_pointers() {
1965   // In other versions of adjust_pointers(), a bail out
1966   // based on the amount of live data in the generation
1967   // (i.e., if 0, bail out) may be used.
1968   // Cannot test used() == 0 here because the free lists have already
1969   // been mangled by the compaction.
1970 
1971   scan_and_adjust_pointers(this);
1972   // See note about verification in prepare_for_compaction().
1973 }
1974 
1975 void CompactibleFreeListSpace::compact() {
1976   scan_and_compact(this);
1977 }
1978 
1979 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1980 // where fbs is free block sizes
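     // For example, all free space in a single block gives a metric of 0 (no
     // fragmentation), while N equal-sized free blocks give 1 - 1/N, approaching
     // 1 as the free space becomes more finely fragmented.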
1981 double CompactibleFreeListSpace::flsFrag() const {
1982   size_t itabFree = totalSizeInIndexedFreeLists();
1983   double frag = 0.0;
1984   size_t i;
1985 
1986   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1987     double sz  = i;
1988     frag      += _indexedFreeList[i].count() * (sz * sz);
1989   }
1990 
1991   double totFree = itabFree +
1992                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1993   if (totFree > 0) {
1994     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1995             (totFree * totFree));
1996     frag = (double)1.0  - frag;
1997   } else {
1998     assert(frag == 0.0, "Follows from totFree == 0");
1999   }
2000   return frag;
2001 }
2002 
2003 void CompactibleFreeListSpace::beginSweepFLCensus(
2004   float inter_sweep_current,
2005   float inter_sweep_estimate,
2006   float intra_sweep_estimate) {
2007   assert_locked();
2008   size_t i;
2009   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2010     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2011     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2012     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2013     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2014     fl->set_before_sweep(fl->count());
2015     fl->set_bfr_surp(fl->surplus());
2016   }
2017   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2018                                     inter_sweep_current,
2019                                     inter_sweep_estimate,
2020                                     intra_sweep_estimate);
2021 }
2022 
2023 void CompactibleFreeListSpace::setFLSurplus() {
2024   assert_locked();
2025   size_t i;
2026   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2027     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2028     fl->set_surplus(fl->count() -
2029                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2030   }
2031 }
2032 
2033 void CompactibleFreeListSpace::setFLHints() {
2034   assert_locked();
2035   size_t i;
2036   size_t h = IndexSetSize;
2037   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2038     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2039     fl->set_hint(h);
2040     if (fl->surplus() > 0) {
2041       h = i;
2042     }
2043   }
2044 }
2045 
2046 void CompactibleFreeListSpace::clearFLCensus() {
2047   assert_locked();
2048   size_t i;
2049   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2050     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2051     fl->set_prev_sweep(fl->count());
2052     fl->set_coal_births(0);
2053     fl->set_coal_deaths(0);
2054     fl->set_split_births(0);
2055     fl->set_split_deaths(0);
2056   }
2057 }
2058 
2059 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2060   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2061   setFLSurplus();
2062   setFLHints();
2063   printFLCensus(sweep_count);
2064   clearFLCensus();
2065   assert_locked();
2066   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2067 }
2068 
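     // Report whether chunks of 'size' are already over-represented on the free
     // lists relative to the coalescing-desired count computed by the sweep
     // census; for sizes at or above SmallForDictionary the dictionary decides.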
2069 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2070   if (size < SmallForDictionary) {
2071     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2072     return (fl->coal_desired() < 0) ||
2073            ((int)fl->count() > fl->coal_desired());
2074   } else {
2075     return dictionary()->coal_dict_over_populated(size);
2076   }
2077 }
2078 
2079 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2080   assert(size < SmallForDictionary, "Size too large for indexed list");
2081   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2082   fl->increment_coal_births();
2083   fl->increment_surplus();
2084 }
2085 
2086 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2087   assert(size < SmallForDictionary, "Size too large for indexed list");
2088   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2089   fl->increment_coal_deaths();
2090   fl->decrement_surplus();
2091 }
2092 
2093 void CompactibleFreeListSpace::coalBirth(size_t size) {
2094   if (size  < SmallForDictionary) {
2095     smallCoalBirth(size);
2096   } else {
2097     dictionary()->dict_census_update(size,
2098                                    false /* split */,
2099                                    true /* birth */);
2100   }
2101 }
2102 
2103 void CompactibleFreeListSpace::coalDeath(size_t size) {
2104   if (size  < SmallForDictionary) {
2105     smallCoalDeath(size);
2106   } else {
2107     dictionary()->dict_census_update(size,
2108                                    false /* split */,
2109                                    false /* birth */);
2110   }
2111 }
2112 
2113 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2114   assert(size < SmallForDictionary, "Size too large for indexed list");
2115   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2116   fl->increment_split_births();
2117   fl->increment_surplus();
2118 }
2119 
2120 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2121   assert(size < SmallForDictionary, "Size too large for indexed list");
2122   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2123   fl->increment_split_deaths();
2124   fl->decrement_surplus();
2125 }
2126 
2127 void CompactibleFreeListSpace::split_birth(size_t size) {
2128   if (size  < SmallForDictionary) {
2129     smallSplitBirth(size);
2130   } else {
2131     dictionary()->dict_census_update(size,
2132                                    true /* split */,
2133                                    true /* birth */);
2134   }
2135 }
2136 
2137 void CompactibleFreeListSpace::splitDeath(size_t size) {
2138   if (size  < SmallForDictionary) {
2139     smallSplitDeath(size);
2140   } else {
2141     dictionary()->dict_census_update(size,
2142                                    true /* split */,
2143                                    false /* birth */);
2144   }
2145 }
2146 
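     // Census bookkeeping for splitting a chunk of 'from' words into pieces of
     // 'to1' and 'from - to1' words: one split death and two split births.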
2147 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2148   size_t to2 = from - to1;
2149   splitDeath(from);
2150   split_birth(to1);
2151   split_birth(to2);
2152 }
2153 
2154 void CompactibleFreeListSpace::print() const {
2155   print_on(tty);
2156 }
2157 
2158 void CompactibleFreeListSpace::prepare_for_verify() {
2159   assert_locked();
2160   repairLinearAllocationBlocks();
2161   // Verify that the SpoolBlocks look like free blocks of
2162   // appropriate sizes... To be done ...
2163 }
2164 
2165 class VerifyAllBlksClosure: public BlkClosure {
2166  private:
2167   const CompactibleFreeListSpace* _sp;
2168   const MemRegion                 _span;
2169   HeapWord*                       _last_addr;
2170   size_t                          _last_size;
2171   bool                            _last_was_obj;
2172   bool                            _last_was_live;
2173 
2174  public:
2175   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2176     MemRegion span) :  _sp(sp), _span(span),
2177                        _last_addr(NULL), _last_size(0),
2178                        _last_was_obj(false), _last_was_live(false) { }
2179 
2180   virtual size_t do_blk(HeapWord* addr) {
2181     size_t res;
2182     bool   was_obj  = false;
2183     bool   was_live = false;
2184     if (_sp->block_is_obj(addr)) {
2185       was_obj = true;
2186       oop p = oop(addr);
2187       guarantee(p->is_oop(), "Should be an oop");
2188       res = _sp->adjustObjectSize(p->size());
2189       if (_sp->obj_is_alive(addr)) {
2190         was_live = true;
2191         p->verify();
2192       }
2193     } else {
2194       FreeChunk* fc = (FreeChunk*)addr;
2195       res = fc->size();
2196       if (FLSVerifyLists && !fc->cantCoalesce()) {
2197         guarantee(_sp->verify_chunk_in_free_list(fc),
2198                   "Chunk should be on a free list");
2199       }
2200     }
2201     if (res == 0) {
2202       LogHandle(gc, verify) log;
2203       log.error("Livelock: no rank reduction!");
2204       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2205                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2206         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2207         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2208       ResourceMark rm;
2209       _sp->print_on(log.error_stream());
2210       guarantee(false, "Verification failed.");
2211     }
2212     _last_addr = addr;
2213     _last_size = res;
2214     _last_was_obj  = was_obj;
2215     _last_was_live = was_live;
2216     return res;
2217   }
2218 };
2219 
2220 class VerifyAllOopsClosure: public OopClosure {
2221  private:
2222   const CMSCollector*             _collector;
2223   const CompactibleFreeListSpace* _sp;
2224   const MemRegion                 _span;
2225   const bool                      _past_remark;
2226   const CMSBitMap*                _bit_map;
2227 
2228  protected:
2229   void do_oop(void* p, oop obj) {
2230     if (_span.contains(obj)) { // the interior oop points into CMS heap
2231       if (!_span.contains(p)) { // reference from outside CMS heap
2232         // Should be a valid object; the first disjunct below allows
2233         // us to sidestep an assertion in block_is_obj() that insists
2234         // that p be in _sp. Note that several generations (and spaces)
2235         // are spanned by _span (CMS heap) above.
2236         guarantee(!_sp->is_in_reserved(obj) ||
2237                   _sp->block_is_obj((HeapWord*)obj),
2238                   "Should be an object");
2239         guarantee(obj->is_oop(), "Should be an oop");
2240         obj->verify();
2241         if (_past_remark) {
2242           // Remark has been completed, the object should be marked
2243           _bit_map->isMarked((HeapWord*)obj);
2244         }
2245       } else { // reference within CMS heap
2246         if (_past_remark) {
2247           // Remark has been completed -- so the referent should have
2248           // been marked, if referring object is.
2249           if (_bit_map->isMarked(_collector->block_start(p))) {
2250             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2251           }
2252         }
2253       }
2254     } else if (_sp->is_in_reserved(p)) {
2255       // the reference is from FLS, and points out of FLS
2256       guarantee(obj->is_oop(), "Should be an oop");
2257       obj->verify();
2258     }
2259   }
2260 
2261   template <class T> void do_oop_work(T* p) {
2262     T heap_oop = oopDesc::load_heap_oop(p);
2263     if (!oopDesc::is_null(heap_oop)) {
2264       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2265       do_oop(p, obj);
2266     }
2267   }
2268 
2269  public:
2270   VerifyAllOopsClosure(const CMSCollector* collector,
2271     const CompactibleFreeListSpace* sp, MemRegion span,
2272     bool past_remark, CMSBitMap* bit_map) :
2273     _collector(collector), _sp(sp), _span(span),
2274     _past_remark(past_remark), _bit_map(bit_map) { }
2275 
2276   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2277   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2278 };
2279 
2280 void CompactibleFreeListSpace::verify() const {
2281   assert_lock_strong(&_freelistLock);
2282   verify_objects_initialized();
2283   MemRegion span = _collector->_span;
2284   bool past_remark = (_collector->abstract_state() ==
2285                       CMSCollector::Sweeping);
2286 
2287   ResourceMark rm;
2288   HandleMark  hm;
2289 
2290   // Check integrity of CFL data structures
2291   _promoInfo.verify();
2292   _dictionary->verify();
2293   if (FLSVerifyIndexTable) {
2294     verifyIndexedFreeLists();
2295   }
2296   // Check integrity of all objects and free blocks in space
2297   {
2298     VerifyAllBlksClosure cl(this, span);
2299     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2300   }
2301   // Check that all references in the heap to FLS
2302   // are to valid objects in FLS or that references in
2303   // FLS are to valid objects elsewhere in the heap
2304   if (FLSVerifyAllHeapReferences)
2305   {
2306     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2307       _collector->markBitMap());
2308 
2309     // Iterate over all oops in the heap. Uses the _no_header version
2310     // since we are not interested in following the klass pointers.
2311     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2312   }
2313 
2314   if (VerifyObjectStartArray) {
2315     // Verify the block offset table
2316     _bt.verify();
2317   }
2318 }
2319 
2320 #ifndef PRODUCT
2321 void CompactibleFreeListSpace::verifyFreeLists() const {
2322   if (FLSVerifyLists) {
2323     _dictionary->verify();
2324     verifyIndexedFreeLists();
2325   } else {
2326     if (FLSVerifyDictionary) {
2327       _dictionary->verify();
2328     }
2329     if (FLSVerifyIndexTable) {
2330       verifyIndexedFreeLists();
2331     }
2332   }
2333 }
2334 #endif
2335 
2336 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2337   size_t i = 0;
2338   for (; i < IndexSetStart; i++) {
2339     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2340   }
2341   for (; i < IndexSetSize; i++) {
2342     verifyIndexedFreeList(i);
2343   }
2344 }
2345 
2346 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2347   FreeChunk* fc   =  _indexedFreeList[size].head();
2348   FreeChunk* tail =  _indexedFreeList[size].tail();
2349   size_t    num = _indexedFreeList[size].count();
2350   size_t      n = 0;
2351   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2352             "Slot should have been empty");
2353   for (; fc != NULL; fc = fc->next(), n++) {
2354     guarantee(fc->size() == size, "Size inconsistency");
2355     guarantee(fc->is_free(), "!free?");
2356     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2357     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2358   }
2359   guarantee(n == num, "Incorrect count");
2360 }
2361 
2362 #ifndef PRODUCT
2363 void CompactibleFreeListSpace::check_free_list_consistency() const {
2364   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2365     "Some sizes can't be allocated without recourse to"
2366     " linear allocation buffers");
2367   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2368     "else MIN_TREE_CHUNK_SIZE is wrong");
2369   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2370   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2371 }
2372 #endif
2373 
2374 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2375   assert_lock_strong(&_freelistLock);
2376   LogHandle(gc, freelist, census) log;
2377   if (!log.is_debug()) {
2378     return;
2379   }
2380   AdaptiveFreeList<FreeChunk> total;
2381   log.debug("end sweep# " SIZE_FORMAT, sweep_count);
2382   ResourceMark rm;
2383   outputStream* out = log.debug_stream();
2384   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2385   size_t total_free = 0;
2386   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2387     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2388     total_free += fl->count() * fl->size();
2389     if (i % (40*IndexSetStride) == 0) {
2390       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2391     }
2392     fl->print_on(out);
2393     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2394     total.set_surplus(    total.surplus()     + fl->surplus()    );
2395     total.set_desired(    total.desired()     + fl->desired()    );
2396     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2397     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2398     total.set_count(      total.count()       + fl->count()      );
2399     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2400     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2401     total.set_split_births(total.split_births() + fl->split_births());
2402     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2403   }
2404   total.print_on(out, "TOTAL");
2405   log.debug("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2406   log.debug("growth: %8.5f  deficit: %8.5f",
2407             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2408                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2409             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2410   _dictionary->print_dict_census(out);
2411 }
2412 
2413 ///////////////////////////////////////////////////////////////////////////
2414 // CompactibleFreeListSpaceLAB
2415 ///////////////////////////////////////////////////////////////////////////
2416 
2417 #define VECTOR_257(x)                                                                                  \
2418   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2419   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2420      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2421      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2422      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2423      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2424      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2425      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2426      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2427      x }
2428 
2429 // Initialize with default setting for CMS, _not_
2430 // generic OldPLABSize, whose static default is different; if overridden at the
2431 // command-line, this will get reinitialized via a call to
2432 // modify_initialization() below.
2433 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2434   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2435 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2436 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2437 
2438 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2439   _cfls(cfls)
2440 {
2441   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2442   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2443        i < CompactibleFreeListSpace::IndexSetSize;
2444        i += CompactibleFreeListSpace::IndexSetStride) {
2445     _indexedFreeList[i].set_size(i);
2446     _num_blocks[i] = 0;
2447   }
2448 }
2449 
2450 static bool _CFLS_LAB_modified = false;
2451 
2452 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2453   assert(!_CFLS_LAB_modified, "Call only once");
2454   _CFLS_LAB_modified = true;
2455   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2456        i < CompactibleFreeListSpace::IndexSetSize;
2457        i += CompactibleFreeListSpace::IndexSetStride) {
2458     _blocks_to_claim[i].modify(n, wt, true /* force */);
2459   }
2460 }
2461 
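     // Allocate 'word_sz' words for a promotion.  Requests of IndexSetSize words
     // or more go straight to the shared dictionary under the parallel allocation
     // lock; smaller requests are served from this LAB's local indexed free
     // lists, which are refilled in bulk from the shared space when empty.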
2462 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2463   FreeChunk* res;
2464   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2465   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2466     // This locking manages sync with other large object allocations.
2467     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2468                     Mutex::_no_safepoint_check_flag);
2469     res = _cfls->getChunkFromDictionaryExact(word_sz);
2470     if (res == NULL) return NULL;
2471   } else {
2472     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2473     if (fl->count() == 0) {
2474       // Attempt to refill this local free list.
2475       get_from_global_pool(word_sz, fl);
2476       // If it didn't work, give up.
2477       if (fl->count() == 0) return NULL;
2478     }
2479     res = fl->get_chunk_at_head();
2480     assert(res != NULL, "Why was count non-zero?");
2481   }
2482   res->markNotFree();
2483   assert(!res->is_free(), "shouldn't be marked free");
2484   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2485   // mangle a just allocated object with a distinct pattern.
2486   debug_only(res->mangleAllocated(word_sz));
2487   return (HeapWord*)res;
2488 }
2489 
2490 // Get a chunk of blocks of the right size and update related
2491 // book-keeping stats
2492 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2493   // Get the #blocks we want to claim
2494   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2495   assert(n_blks > 0, "Error");
2496   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2497   // In some cases, when the application has a phase change,
2498   // there may be a sudden and sharp shift in the object survival
2499   // profile, and updating the counts at the end of a scavenge
2500   // may not be quick enough, giving rise to large scavenge pauses
2501   // during these phase changes. It is beneficial to detect such
2502   // changes on-the-fly during a scavenge and avoid such a phase-change
2503   // pothole. The following code is a heuristic attempt to do that.
2504   // It is protected by a product flag until we have gained
2505   // enough experience with this heuristic and fine-tuned its behavior.
2506   // WARNING: This might increase fragmentation if we overreact to
2507   // small spikes, so some kind of historical smoothing based on
2508   // previous experience with the greater reactivity might be useful.
2509   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2510   // default.
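       // The adjustment below scales n_blks up when the number of blocks of this
       // size already taken during the current scavenge greatly exceeds what
       // CMSOldPLABNumRefills refills of the current claim size would supply
       // (as gauged by CMSOldPLABToleranceFactor), capped at CMSOldPLABMax.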
2511   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2512     //
2513     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2514     // which is why there is a cast to double.
2515     //
2516     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2517     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2518     n_blks = MIN2(n_blks, CMSOldPLABMax);
2519   }
2520   assert(n_blks > 0, "Error");
2521   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2522   // Update stats table entry for this block size
2523   _num_blocks[word_sz] += fl->count();
2524 }
2525 
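     // After a scavenge, fold the per-size usage reported by the worker threads
     // into the weighted 'blocks to claim' averages used to size future refills
     // (clamped to [CMSOldPLABMin, CMSOldPLABMax]), and reset the counters for
     // the next round.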
2526 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2527   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2528        i < CompactibleFreeListSpace::IndexSetSize;
2529        i += CompactibleFreeListSpace::IndexSetStride) {
2530     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2531            "Counter inconsistency");
2532     if (_global_num_workers[i] > 0) {
2533       // Need to smooth wrt historical average
2534       if (ResizeOldPLAB) {
2535         _blocks_to_claim[i].sample(
2536           MAX2(CMSOldPLABMin,
2537           MIN2(CMSOldPLABMax,
2538                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2539       }
2540       // Reset counters for next round
2541       _global_num_workers[i] = 0;
2542       _global_num_blocks[i] = 0;
2543       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2544     }
2545   }
2546 }
2547 
2548 // If this is changed in the future to allow parallel
2549 // access, one would need to take the FL locks and,
2550 // depending on how it is used, stagger access from
2551 // parallel threads to reduce contention.
2552 void CompactibleFreeListSpaceLAB::retire(int tid) {
2553   // We run this single threaded with the world stopped;
2554   // so no need for locks and such.
2556   assert(Thread::current()->is_VM_thread(), "Error");
2557   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2558        i < CompactibleFreeListSpace::IndexSetSize;
2559        i += CompactibleFreeListSpace::IndexSetStride) {
2560     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2561            "Can't retire more than what we obtained");
2562     if (_num_blocks[i] > 0) {
2563       size_t num_retire =  _indexedFreeList[i].count();
2564       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2565       {
2566         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2567         //                Mutex::_no_safepoint_check_flag);
2568 
2569         // Update global stats for the number of blocks used
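             // (_num_blocks[i] - num_retire) is the number of blocks of
             // this size the thread actually used; the num_retire unused
             // blocks still on the local list are given back below.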
2570         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2571         _global_num_workers[i]++;
2572         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2573         if (num_retire > 0) {
2574           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2575           // Reset this list.
2576           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2577           _indexedFreeList[i].set_size(i);
2578         }
2579       }
2580       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2581                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2582       // Reset stats for next round
2583       _num_blocks[i]         = 0;
2584     }
2585   }
2586 }
2587 
2588 // Used by par_get_chunk_of_blocks() for the chunks from the
2589 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2590 // of "word_sz" and, if found, splits it into chunks of "word_sz"
2591 // words each and adds them to the free list "fl".  "n" is the maximum
2592 // number of chunks to be added to "fl".
2593 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2594 
2595   // We'll try all multiples of word_sz in the indexed set: word_sz itself
2596   // first and, if CMSSplitIndexedFreeListBlocks is set, larger multiples as
2597   // well, splitting any larger chunk we find into word_sz-sized pieces.
2598   {
2599     bool found;
2600     int  k;
2601     size_t cur_sz;
2602     for (k = 1, cur_sz = k * word_sz, found = false;
2603          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2604          (CMSSplitIndexedFreeListBlocks || k <= 1);
2605          k++, cur_sz = k * word_sz) {
2606       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2607       fl_for_cur_sz.set_size(cur_sz);
2608       {
2609         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2610                         Mutex::_no_safepoint_check_flag);
2611         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2612         if (gfl->count() != 0) {
2613           // nn is the number of chunks of size cur_sz that
2614           // we'd need to split k-ways each, in order to create
2615           // "n" chunks of size word_sz each.
2616           const size_t nn = MAX2(n/k, (size_t)1);
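               // E.g. (illustrative): for n == 7 and k == 3, nn == MAX2(7/3, 1) == 2.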
2617           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2618           found = true;
2619           if (k > 1) {
2620             // Update split death stats for the cur_sz-size blocks list:
2621             // we increment the split death count by the number of blocks
2622             // we just took from the cur_sz-size blocks list and which
2623             // we will be splitting below.
2624             ssize_t deaths = gfl->split_deaths() +
2625                              fl_for_cur_sz.count();
2626             gfl->set_split_deaths(deaths);
2627           }
2628         }
2629       }
2630       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2631       if (found) {
2632         if (k == 1) {
2633           fl->prepend(&fl_for_cur_sz);
2634         } else {
2635           // Divide each block on fl_for_cur_sz up k ways.
2636           FreeChunk* fc;
2637           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2638             // Must do this in reverse order, so that anybody attempting to
2639             // access the main chunk sees it as a single free block until we
2640             // change it.
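                 // For example (illustrative, k == 3): a block of size
                 // 3 * word_sz is carved up starting with the piece at
                 // offset 2 * word_sz, then the piece at offset word_sz,
                 // and finally the piece at offset 0 (the original chunk
                 // header itself).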
2641             size_t fc_size = fc->size();
2642             assert(fc->is_free(), "Error");
2643             for (int i = k-1; i >= 0; i--) {
2644               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2645               assert((i != 0) ||
2646                         ((fc == ffc) && ffc->is_free() &&
2647                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2648                         "Counting error");
2649               ffc->set_size(word_sz);
2650               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2651               ffc->link_next(NULL);
2652               // Above must occur before BOT is updated below.
2653               OrderAccess::storestore();
2654               // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2655               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2656               fc_size -= word_sz;
2657               assert(fc_size == i*word_sz, "Error");
2658               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2659               _bt.verify_single_block((HeapWord*)fc, fc_size);
2660               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2661               // Push this on "fl".
2662               fl->return_chunk_at_head(ffc);
2663             }
2664             // TRAP
2665             assert(fl->tail()->next() == NULL, "List invariant.");
2666           }
2667         }
2668         // Update birth stats for this block size.
2669         size_t num = fl->count();
2670         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2671                         Mutex::_no_safepoint_check_flag);
2672         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2673         _indexedFreeList[word_sz].set_split_births(births);
2674         return true;
2675       }
2676     }
2677     return found;
2678   }
2679 }
2680 
2681 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2682 
2683   FreeChunk* fc = NULL;
2684   FreeChunk* rem_fc = NULL;
2685   size_t rem;
2686   {
2687     MutexLockerEx x(parDictionaryAllocLock(),
2688                     Mutex::_no_safepoint_check_flag);
2689     while (n > 0) {
2690       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2691                                   FreeBlockDictionary<FreeChunk>::atLeast);
2692       if (fc != NULL) {
2693         break;
2694       } else {
2695         n--;
2696       }
2697     }
2698     if (fc == NULL) return NULL;
2699     // Otherwise, split up that block.
2700     assert((ssize_t)n >= 1, "Control point invariant");
2701     assert(fc->is_free(), "Error: should be a free block");
2702     _bt.verify_single_block((HeapWord*)fc, fc->size());
2703     const size_t nn = fc->size() / word_sz;
2704     n = MIN2(nn, n);
2705     assert((ssize_t)n >= 1, "Control point invariant");
2706     rem = fc->size() - n * word_sz;
2707     // If there is a remainder, and it's too small, allocate one fewer.
2708     if (rem > 0 && rem < MinChunkSize) {
2709       n--; rem += word_sz;
2710     }
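         // Illustrative sizes: with word_sz == 8 and n == 4, a 34-word chunk
         // leaves rem == 2; if that is below MinChunkSize we drop to n == 3
         // and the remainder grows to 10 words.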
2711     // Note that at this point we may have n == 0.
2712     assert((ssize_t)n >= 0, "Control point invariant");
2713 
2714     // If n is 0, the chunk fc that was found is not large
2715     // enough to leave a viable remainder.  We are unable to
2716     // allocate even one block.  Return fc to the
2717     // dictionary and return, leaving "fl" empty.
2718     if (n == 0) {
2719       returnChunkToDictionary(fc);
2720       return NULL;
2721     }
2722 
2723     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2724     dictionary()->dict_census_update(fc->size(),
2725                                      true /*split*/,
2726                                      false /*birth*/);
2727 
2728     // First return the remainder, if any.
2729     // Note that we hold the lock until we decide if we're going to give
2730     // back the remainder to the dictionary, since a concurrent allocation
2731     // may otherwise see the heap as empty.  (We're willing to take that
2732     // hit if the block is a small block.)
2733     if (rem > 0) {
2734       size_t prefix_size = n * word_sz;
2735       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2736       rem_fc->set_size(rem);
2737       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2738       rem_fc->link_next(NULL);
2739       // Above must occur before BOT is updated below.
2740       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2741       OrderAccess::storestore();
2742       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2743       assert(fc->is_free(), "Error");
2744       fc->set_size(prefix_size);
2745       if (rem >= IndexSetSize) {
2746         returnChunkToDictionary(rem_fc);
2747         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2748         rem_fc = NULL;
2749       }
2750       // Otherwise, return it to the small list below.
2751     }
2752   }
2753   if (rem_fc != NULL) {
2754     MutexLockerEx x(_indexedFreeListParLocks[rem],
2755                     Mutex::_no_safepoint_check_flag);
2756     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2757     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2758     smallSplitBirth(rem);
2759   }
2760   assert(n * word_sz == fc->size(),
2761          "Chunk size " SIZE_FORMAT " is not exactly splittable into "
2762          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2763          fc->size(), n, word_sz);
2764   return fc;
2765 }
2766 
2767 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2768 
2769   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2770 
2771   if (fc == NULL) {
2772     return;
2773   }
2774 
2775   size_t n = fc->size() / word_sz;
2776 
2777   assert((ssize_t)n > 0, "Consistency");
2778   // Now do the splitting up.
2779   // Must do this in reverse order, so that anybody attempting to
2780   // access the main chunk sees it as a single free block until we
2781   // change it.
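       // For example (illustrative, n == 4): the pieces at offsets
       // 3 * word_sz, 2 * word_sz and word_sz are split off first; the
       // piece at offset 0 (the original header) is resized last, below.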
2782   size_t fc_size = n * word_sz;
2783   // All but first chunk in this loop
2784   for (ssize_t i = n-1; i > 0; i--) {
2785     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2786     ffc->set_size(word_sz);
2787     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2788     ffc->link_next(NULL);
2789     // Above must occur before BOT is updated below.
2790     OrderAccess::storestore();
2791     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2792     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2793     fc_size -= word_sz;
2794     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2795     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2796     _bt.verify_single_block((HeapWord*)fc, fc_size);
2797     // Push this on "fl".
2798     fl->return_chunk_at_head(ffc);
2799   }
2800   // First chunk
2801   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2802   // The blocks above must show their new sizes before the first block's size is changed below.
2803   fc->set_size(word_sz);
2804   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2805   fc->link_next(NULL);
2806   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2807   _bt.verify_single_block((HeapWord*)fc, fc->size());
2808   fl->return_chunk_at_head(fc);
2809 
2810   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2811   {
2812     // Update the stats for this block size.
2813     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2814                     Mutex::_no_safepoint_check_flag);
2815     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2816     _indexedFreeList[word_sz].set_split_births(births);
2817     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2818     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2819   }
2820 
2821   // TRAP
2822   assert(fl->tail()->next() == NULL, "List invariant.");
2823 }
2824 
2825 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2826   assert(fl->count() == 0, "Precondition.");
2827   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2828          "Precondition");
2829 
2830   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2831     // Got it
2832     return;
2833   }
2834 
2835   // Otherwise, we'll split a block from the dictionary.
2836   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2837 }
2838 
2839 // Set up the space's par_seq_tasks structure for work claiming
2840 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2841 // XXX Need to suitably abstract and generalize this and the next
2842 // method into one.
2843 void
2844 CompactibleFreeListSpace::
2845 initialize_sequential_subtasks_for_rescan(int n_threads) {
2846   // The "size" of each task is fixed according to rescan_task_size.
2847   assert(n_threads > 0, "Unexpected n_threads argument");
2848   const size_t task_size = rescan_task_size();
2849   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
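       // n_tasks is the ceiling of word_size / task_size; e.g. (illustrative)
       // a 1000-word used region with a 256-word task size yields 4 tasks,
       // the last of which covers only a partial stride.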
2850   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2851   assert(n_tasks == 0 ||
2852          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2853           (used_region().start() + n_tasks*task_size >= used_region().end())),
2854          "n_tasks calculation incorrect");
2855   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2856   assert(!pst->valid(), "Clobbering existing data?");
2857   // Sets the condition for completion of the subtask (how many threads
2858   // need to finish in order to be done).
2859   pst->set_n_threads(n_threads);
2860   pst->set_n_tasks((int)n_tasks);
2861 }
2862 
2863 // Set up the space's par_seq_tasks structure for work claiming
2864 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2865 void
2866 CompactibleFreeListSpace::
2867 initialize_sequential_subtasks_for_marking(int n_threads,
2868                                            HeapWord* low) {
2869   // The "size" of each task is fixed according to marking_task_size.
2870   assert(n_threads > 0, "Unexpected n_threads argument");
2871   const size_t task_size = marking_task_size();
2872   assert(task_size > CardTableModRefBS::card_size_in_words &&
2873          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2874          "Otherwise arithmetic below would be incorrect");
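       // Because task_size is a whole number of cards (checked above), each
       // task boundary within the card-aligned span below also falls on a
       // card boundary.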
2875   MemRegion span = _old_gen->reserved();
2876   if (low != NULL) {
2877     if (span.contains(low)) {
2878       // Align low down to a card boundary so that
2879       // we can use block_offset_careful() on span boundaries.
2880       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2881                                  CardTableModRefBS::card_size);
2882       // Clip span prefix at aligned_low
2883       span = span.intersection(MemRegion(aligned_low, span.end()));
2884     } else if (low > span.end()) {
2885       span = MemRegion(low, low);  // Null region
2886     } // else use entire span
2887   }
2888   assert(span.is_empty() ||
2889          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2890         "span should start at a card boundary");
2891   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2892   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2893   assert(n_tasks == 0 ||
2894          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2895           (span.start() + n_tasks*task_size >= span.end())),
2896          "n_tasks calculation incorrect");
2897   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2898   assert(!pst->valid(), "Clobbering existing data?");
2899   // Sets the condition for completion of the subtask (how many threads
2900   // need to finish in order to be done).
2901   pst->set_n_threads(n_threads);
2902   pst->set_n_tasks((int)n_tasks);
2903 }