1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/liveRange.hpp"
  34 #include "gc/shared/space.inline.hpp"
  35 #include "gc/shared/spaceDecorator.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
  52 // Highest rank assigned to the free list locks.
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
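     // Minimum chunk size, in HeapWords; established once by set_cms_values().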
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
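       // Illustrative only: if, hypothetically, sizeof(FreeChunk) were 3 machine
       // words on a 64-bit VM with MinObjAlignmentInBytes == 8, the rounding above
       // would leave 24 bytes unchanged and MinChunkSize would come out as 3 words.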
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  77   _bt(bs, mr),
  78   // free list locks are in the range of values taken by _lockRank
  79   // This range currently is [_leaf+2, _leaf+3]
  80   // Note: this requires that CFLspace c'tors
  81   // are called serially in the order in which the locks are
  82   // acquired in the program text. This is true today.
  83   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  84                 Monitor::_safepoint_check_sometimes),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true,
  87                           Monitor::_safepoint_check_never),
  88   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  89                     CMSRescanMultiple),
  90   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  91                     CMSConcMarkMultiple),
  92   _collector(NULL),
  93   _preconsumptionDirtyCardClosure(NULL)
  94 {
  95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  96          "FreeChunk is larger than expected");
  97   _bt.set_space(this);
  98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  99 
 100   _dictionary = new AFLBinaryTreeDictionary(mr);
 101 
 102   assert(_dictionary != NULL, "CMS dictionary initialization");
 103   // The indexed free lists are initially all empty and are lazily
 104   // filled in on demand. Initialize the array elements to NULL.
 105   initializeIndexedFreeListArray();
 106 
 107   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 108                              SmallForLinearAlloc);
 109 
 110   // CMSIndexedFreeListReplenish should be at least 1
 111   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 112   _promoInfo.setSpace(this);
 113   if (UseCMSBestFit) {
 114     _fitStrategy = FreeBlockBestFitFirst;
 115   } else {
 116     _fitStrategy = FreeBlockStrategyNone;
 117   }
 118   check_free_list_consistency();
 119 
 120   // Initialize locks for parallel case.
 121   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 122     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 123                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 124     DEBUG_ONLY(
 125       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 126     )
 127   }
 128   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 129 }
 130 
 131 // Like CompactibleSpace forward() but always calls cross_threshold() to
 132 // update the block offset table.  Removed initialize_threshold call because
 133 // CFLS does not use a block offset array for contiguous spaces.
 134 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 135                                     CompactPoint* cp, HeapWord* compact_top) {
 136   // q is alive
 137   // First check if we should switch compaction space
 138   assert(this == cp->space, "'this' should be current compaction space.");
 139   size_t compaction_max_size = pointer_delta(end(), compact_top);
 140   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 141     "virtual adjustObjectSize_v() method is not correct");
 142   size_t adjusted_size = adjustObjectSize(size);
 143   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 144          "no small fragments allowed");
 145   assert(minimum_free_block_size() == MinChunkSize,
 146          "for de-virtualized reference below");
 147   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
 148   if (adjusted_size + MinChunkSize > compaction_max_size &&
 149       adjusted_size != compaction_max_size) {
 150     do {
 151       // switch to next compaction space
 152       cp->space->set_compaction_top(compact_top);
 153       cp->space = cp->space->next_compaction_space();
 154       if (cp->space == NULL) {
 155         cp->gen = GenCollectedHeap::heap()->young_gen();
 156         assert(cp->gen != NULL, "compaction must succeed");
 157         cp->space = cp->gen->first_compaction_space();
 158         assert(cp->space != NULL, "generation must have a first compaction space");
 159       }
 160       compact_top = cp->space->bottom();
 161       cp->space->set_compaction_top(compact_top);
 162       // The correct adjusted_size may not be the same as that for this method
 163       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 164       // Here we use the virtual method, which was not used above in order to
 165       // save the virtual dispatch.
 166       adjusted_size = cp->space->adjust_object_size_v(size);
 167       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 168       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 169     } while (adjusted_size > compaction_max_size);
 170   }
 171 
 172   // store the forwarding pointer into the mark word
 173   if ((HeapWord*)q != compact_top) {
 174     q->forward_to(oop(compact_top));
 175     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 176   } else {
 177     // if the object isn't moving we can just set the mark to the default
 178     // mark and handle it specially later on.
 179     q->init_mark();
 180     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 181   }
 182 
 183   compact_top += adjusted_size;
 184 
 185   // we need to update the offset table so that the beginnings of objects can be
 186   // found during scavenge.  Note that we are updating the offset table based on
 187   // where the object will be once the compaction phase finishes.
 188 
 189   // Always call cross_threshold().  A contiguous space can call it only when
 190   // the compaction_top exceeds the current threshold, but that optimization
 191   // does not apply to a non-contiguous space.
 192   cp->threshold =
 193     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 194   return compact_top;
 195 }
 196 
 197 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 198 // and use of single_block instead of alloc_block.  The name here is not really
 199 // appropriate - maybe a more general name could be invented for both the
 200 // contiguous and noncontiguous spaces.
 201 
 202 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 203   _bt.single_block(start, the_end);
 204   return end();
 205 }
 206 
 207 // Initialize the indexed free lists; each list starts out empty.
 208 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 209   for (size_t i = 0; i < IndexSetSize; i++) {
 210     // Note that on platforms where objects are double word aligned,
 211     // the odd array elements are not used.  It is convenient, however,
 212     // to map directly from the object size to the array element.
 213     _indexedFreeList[i].reset(IndexSetSize);
 214     _indexedFreeList[i].set_size(i);
 215     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 216     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 217     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 219   }
 220 }
 221 
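     // Size, in HeapWords, of the object starting at addr, adjusted to the
     // space's minimum chunk/alignment requirements.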
 222 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 223   return adjustObjectSize(oop(addr)->size());
 224 }
 225 
 226 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 227   for (size_t i = 1; i < IndexSetSize; i++) {
 228     assert(_indexedFreeList[i].size() == (size_t) i,
 229       "Indexed free list sizes are incorrect");
 230     _indexedFreeList[i].reset(IndexSetSize);
 231     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 232     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 233     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 234     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 235   }
 236 }
 237 
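     // Reset the free-list structures so that the space's free space is the
     // single chunk described by mr (as is done, for instance, after a
     // compacting collection).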
 238 void CompactibleFreeListSpace::reset(MemRegion mr) {
 239   resetIndexedFreeListArray();
 240   dictionary()->reset();
 241   if (BlockOffsetArrayUseUnallocatedBlock) {
 242     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 243     // Everything's allocated until proven otherwise.
 244     _bt.set_unallocated_block(end());
 245   }
 246   if (!mr.is_empty()) {
 247     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 248     _bt.single_block(mr.start(), mr.word_size());
 249     FreeChunk* fc = (FreeChunk*) mr.start();
 250     fc->set_size(mr.word_size());
 251     if (mr.word_size() >= IndexSetSize ) {
 252       returnChunkToDictionary(fc);
 253     } else {
 254       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 255       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 256     }
 257     coalBirth(mr.word_size());
 258   }
 259   _promoInfo.reset();
 260   _smallLinearAllocBlock._ptr = NULL;
 261   _smallLinearAllocBlock._word_size = 0;
 262 }
 263 
 264 void CompactibleFreeListSpace::reset_after_compaction() {
 265   // Reset the space to the new reality - one free chunk.
 266   MemRegion mr(compaction_top(), end());
 267   reset(mr);
 268   // Now refill the linear allocation block(s) if possible.
 269   refillLinearAllocBlocksIfNeeded();
 270 }
 271 
 272 // Walks the entire dictionary, returning a coterminal
 273 // chunk, if it exists. Use with caution since it involves
 274 // a potentially complete walk of a potentially large tree.
 275 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 276 
 277   assert_lock_strong(&_freelistLock);
 278 
 279   return dictionary()->find_chunk_ends_at(end());
 280 }
 281 
 282 
 283 #ifndef PRODUCT
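     // Debug-only (non-PRODUCT) statistics and verification helpers.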
 284 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 285   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 286     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 287   }
 288 }
 289 
 290 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 291   size_t sum = 0;
 292   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 293     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 294   }
 295   return sum;
 296 }
 297 
 298 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 299   size_t count = 0;
 300   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 301     debug_only(
 302       ssize_t total_list_count = 0;
 303       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 304          fc = fc->next()) {
 305         total_list_count++;
 306       }
 307       assert(total_list_count ==  _indexedFreeList[i].count(),
 308         "Count in list is incorrect");
 309     )
 310     count += _indexedFreeList[i].count();
 311   }
 312   return count;
 313 }
 314 
 315 size_t CompactibleFreeListSpace::totalCount() {
 316   size_t num = totalCountInIndexedFreeLists();
 317   num +=  dictionary()->total_count();
 318   if (_smallLinearAllocBlock._word_size != 0) {
 319     num++;
 320   }
 321   return num;
 322 }
 323 #endif
 324 
 325 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 326   FreeChunk* fc = (FreeChunk*) p;
 327   return fc->is_free();
 328 }
 329 
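     // Like free() below, this is only an approximation if it is read while the
     // free lists are being mutated concurrently.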
 330 size_t CompactibleFreeListSpace::used() const {
 331   return capacity() - free();
 332 }
 333 
 334 size_t CompactibleFreeListSpace::free() const {
 335   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 336   // if you do this while the structures are in flux you
 337   // may get an approximate answer only; for instance
 338   // because there is concurrent allocation either
 339   // directly by mutators or for promotion during a GC.
 340   // It's "MT-safe", however, in the sense that you are guaranteed
 341   // not to crash and burn, for instance, because of walking
 342   // pointers that could disappear as you were walking them.
 343   // The approximation is because the various components
 344   // that are read below are not read atomically (and,
 345   // further, the computation of totalSizeInIndexedFreeLists()
 346   // is itself a non-atomic computation). The normal use of
 347   // this is during a resize operation at the end of GC
 348   // and at that time you are guaranteed to get the
 349   // correct actual value. However, for instance, this is
 350   // also read completely asynchronously by the "perf-sampler"
 351   // that supports jvmstat, and you are apt to see the values
 352   // flicker in such cases.
 353   assert(_dictionary != NULL, "No _dictionary?");
 354   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 355           totalSizeInIndexedFreeLists() +
 356           _smallLinearAllocBlock._word_size) * HeapWordSize;
 357 }
 358 
 359 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 360   assert(_dictionary != NULL, "No _dictionary?");
 361   assert_locked();
 362   size_t res = _dictionary->max_chunk_size();
 363   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 364                        (size_t) SmallForLinearAlloc - 1));
 365   // XXX the following could potentially be pretty slow;
 366   // should one, pessimistically for the rare cases when res
 367   // calculated above is less than IndexSetSize,
 368   // just return res calculated above? My reasoning was that
 369   // those cases will be so rare that the extra time spent doesn't
 370   // really matter....
 371   // Note: do not change the loop test i >= res + IndexSetStride
 372   // to i > res below, because i is unsigned and res may be zero.
 373   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 374        i -= IndexSetStride) {
 375     if (_indexedFreeList[i].head() != NULL) {
 376       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 377       return i;
 378     }
 379   }
 380   return res;
 381 }
 382 
 383 void LinearAllocBlock::print_on(outputStream* st) const {
 384   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 385             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 386             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 387 }
 388 
 389 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 390   st->print_cr("COMPACTIBLE FREELIST SPACE");
 391   st->print_cr(" Space:");
 392   Space::print_on(st);
 393 
 394   st->print_cr("promoInfo:");
 395   _promoInfo.print_on(st);
 396 
 397   st->print_cr("_smallLinearAllocBlock");
 398   _smallLinearAllocBlock.print_on(st);
 399 
 400   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 401 
 402   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 403 }
 404 
 405 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 406 const {
 407   reportIndexedFreeListStatistics(st);
 408   st->print_cr("Layout of Indexed Freelists");
 409   st->print_cr("---------------------------");
 410   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 411   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 412     _indexedFreeList[i].print_on(st);
 413     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 414       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 415                    p2i(fc), p2i((HeapWord*)fc + i),
 416                    fc->cantCoalesce() ? "\t CC" : "");
 417     }
 418   }
 419 }
 420 
 421 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 422 const {
 423   _promoInfo.print_on(st);
 424 }
 425 
 426 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 427 const {
 428   _dictionary->report_statistics(st);
 429   st->print_cr("Layout of Freelists in Tree");
 430   st->print_cr("---------------------------");
 431   _dictionary->print_free_lists(st);
 432 }
 433 
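     // Prints every block in the space: live objects, objects that are dead
     // according to the live bit map (only meaningful after the remark phase),
     // and free chunks.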
 434 class BlkPrintingClosure: public BlkClosure {
 435   const CMSCollector*             _collector;
 436   const CompactibleFreeListSpace* _sp;
 437   const CMSBitMap*                _live_bit_map;
 438   const bool                      _post_remark;
 439   outputStream*                   _st;
 440 public:
 441   BlkPrintingClosure(const CMSCollector* collector,
 442                      const CompactibleFreeListSpace* sp,
 443                      const CMSBitMap* live_bit_map,
 444                      outputStream* st):
 445     _collector(collector),
 446     _sp(sp),
 447     _live_bit_map(live_bit_map),
 448     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 449     _st(st) { }
 450   size_t do_blk(HeapWord* addr);
 451 };
 452 
 453 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 454   size_t sz = _sp->block_size_no_stall(addr, _collector);
 455   assert(sz != 0, "Should always be able to compute a size");
 456   if (_sp->block_is_obj(addr)) {
 457     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 458     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 459       p2i(addr),
 460       dead ? "dead" : "live",
 461       sz,
 462       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 463     if (CMSPrintObjectsInDump && !dead) {
 464       oop(addr)->print_on(_st);
 465       _st->print_cr("--------------------------------------");
 466     }
 467   } else { // free block
 468     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 469       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 470     if (CMSPrintChunksInDump) {
 471       ((FreeChunk*)addr)->print_on(_st);
 472       _st->print_cr("--------------------------------------");
 473     }
 474   }
 475   return sz;
 476 }
 477 
 478 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 479   st->print_cr("=========================");
 480   st->print_cr("Block layout in CMS Heap:");
 481   st->print_cr("=========================");
 482   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 483   blk_iterate(&bpcl);
 484 
 485   st->print_cr("=======================================");
 486   st->print_cr("Order & Layout of Promotion Info Blocks");
 487   st->print_cr("=======================================");
 488   print_promo_info_blocks(st);
 489 
 490   st->print_cr("===========================");
 491   st->print_cr("Order of Indexed Free Lists");
 492   st->print_cr("===========================");
 493   print_indexed_free_lists(st);
 494 
 495   st->print_cr("=================================");
 496   st->print_cr("Order of Free Lists in Dictionary");
 497   st->print_cr("=================================");
 498   print_dictionary_free_lists(st);
 499 }
 500 
 501 
 502 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 503   assert_lock_strong(&_freelistLock);
 504   LogHandle(gc, freelist, stats) log;
 505   if (!log.is_debug()) {
 506     return;
 507   }
 508   log.debug("%s", title);
 509   _dictionary->report_statistics(log.debug_stream());
 510   if (log.is_trace()) {
 511     ResourceMark rm;
 512     reportIndexedFreeListStatistics(log.trace_stream());
 513     size_t total_size = totalSizeInIndexedFreeLists() +
 514                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 515     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 516   }
 517 }
 518 
 519 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 520   assert_lock_strong(&_freelistLock);
 521   st->print_cr("Statistics for IndexedFreeLists:");
 522   st->print_cr("--------------------------------");
 523   size_t total_size = totalSizeInIndexedFreeLists();
 524   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 525   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 526   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 527   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 528   if (free_blocks != 0) {
 529     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 530   }
 531 }
 532 
 533 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 534   size_t res = 0;
 535   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 536     debug_only(
 537       ssize_t recount = 0;
 538       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 539          fc = fc->next()) {
 540         recount += 1;
 541       }
 542       assert(recount == _indexedFreeList[i].count(),
 543         "Incorrect count in list");
 544     )
 545     res += _indexedFreeList[i].count();
 546   }
 547   return res;
 548 }
 549 
 550 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 551   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 552     if (_indexedFreeList[i].head() != NULL) {
 553       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 554       return (size_t)i;
 555     }
 556   }
 557   return 0;
 558 }
 559 
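     // Adjust the end of the space.  When expanding, the newly covered region is
     // added to the free lists (possibly coalescing with the last free block);
     // when shrinking, the new end must not drop below the unallocated block.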
 560 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 561   HeapWord* prevEnd = end();
 562   assert(prevEnd != value, "unnecessary set_end call");
 563   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 564         "New end is below unallocated block");
 565   _end = value;
 566   if (prevEnd != NULL) {
 567     // Resize the underlying block offset table.
 568     _bt.resize(pointer_delta(value, bottom()));
 569     if (value <= prevEnd) {
 570       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 571              "New end is below unallocated block");
 572     } else {
 573       // Now, take this new chunk and add it to the free blocks.
 574       // Note that the BOT has not yet been updated for this block.
 575       size_t newFcSize = pointer_delta(value, prevEnd);
 576       // Add the block to the free lists, if possible coalescing it
 577       // with the last free block, and update the BOT and census data.
 578       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 579     }
 580   }
 581 }
 582 
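     // A dirty-card-to-oop closure specialized for CompactibleFreeListSpace; it
     // dispatches to a parallel or serial walk of the covered blocks depending
     // on the _parallel flag.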
 583 class FreeListSpaceDCTOC : public FilteringDCTOC {
 584   CompactibleFreeListSpace* _cfls;
 585   CMSCollector* _collector;
 586   bool _parallel;
 587 protected:
 588   // Override.
 589 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 590   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 591                                        HeapWord* bottom, HeapWord* top, \
 592                                        ClosureType* cl);                \
 593       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 594                                        HeapWord* bottom, HeapWord* top, \
 595                                        ClosureType* cl);                \
 596     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 597                                        HeapWord* bottom, HeapWord* top, \
 598                                        ClosureType* cl)
 599   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 600   walk_mem_region_with_cl_DECL(FilteringClosure);
 601 
 602 public:
 603   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 604                      CMSCollector* collector,
 605                      ExtendedOopClosure* cl,
 606                      CardTableModRefBS::PrecisionStyle precision,
 607                      HeapWord* boundary,
 608                      bool parallel) :
 609     FilteringDCTOC(sp, cl, precision, boundary),
 610     _cfls(sp), _collector(collector), _parallel(parallel) {}
 611 };
 612 
 613 // We de-virtualize the block-related calls below, since we know that our
 614 // space is a CompactibleFreeListSpace.
 615 
 616 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 617 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 618                                                  HeapWord* bottom,              \
 619                                                  HeapWord* top,                 \
 620                                                  ClosureType* cl) {             \
 621    if (_parallel) {                                                             \
 622      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 623    } else {                                                                     \
 624      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 625    }                                                                            \
 626 }                                                                               \
 627 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 628                                                      HeapWord* bottom,          \
 629                                                      HeapWord* top,             \
 630                                                      ClosureType* cl) {         \
 631   /* Skip parts that are before "mr", in case "block_start" sent us             \
 632      back too far. */                                                           \
 633   HeapWord* mr_start = mr.start();                                              \
 634   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 635   HeapWord* next = bottom + bot_size;                                           \
 636   while (next < mr_start) {                                                     \
 637     bottom = next;                                                              \
 638     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 639     next = bottom + bot_size;                                                   \
 640   }                                                                             \
 641                                                                                 \
 642   while (bottom < top) {                                                        \
 643     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 644         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 645                     oop(bottom)) &&                                             \
 646         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 647       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 648       bottom += _cfls->adjustObjectSize(word_sz);                               \
 649     } else {                                                                    \
 650       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 651     }                                                                           \
 652   }                                                                             \
 653 }                                                                               \
 654 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 655                                                        HeapWord* bottom,        \
 656                                                        HeapWord* top,           \
 657                                                        ClosureType* cl) {       \
 658   /* Skip parts that are before "mr", in case "block_start" sent us             \
 659      back too far. */                                                           \
 660   HeapWord* mr_start = mr.start();                                              \
 661   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 662   HeapWord* next = bottom + bot_size;                                           \
 663   while (next < mr_start) {                                                     \
 664     bottom = next;                                                              \
 665     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 666     next = bottom + bot_size;                                                   \
 667   }                                                                             \
 668                                                                                 \
 669   while (bottom < top) {                                                        \
 670     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 671         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 672                     oop(bottom)) &&                                             \
 673         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 674       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 675       bottom += _cfls->adjustObjectSize(word_sz);                               \
 676     } else {                                                                    \
 677       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 678     }                                                                           \
 679   }                                                                             \
 680 }
 681 
 682 // (There are only two of these, rather than N, because the split is due
 683 // only to the introduction of the FilteringClosure, a local part of the
 684 // impl of this abstraction.)
 685 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 686 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 687 
 688 DirtyCardToOopClosure*
 689 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 690                                       CardTableModRefBS::PrecisionStyle precision,
 691                                       HeapWord* boundary,
 692                                       bool parallel) {
 693   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 694 }
 695 
 696 
 697 // Note on locking for the space iteration functions:
 698 // since the collector's iteration activities are concurrent with
 699 // allocation activities by mutators, absent a suitable mutual exclusion
 700 // mechanism the iterators may go awry. For instance a block being iterated
 701 // may suddenly be allocated or divided up and part of it allocated and
 702 // so on.
 703 
 704 // Apply the given closure to each block in the space.
 705 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 706   assert_lock_strong(freelistLock());
 707   HeapWord *cur, *limit;
 708   for (cur = bottom(), limit = end(); cur < limit;
 709        cur += cl->do_blk_careful(cur));
 710 }
 711 
 712 // Apply the given closure to each block in the space.
 713 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 714   assert_lock_strong(freelistLock());
 715   HeapWord *cur, *limit;
 716   for (cur = bottom(), limit = end(); cur < limit;
 717        cur += cl->do_blk(cur));
 718 }
 719 
 720 // Apply the given closure to each oop in the space.
 721 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 722   assert_lock_strong(freelistLock());
 723   HeapWord *cur, *limit;
 724   size_t curSize;
 725   for (cur = bottom(), limit = end(); cur < limit;
 726        cur += curSize) {
 727     curSize = block_size(cur);
 728     if (block_is_obj(cur)) {
 729       oop(cur)->oop_iterate(cl);
 730     }
 731   }
 732 }
 733 
 734 // NOTE: In the following methods, in order to safely be able to
 735 // apply the closure to an object, we need to be sure that the
 736 // object has been initialized. We are guaranteed that an object
 737 // is initialized if we are holding the Heap_lock with the
 738 // world stopped.
 739 void CompactibleFreeListSpace::verify_objects_initialized() const {
 740   if (is_init_completed()) {
 741     assert_locked_or_safepoint(Heap_lock);
 742     if (Universe::is_fully_initialized()) {
 743       guarantee(SafepointSynchronize::is_at_safepoint(),
 744                 "Required for objects to be initialized");
 745     }
 746   } // else make a concession at vm start-up
 747 }
 748 
 749 // Apply the given closure to each object in the space
 750 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 751   assert_lock_strong(freelistLock());
 752   NOT_PRODUCT(verify_objects_initialized());
 753   HeapWord *cur, *limit;
 754   size_t curSize;
 755   for (cur = bottom(), limit = end(); cur < limit;
 756        cur += curSize) {
 757     curSize = block_size(cur);
 758     if (block_is_obj(cur)) {
 759       blk->do_object(oop(cur));
 760     }
 761   }
 762 }
 763 
 764 // Apply the given closure to each live object in the space
 765 //   Because the ConcurrentMarkSweepGeneration uses the
 766 // CompactibleFreeListSpace for concurrent GCs, the space may contain
 767 // objects with references to objects that are no longer
 768 // valid.  For example, an object may reference another object
 769 // that has already been swept up (collected).  This method uses
 770 // obj_is_alive() to determine whether it is safe to apply the closure to
 771 // an object.  See obj_is_alive() for details on how liveness of an
 772 // object is decided.
 773 
 774 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 775   assert_lock_strong(freelistLock());
 776   NOT_PRODUCT(verify_objects_initialized());
 777   HeapWord *cur, *limit;
 778   size_t curSize;
 779   for (cur = bottom(), limit = end(); cur < limit;
 780        cur += curSize) {
 781     curSize = block_size(cur);
 782     if (block_is_obj(cur) && obj_is_alive(cur)) {
 783       blk->do_object(oop(cur));
 784     }
 785   }
 786 }
 787 
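     // Iterate over the blocks intersecting mr, applying cl->do_object_bm() to
     // those that are objects; iteration resumes from the closure's recorded
     // "previous" address when that address lies beyond mr.start().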
 788 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 789                                                   UpwardsObjectClosure* cl) {
 790   assert_locked(freelistLock());
 791   NOT_PRODUCT(verify_objects_initialized());
 792   assert(!mr.is_empty(), "Should be non-empty");
 793   // We use MemRegion(bottom(), end()) rather than used_region() below
 794   // because the two are not necessarily equal for some kinds of
 795   // spaces, in particular, certain kinds of free list spaces.
 796   // We could use the more complicated but more precise:
 797   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 798   // but the slight imprecision seems acceptable in the assertion check.
 799   assert(MemRegion(bottom(), end()).contains(mr),
 800          "Should be within used space");
 801   HeapWord* prev = cl->previous();   // max address from last time
 802   if (prev >= mr.end()) { // nothing to do
 803     return;
 804   }
 805   // This assert will not work when we go from cms space to perm
 806   // space and use the same closure. Easy fix deferred for later. XXX YSR
 807   // assert(prev == NULL || contains(prev), "Should be within space");
 808 
 809   bool last_was_obj_array = false;
 810   HeapWord *blk_start_addr, *region_start_addr;
 811   if (prev > mr.start()) {
 812     region_start_addr = prev;
 813     blk_start_addr    = prev;
 814     // The previous invocation may have pushed "prev" beyond the
 815     // last allocated block, yet there may still be blocks
 816     // in this region due to a particular coalescing policy.
 817     // Relax the assertion so that the case where the unallocated
 818     // block is maintained and "prev" is beyond the unallocated
 819     // block does not cause the assertion to fire.
 820     assert((BlockOffsetArrayUseUnallocatedBlock &&
 821             (!is_in(prev))) ||
 822            (blk_start_addr == block_start(region_start_addr)), "invariant");
 823   } else {
 824     region_start_addr = mr.start();
 825     blk_start_addr    = block_start(region_start_addr);
 826   }
 827   HeapWord* region_end_addr = mr.end();
 828   MemRegion derived_mr(region_start_addr, region_end_addr);
 829   while (blk_start_addr < region_end_addr) {
 830     const size_t size = block_size(blk_start_addr);
 831     if (block_is_obj(blk_start_addr)) {
 832       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 833     } else {
 834       last_was_obj_array = false;
 835     }
 836     blk_start_addr += size;
 837   }
 838   if (!last_was_obj_array) {
 839     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 840            "Should be within (closed) used space");
 841     assert(blk_start_addr > prev, "Invariant");
 842     cl->set_previous(blk_start_addr); // min address for next time
 843   }
 844 }
 845 
 846 // Callers of this iterator beware: The closure application should
 847 // be robust in the face of uninitialized objects and should (always)
 848 // return a correct size so that the next addr + size below gives us a
 849 // valid block boundary. [See for instance,
 850 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 851 // in ConcurrentMarkSweepGeneration.cpp.]
 852 HeapWord*
 853 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 854   ObjectClosureCareful* cl) {
 855   assert_lock_strong(freelistLock());
 856   // Can't use used_region() below because it may not necessarily
 857   // be the same as [bottom(),end()); although we could
 858   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 859   // that appears too cumbersome, so we just do the simpler check
 860   // in the assertion below.
 861   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 862          "mr should be non-empty and within used space");
 863   HeapWord *addr, *end;
 864   size_t size;
 865   for (addr = block_start_careful(mr.start()), end  = mr.end();
 866        addr < end; addr += size) {
 867     FreeChunk* fc = (FreeChunk*)addr;
 868     if (fc->is_free()) {
 869       // Since we hold the free list lock, which protects direct
 870       // allocation in this generation by mutators, a free object
 871       // will remain free throughout this iteration code.
 872       size = fc->size();
 873     } else {
 874       // Note that the object need not be initialized,
 875       // because (for instance) the free list lock does NOT protect
 876       // object initialization. The closure application below must
 877       // therefore be correct in the face of uninitialized objects.
 878       size = cl->do_object_careful_m(oop(addr), mr);
 879       if (size == 0) {
 880         // An unparsable object found. Signal early termination.
 881         return addr;
 882       }
 883     }
 884   }
 885   return NULL;
 886 }
 887 
 888 
 889 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 890   NOT_PRODUCT(verify_objects_initialized());
 891   return _bt.block_start(p);
 892 }
 893 
 894 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 895   return _bt.block_start_careful(p);
 896 }
 897 
 898 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 899   NOT_PRODUCT(verify_objects_initialized());
 900   // This must be volatile, or else there is a danger that the compiler
 901   // will compile the code below into a sometimes-infinite loop, by keeping
 902   // the value read the first time in a register.
 903   while (true) {
 904     // We must do this until we get a consistent view of the object.
 905     if (FreeChunk::indicatesFreeChunk(p)) {
 906       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 907       size_t res = fc->size();
 908 
 909       // Bugfix for systems with weak memory model (PPC64/IA64). The
 910       // block's free bit was set and we have read the size of the
 911       // block. Acquire and check the free bit again. If the block is
 912       // still free, the read size is correct.
 913       OrderAccess::acquire();
 914 
 915       // If the object is still a free chunk, return the size, else it
 916       // has been allocated so try again.
 917       if (FreeChunk::indicatesFreeChunk(p)) {
 918         assert(res != 0, "Block size should not be 0");
 919         return res;
 920       }
 921     } else {
 922       // must read from what 'p' points to in each loop.
 923       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 924       if (k != NULL) {
 925         assert(k->is_klass(), "Should really be klass oop.");
 926         oop o = (oop)p;
 927         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 928 
 929         // Bugfix for systems with weak memory model (PPC64/IA64).
 930         // The object o may be an array. Acquire to make sure that the array
 931         // size (third word) is consistent.
 932         OrderAccess::acquire();
 933 
 934         size_t res = o->size_given_klass(k);
 935         res = adjustObjectSize(res);
 936         assert(res != 0, "Block size should not be 0");
 937         return res;
 938       }
 939     }
 940   }
 941 }
 942 
 943 // TODO: Now that is_parsable is gone, we should combine these two functions.
 944 // A variant of the above that uses the Printezis bits for
 945 // unparsable but allocated objects. This avoids any possible
 946 // stalls waiting for mutators to initialize objects, and is
 947 // thus potentially faster than the variant above. However,
 948 // this variant may return a zero size for a block that is
 949 // under mutation and for which a consistent size cannot be
 950 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 951 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 952                                                      const CMSCollector* c)
 953 const {
 954   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 955   // This must be volatile, or else there is a danger that the compiler
 956   // will compile the code below into a sometimes-infinite loop, by keeping
 957   // the value read the first time in a register.
 958   DEBUG_ONLY(uint loops = 0;)
 959   while (true) {
 960     // We must do this until we get a consistent view of the object.
 961     if (FreeChunk::indicatesFreeChunk(p)) {
 962       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 963       size_t res = fc->size();
 964 
 965       // Bugfix for systems with weak memory model (PPC64/IA64). The
 966       // free bit of the block was set and we have read the size of
 967       // the block. Acquire and check the free bit again. If the
 968       // block is still free, the read size is correct.
 969       OrderAccess::acquire();
 970 
 971       if (FreeChunk::indicatesFreeChunk(p)) {
 972         assert(res != 0, "Block size should not be 0");
 973         assert(loops == 0, "Should be 0");
 974         return res;
 975       }
 976     } else {
 977       // must read from what 'p' points to in each loop.
 978       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 979       // We trust the size of any object that has a non-NULL
 980       // klass and (for those in the perm gen) is parsable
 981       // -- irrespective of its conc_safe-ty.
 982       if (k != NULL) {
 983         assert(k->is_klass(), "Should really be klass oop.");
 984         oop o = (oop)p;
 985         assert(o->is_oop(), "Should be an oop");
 986 
 987         // Bugfix for systems with weak memory model (PPC64/IA64).
 988         // The object o may be an array. Acquire to make sure that the array
 989         // size (third word) is consistent.
 990         OrderAccess::acquire();
 991 
 992         size_t res = o->size_given_klass(k);
 993         res = adjustObjectSize(res);
 994         assert(res != 0, "Block size should not be 0");
 995         return res;
 996       } else {
 997         // May return 0 if P-bits not present.
 998         return c->block_size_if_printezis_bits(p);
 999       }
1000     }
1001     assert(loops == 0, "Can loop at most once");
1002     DEBUG_ONLY(loops++;)
1003   }
1004 }
1005 
1006 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1007   NOT_PRODUCT(verify_objects_initialized());
1008   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1009   FreeChunk* fc = (FreeChunk*)p;
1010   if (fc->is_free()) {
1011     return fc->size();
1012   } else {
1013     // Ignore mark word because this may be a recently promoted
1014     // object whose mark word is used to chain together grey
1015     // objects (the last one would have a null value).
1016     assert(oop(p)->is_oop(true), "Should be an oop");
1017     return adjustObjectSize(oop(p)->size());
1018   }
1019 }
1020 
1021 // This implementation assumes that the property of "being an object" is
1022 // stable.  But being a free chunk may not be (because of parallel
1023 // promotion.)
1024 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1025   FreeChunk* fc = (FreeChunk*)p;
1026   assert(is_in_reserved(p), "Should be in space");
1027   if (FreeChunk::indicatesFreeChunk(p)) return false;
1028   Klass* k = oop(p)->klass_or_null();
1029   if (k != NULL) {
1030     // Ignore mark word because it may have been used to
1031     // chain together promoted objects (the last one
1032     // would have a null value).
1033     assert(oop(p)->is_oop(true), "Should be an oop");
1034     return true;
1035   } else {
1036     return false;  // Was not an object at the start of collection.
1037   }
1038 }
1039 
1040 // Check if the object is alive. This fact is checked either by consulting
1041 // the main marking bitmap in the sweeping phase or, if it's a permanent
1042 // generation and we're not in the sweeping phase, by checking the
1043 // perm_gen_verify_bit_map where we store the "deadness" information if
1044 // we did not sweep the perm gen in the most recent previous GC cycle.
1045 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1046   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1047          "Else races are possible");
1048   assert(block_is_obj(p), "The address should point to an object");
1049 
1050   // If we're sweeping, we use object liveness information from the main bit map
1051   // for both perm gen and old gen.
1052   // We don't need to lock the bitmap (live_map or dead_map below), because
1053   // EITHER we are in the middle of the sweeping phase, and the
1054   // main marking bit map (live_map below) is locked,
1055   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1056   // is stable, because it's mutated only in the sweeping phase.
1057   // NOTE: This method is also used by jmap where, if class unloading is
1058   // off, the results can return "false" for legitimate perm objects,
1059   // when we are not in the midst of a sweeping phase, which can result
1060   // in jmap not reporting certain perm gen objects. This will be moot
1061   // if/when the perm gen goes away in the future.
1062   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1063     CMSBitMap* live_map = _collector->markBitMap();
1064     return live_map->par_isMarked((HeapWord*) p);
1065   }
1066   return true;
1067 }
1068 
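     // Variant of block_is_obj() for use when the free/allocated state of blocks
     // is stable (no parallel promotion is in progress); it consults the chunk's
     // free bit directly.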
1069 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1070   FreeChunk* fc = (FreeChunk*)p;
1071   assert(is_in_reserved(p), "Should be in space");
1072   assert(_bt.block_start(p) == p, "Should be a block boundary");
1073   if (!fc->is_free()) {
1074     // Ignore mark word because it may have been used to
1075     // chain together promoted objects (the last one
1076     // would have a null value).
1077     assert(oop(p)->is_oop(true), "Should be an oop");
1078     return true;
1079   }
1080   return false;
1081 }
1082 
1083 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1084 // approximate answer if you don't hold the freelist lock when you call this.
1085 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1086   size_t size = 0;
1087   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1088     debug_only(
1089       // We may be calling here without the lock in which case we
1090       // won't do this modest sanity check.
1091       if (freelistLock()->owned_by_self()) {
1092         size_t total_list_size = 0;
1093         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1094           fc = fc->next()) {
1095           total_list_size += i;
1096         }
1097         assert(total_list_size == i * _indexedFreeList[i].count(),
1098                "Count in list is incorrect");
1099       }
1100     )
1101     size += i * _indexedFreeList[i].count();
1102   }
1103   return size;
1104 }
1105 
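     // MT-safe allocation entry point: takes the free list lock and delegates to
     // allocate().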
1106 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1107   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1108   return allocate(size);
1109 }
1110 
1111 HeapWord*
1112 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1113   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1114 }
1115 
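     // Allocate size HeapWords from this space; size must already have been
     // adjusted with adjustObjectSize().  Returns NULL on failure.  The caller
     // must hold the free list lock.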
1116 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1117   assert_lock_strong(freelistLock());
1118   HeapWord* res = NULL;
1119   assert(size == adjustObjectSize(size),
1120          "use adjustObjectSize() before calling into allocate()");
1121 
1122   res = allocate_adaptive_freelists(size);
1123 
1124   if (res != NULL) {
1125     // check that res does lie in this space!
1126     assert(is_in_reserved(res), "Not in this space!");
1127     assert(is_aligned((void*)res), "alignment check");
1128 
1129     FreeChunk* fc = (FreeChunk*)res;
1130     fc->markNotFree();
1131     assert(!fc->is_free(), "shouldn't be marked free");
1132     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1133     // Verify that the block offset table shows this to
1134     // be a single block, but not one which is unallocated.
1135     _bt.verify_single_block(res, size);
1136     _bt.verify_not_unallocated(res, size);
1137     // mangle a just allocated object with a distinct pattern.
1138     debug_only(fc->mangleAllocated(size));
1139   }
1140 
1141   return res;
1142 }
1143 
1144 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1145   assert_lock_strong(freelistLock());
1146   HeapWord* res = NULL;
1147   assert(size == adjustObjectSize(size),
1148          "use adjustObjectSize() before calling into allocate()");
1149 
1150   // Strategy
1151   //   if small
1152   //     exact size from the small object indexed list
1153   //     small or large linear allocation block (linAB) as appropriate
1154   //     take from lists of greater sized chunks
1155   //   else
1156   //     dictionary
1157   //     small or large linear allocation block if it has the space
1158   // Try allocating exact size from indexTable first
1159   if (size < IndexSetSize) {
1160     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1161     if (res != NULL) {
1162       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1163         "Not removed from free list");
1164       // no block offset table adjustment is necessary on blocks in
1165       // the indexed lists.
1166 
1167     // Try allocating from the small LinAB
1168     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1169         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1170         // If successful, the above also adjusts the block offset table.
1171         // Note that this call will refill the LinAB to
1172         // satisfy the request.  This is different from
1173         // evm.
1174         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1175     } else {
1176       // Raid the exact free lists larger than size, even if they are not
1177       // overpopulated.
1178       res = (HeapWord*) getChunkFromGreater(size);
1179     }
1180   } else {
1181     // Big objects get allocated directly from the dictionary.
1182     res = (HeapWord*) getChunkFromDictionaryExact(size);
1183     if (res == NULL) {
1184       // Try hard not to fail since an allocation failure will likely
1185       // trigger a synchronous GC.  Try to get the space from the
1186       // allocation blocks.
1187       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1188     }
1189   }
1190 
1191   return res;
1192 }
1193 
1194 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1195 // when promoting obj.
1196 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1197   // Depending on the object size, expansion may require refilling either a
1198   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2 * MinChunkSize
1199   // is added because the dictionary may over-allocate to avoid fragmentation.
1200   size_t space = obj_size;
1201   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1202   return space;
1203 }
1204 
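     // Satisfy a request for numWords by splitting a chunk taken from a larger
     // indexed free list or, failing that, from the dictionary; the remainder is
     // returned to the free lists.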
1205 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1206   FreeChunk* ret;
1207 
1208   assert(numWords >= MinChunkSize, "Size is less than minimum");
1209   assert(linearAllocationWouldFail() || bestFitFirst(),
1210     "Should not be here");
1211 
1212   size_t i;
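  // Start the search at numWords + MinChunkSize so that, after numWords
  // are split off, the remainder returned to the free lists is at least
  // MinChunkSize.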
1213   size_t currSize = numWords + MinChunkSize;
1214   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1215   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1216     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1217     if (fl->head()) {
1218       ret = getFromListGreater(fl, numWords);
1219       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1220       return ret;
1221     }
1222   }
1223 
1224   currSize = MAX2((size_t)SmallForDictionary,
1225                   (size_t)(numWords + MinChunkSize));
1226 
1227   /* Try to get a chunk that satisfies request, while avoiding
1228      fragmentation that can't be handled. */
1229   {
1230     ret =  dictionary()->get_chunk(currSize);
1231     if (ret != NULL) {
1232       assert(ret->size() - numWords >= MinChunkSize,
1233              "Chunk is too small");
1234       _bt.allocated((HeapWord*)ret, ret->size());
1235       /* Carve returned chunk. */
1236       (void) splitChunkAndReturnRemainder(ret, numWords);
1237       /* Label this as no longer a free chunk. */
1238       assert(ret->is_free(), "This chunk should be free");
1239       ret->link_prev(NULL);
1240     }
1241     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1242     return ret;
1243   }
1244   ShouldNotReachHere();
1245 }
1246 
1247 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1248   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1249   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1250 }
1251 
1252 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1253   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1254          (_smallLinearAllocBlock._word_size == fc->size()),
1255          "Linear allocation block shows incorrect size");
1256   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1257           (_smallLinearAllocBlock._word_size == fc->size()));
1258 }
1259 
1260 // Check if the purported free chunk is present either as a linear
1261 // allocation block, the size-indexed table of (smaller) free blocks,
1262 // or the larger free blocks kept in the binary tree dictionary.
1263 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1264   if (verify_chunk_is_linear_alloc_block(fc)) {
1265     return true;
1266   } else if (fc->size() < IndexSetSize) {
1267     return verifyChunkInIndexedFreeLists(fc);
1268   } else {
1269     return dictionary()->verify_chunk_in_free_list(fc);
1270   }
1271 }
1272 
1273 #ifndef PRODUCT
1274 void CompactibleFreeListSpace::assert_locked() const {
1275   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1276 }
1277 
1278 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1279   CMSLockVerifier::assert_locked(lock);
1280 }
1281 #endif
1282 
1283 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1284   // In the parallel case, the main thread holds the free list lock
1285   // on behalf of the parallel threads.
1286   FreeChunk* fc;
1287   {
1288     // If GC is parallel, this might be called by several threads.
1289     // This should be rare enough that the locking overhead won't affect
1290     // the sequential code.
1291     MutexLockerEx x(parDictionaryAllocLock(),
1292                     Mutex::_no_safepoint_check_flag);
1293     fc = getChunkFromDictionary(size);
1294   }
1295   if (fc != NULL) {
1296     fc->dontCoalesce();
1297     assert(fc->is_free(), "Should be free, but not coalescable");
1298     // Verify that the block offset table shows this to
1299     // be a single block, but not one which is unallocated.
1300     _bt.verify_single_block((HeapWord*)fc, fc->size());
1301     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1302   }
1303   return fc;
1304 }
1305 
1306 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1307   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1308   assert_locked();
1309 
1310   // if we are tracking promotions, then first ensure space for
1311   // promotion (including spooling space for saving header if necessary).
1312   // then allocate and copy, then track promoted info if needed.
1313   // When tracking (see PromotionInfo::track()), the mark word may
1314   // be displaced and in this case restoration of the mark word
1315   // occurs in the (oop_since_save_marks_)iterate phase.
1316   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1317     return NULL;
1318   }
1319   // Call the allocate(size_t, bool) form directly to avoid the
1320   // additional call through the allocate(size_t) form.  Having
1321   // the compiler inline the call is problematic because allocate(size_t)
1322   // is a virtual method.
1323   HeapWord* res = allocate(adjustObjectSize(obj_size));
1324   if (res != NULL) {
1325     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1326     // if we should be tracking promotions, do so.
1327     if (_promoInfo.tracking()) {
1328         _promoInfo.track((PromotedObject*)res);
1329     }
1330   }
1331   return oop(res);
1332 }
1333 
1334 HeapWord*
1335 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1336   assert_locked();
1337   assert(size >= MinChunkSize, "minimum chunk size");
1338   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1339     "maximum from smallLinearAllocBlock");
1340   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1341 }
1342 
1343 HeapWord*
1344 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1345                                                        size_t size) {
1346   assert_locked();
1347   assert(size >= MinChunkSize, "too small");
1348   HeapWord* res = NULL;
1349   // Try to do linear allocation from blk, first checking that it is non-empty.
1350   if (blk->_word_size == 0) {
1351     // We have probably been unable to fill this either in the prologue or
1352     // when it was exhausted at the last linear allocation. Bail out until
1353     // next time.
1354     assert(blk->_ptr == NULL, "consistency check");
1355     return NULL;
1356   }
1357   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1358   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1359   if (res != NULL) return res;
1360 
1361   // about to exhaust this linear allocation block
1362   if (blk->_word_size == size) { // exactly satisfied
1363     res = blk->_ptr;
1364     _bt.allocated(res, blk->_word_size);
1365   } else if (size + MinChunkSize <= blk->_refillSize) {
1366     size_t sz = blk->_word_size;
1367     // Update _unallocated_block if the size is such that chunk would be
1368     // returned to the indexed free list.  All other chunks in the indexed
1369     // free lists are allocated from the dictionary so that _unallocated_block
1370     // has already been adjusted for them.  Do it here so that the cost
1371     // is incurred for all chunks added back to the indexed free lists.
1372     if (sz < SmallForDictionary) {
1373       _bt.allocated(blk->_ptr, sz);
1374     }
1375     // Return the chunk that isn't big enough, and then refill below.
1376     addChunkToFreeLists(blk->_ptr, sz);
1377     split_birth(sz);
1378     // Don't keep statistics on adding back chunk from a LinAB.
1379   } else {
1380     // A refilled block would not satisfy the request.
1381     return NULL;
1382   }
1383 
1384   blk->_ptr = NULL; blk->_word_size = 0;
1385   refillLinearAllocBlock(blk);
1386   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1387          "block was replenished");
1388   if (res != NULL) {
1389     split_birth(size);
1390     repairLinearAllocBlock(blk);
1391   } else if (blk->_ptr != NULL) {
1392     res = blk->_ptr;
1393     size_t blk_size = blk->_word_size;
1394     blk->_word_size -= size;
1395     blk->_ptr  += size;
1396     split_birth(size);
1397     repairLinearAllocBlock(blk);
1398     // Update BOT last so that other (parallel) GC threads see a consistent
1399     // view of the BOT and free blocks.
1400     // Above must occur before BOT is updated below.
1401     OrderAccess::storestore();
1402     _bt.split_block(res, blk_size, size);  // adjust block offset table
1403   }
1404   return res;
1405 }
1406 
1407 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1408                                         LinearAllocBlock* blk,
1409                                         size_t size) {
1410   assert_locked();
1411   assert(size >= MinChunkSize, "too small");
1412 
1413   HeapWord* res = NULL;
1414   // This is the common case.  Keep it simple.
1415   if (blk->_word_size >= size + MinChunkSize) {
1416     assert(blk->_ptr != NULL, "consistency check");
1417     res = blk->_ptr;
1418     // Note that the BOT is up-to-date for the linAB before allocation.  It
1419     // indicates the start of the linAB.  The split_block() updates the
1420     // BOT for the linAB after the allocation (indicates the start of the
1421     // next chunk to be allocated).
1422     size_t blk_size = blk->_word_size;
1423     blk->_word_size -= size;
1424     blk->_ptr  += size;
1425     split_birth(size);
1426     repairLinearAllocBlock(blk);
1427     // Update BOT last so that other (parallel) GC threads see a consistent
1428     // view of the BOT and free blocks.
1429     // Above must occur before BOT is updated below.
1430     OrderAccess::storestore();
1431     _bt.split_block(res, blk_size, size);  // adjust block offset table
1432     _bt.allocated(res, size);
1433   }
1434   return res;
1435 }
1436 
1437 FreeChunk*
1438 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1439   assert_locked();
1440   assert(size < SmallForDictionary, "just checking");
1441   FreeChunk* res;
1442   res = _indexedFreeList[size].get_chunk_at_head();
1443   if (res == NULL) {
1444     res = getChunkFromIndexedFreeListHelper(size);
1445   }
1446   _bt.verify_not_unallocated((HeapWord*) res, size);
1447   assert(res == NULL || res->size() == size, "Incorrect block size");
1448   return res;
1449 }
1450 
1451 FreeChunk*
1452 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1453   bool replenish) {
1454   assert_locked();
1455   FreeChunk* fc = NULL;
1456   if (size < SmallForDictionary) {
1457     assert(_indexedFreeList[size].head() == NULL ||
1458       _indexedFreeList[size].surplus() <= 0,
1459       "List for this size should be empty or underpopulated");
1460     // Try best fit in exact lists before replenishing the list
1461     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1462       // Replenish list.
1463       //
1464       // Things tried that failed.
1465       //   Tried allocating out of the two LinAB's first before
1466       // replenishing lists.
1467       //   Tried small linAB of size 256 (size in indexed list)
1468       // and replenishing indexed lists from the small linAB.
1469       //
1470       FreeChunk* newFc = NULL;
1471       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1472       if (replenish_size < SmallForDictionary) {
1473         // Do not replenish from an underpopulated size.
1474         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1475             _indexedFreeList[replenish_size].head() != NULL) {
1476           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1477         } else if (bestFitFirst()) {
1478           newFc = bestFitSmall(replenish_size);
1479         }
1480       }
1481       if (newFc == NULL && replenish_size > size) {
1482         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1483         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1484       }
1485       // Note: The split-death stats update for the block obtained above
1486       // is recorded below, precisely when we know we are actually
1487       // going to split it into more than one piece.
1488       if (newFc != NULL) {
1489         if  (replenish || CMSReplenishIntermediate) {
1490           // Replenish this list and return one block to caller.
1491           size_t i;
1492           FreeChunk *curFc, *nextFc;
1493           size_t num_blk = newFc->size() / size;
1494           assert(num_blk >= 1, "Smaller than requested?");
1495           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1496           if (num_blk > 1) {
1497             // we are sure we will be splitting the block just obtained
1498             // into multiple pieces; record the split-death of the original
1499             splitDeath(replenish_size);
1500           }
1501           // carve up and link blocks 0, ..., num_blk - 2
1502           // The last chunk is not added to the lists but is returned as the
1503           // free chunk.
1504           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1505                i = 0;
1506                i < (num_blk - 1);
1507                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1508                i++) {
1509             curFc->set_size(size);
1510             // Don't record this as a return in order to try and
1511             // determine the "returns" from a GC.
1512             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1513             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1514             _bt.mark_block((HeapWord*)curFc, size);
1515             split_birth(size);
1516             // Don't record the initial population of the indexed list
1517             // as a split birth.
1518           }
1519 
1520           // check that the arithmetic was OK above
1521           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1522             "inconsistency in carving newFc");
1523           curFc->set_size(size);
1524           _bt.mark_block((HeapWord*)curFc, size);
1525           split_birth(size);
1526           fc = curFc;
1527         } else {
1528           // Return entire block to caller
1529           fc = newFc;
1530         }
1531       }
1532     }
1533   } else {
1534     // Get a free chunk from the free chunk dictionary to be returned to
1535     // replenish the indexed free list.
1536     fc = getChunkFromDictionaryExact(size);
1537   }
1538   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1539   return fc;
1540 }
1541 
1542 FreeChunk*
1543 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1544   assert_locked();
1545   FreeChunk* fc = _dictionary->get_chunk(size,
1546                                          FreeBlockDictionary<FreeChunk>::atLeast);
1547   if (fc == NULL) {
1548     return NULL;
1549   }
1550   _bt.allocated((HeapWord*)fc, fc->size());
1551   if (fc->size() >= size + MinChunkSize) {
1552     fc = splitChunkAndReturnRemainder(fc, size);
1553   }
1554   assert(fc->size() >= size, "chunk too small");
1555   assert(fc->size() < size + MinChunkSize, "chunk too big");
1556   _bt.verify_single_block((HeapWord*)fc, fc->size());
1557   return fc;
1558 }
1559 
1560 FreeChunk*
1561 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1562   assert_locked();
1563   FreeChunk* fc = _dictionary->get_chunk(size,
1564                                          FreeBlockDictionary<FreeChunk>::atLeast);
1565   if (fc == NULL) {
1566     return fc;
1567   }
1568   _bt.allocated((HeapWord*)fc, fc->size());
1569   if (fc->size() == size) {
1570     _bt.verify_single_block((HeapWord*)fc, size);
1571     return fc;
1572   }
1573   assert(fc->size() > size, "get_chunk() guarantee");
1574   if (fc->size() < size + MinChunkSize) {
1575     // Return the chunk to the dictionary and go get a bigger one.
1576     returnChunkToDictionary(fc);
1577     fc = _dictionary->get_chunk(size + MinChunkSize,
1578                                 FreeBlockDictionary<FreeChunk>::atLeast);
1579     if (fc == NULL) {
1580       return NULL;
1581     }
1582     _bt.allocated((HeapWord*)fc, fc->size());
1583   }
1584   assert(fc->size() >= size + MinChunkSize, "tautology");
1585   fc = splitChunkAndReturnRemainder(fc, size);
1586   assert(fc->size() == size, "chunk is wrong size");
1587   _bt.verify_single_block((HeapWord*)fc, size);
1588   return fc;
1589 }
1590 
1591 void
1592 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1593   assert_locked();
1594 
1595   size_t size = chunk->size();
1596   _bt.verify_single_block((HeapWord*)chunk, size);
1597   // adjust _unallocated_block downward, as necessary
1598   _bt.freed((HeapWord*)chunk, size);
1599   _dictionary->return_chunk(chunk);
1600 #ifndef PRODUCT
1601   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1602     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1603     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1604     tl->verify_stats();
1605   }
1606 #endif // PRODUCT
1607 }
1608 
1609 void
1610 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1611   assert_locked();
1612   size_t size = fc->size();
1613   _bt.verify_single_block((HeapWord*) fc, size);
1614   _bt.verify_not_unallocated((HeapWord*) fc, size);
1615   _indexedFreeList[size].return_chunk_at_tail(fc);
1616 #ifndef PRODUCT
1617   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1618      _indexedFreeList[size].verify_stats();
1619   }
1620 #endif // PRODUCT
1621 }
1622 
1623 // Add chunk to end of last block -- if it's the largest
1624 // block -- and update BOT and census data. We would
1625 // of course have preferred to coalesce it with the
1626 // last block, but it's currently less expensive to find the
1627 // largest block than it is to find the last.
1628 void
1629 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1630   HeapWord* chunk, size_t     size) {
1631   // check that the chunk does lie in this space!
1632   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1633   // One of the parallel gc task threads may be here
1634   // whilst others are allocating.
1635   Mutex* lock = &_parDictionaryAllocLock;
1636   FreeChunk* ec;
1637   {
1638     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1639     ec = dictionary()->find_largest_dict();  // get largest block
1640     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1641       // It's a coterminal block - we can coalesce.
1642       size_t old_size = ec->size();
1643       coalDeath(old_size);
1644       removeChunkFromDictionary(ec);
1645       size += old_size;
1646     } else {
1647       ec = (FreeChunk*)chunk;
1648     }
1649   }
1650   ec->set_size(size);
1651   debug_only(ec->mangleFreed(size));
1652   if (size < SmallForDictionary) {
1653     lock = _indexedFreeListParLocks[size];
1654   }
1655   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1656   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1657   // record the birth under the lock since the recording involves
1658   // manipulation of the list on which the chunk lives and
1659   // if the chunk is allocated and is the last on the list,
1660   // the list can go away.
1661   coalBirth(size);
1662 }
1663 
1664 void
1665 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1666                                               size_t     size) {
1667   // check that the chunk does lie in this space!
1668   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1669   assert_locked();
1670   _bt.verify_single_block(chunk, size);
1671 
1672   FreeChunk* fc = (FreeChunk*) chunk;
1673   fc->set_size(size);
1674   debug_only(fc->mangleFreed(size));
1675   if (size < SmallForDictionary) {
1676     returnChunkToFreeList(fc);
1677   } else {
1678     returnChunkToDictionary(fc);
1679   }
1680 }
1681 
1682 void
1683 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1684   size_t size, bool coalesced) {
1685   assert_locked();
1686   assert(chunk != NULL, "null chunk");
1687   if (coalesced) {
1688     // repair BOT
1689     _bt.single_block(chunk, size);
1690   }
1691   addChunkToFreeLists(chunk, size);
1692 }
1693 
1694 // We _must_ find the purported chunk on our free lists;
1695 // we assert if we don't.
1696 void
1697 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1698   size_t size = fc->size();
1699   assert_locked();
1700   debug_only(verifyFreeLists());
1701   if (size < SmallForDictionary) {
1702     removeChunkFromIndexedFreeList(fc);
1703   } else {
1704     removeChunkFromDictionary(fc);
1705   }
1706   _bt.verify_single_block((HeapWord*)fc, size);
1707   debug_only(verifyFreeLists());
1708 }
1709 
1710 void
1711 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1712   size_t size = fc->size();
1713   assert_locked();
1714   assert(fc != NULL, "null chunk");
1715   _bt.verify_single_block((HeapWord*)fc, size);
1716   _dictionary->remove_chunk(fc);
1717   // adjust _unallocated_block upward, as necessary
1718   _bt.allocated((HeapWord*)fc, size);
1719 }
1720 
1721 void
1722 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1723   assert_locked();
1724   size_t size = fc->size();
1725   _bt.verify_single_block((HeapWord*)fc, size);
1726   NOT_PRODUCT(
1727     if (FLSVerifyIndexTable) {
1728       verifyIndexedFreeList(size);
1729     }
1730   )
1731   _indexedFreeList[size].remove_chunk(fc);
1732   NOT_PRODUCT(
1733     if (FLSVerifyIndexTable) {
1734       verifyIndexedFreeList(size);
1735     }
1736   )
1737 }
1738 
1739 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1740   /* A hint is the next larger size that has a surplus.
1741      Start search at a size large enough to guarantee that
1742      the excess is >= MinChunkSize. */
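  /* Illustrative example (hypothetical sizes): a request for 20 words
     starts the walk at align_object_size(20 + MinChunkSize) and follows
     the chain of hints upward until a list with a surplus is found. */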
1743   size_t start = align_object_size(numWords + MinChunkSize);
1744   if (start < IndexSetSize) {
1745     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1746     size_t    hint = _indexedFreeList[start].hint();
1747     while (hint < IndexSetSize) {
1748       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1749       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1750       if (fl->surplus() > 0 && fl->head() != NULL) {
1751         // Found a list with surplus, reset original hint
1752         // and split out a free chunk which is returned.
1753         _indexedFreeList[start].set_hint(hint);
1754         FreeChunk* res = getFromListGreater(fl, numWords);
1755         assert(res == NULL || res->is_free(),
1756           "Should be returning a free chunk");
1757         return res;
1758       }
1759       hint = fl->hint(); /* keep looking */
1760     }
1761     /* None found. */
1762     it[start].set_hint(IndexSetSize);
1763   }
1764   return NULL;
1765 }
1766 
1767 /* Requires fl->size >= numWords + MinChunkSize */
1768 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1769   size_t numWords) {
1770   FreeChunk *curr = fl->head();
1771   assert(curr != NULL, "List is empty");
1772   assert(numWords >= MinChunkSize, "Word size is too small");
1773   size_t oldNumWords = curr->size();
1774   assert(oldNumWords >= numWords + MinChunkSize,
1775         "Size of chunks in the list is too small");
1776 
1777   fl->remove_chunk(curr);
1778   // recorded indirectly by splitChunkAndReturnRemainder -
1779   // smallSplit(oldNumWords, numWords);
1780   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1781   // Does anything have to be done for the remainder in terms of
1782   // fixing the card table?
1783   assert(new_chunk == NULL || new_chunk->is_free(),
1784     "Should be returning a free chunk");
1785   return new_chunk;
1786 }
1787 
1788 FreeChunk*
1789 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1790   size_t new_size) {
1791   assert_locked();
1792   size_t size = chunk->size();
1793   assert(size > new_size, "Split from a smaller block?");
1794   assert(is_aligned(chunk), "alignment problem");
1795   assert(size == adjustObjectSize(size), "alignment problem");
1796   size_t rem_sz = size - new_size;
1797   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1798   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1799   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1800   assert(is_aligned(ffc), "alignment problem");
1801   ffc->set_size(rem_sz);
1802   ffc->link_next(NULL);
1803   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1804   // Above must occur before BOT is updated below.
1805   // adjust block offset table
1806   OrderAccess::storestore();
1807   assert(chunk->is_free() && ffc->is_free(), "Error");
1808   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1809   if (rem_sz < SmallForDictionary) {
1810     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1811     bool is_par = Thread::current()->is_GC_task_thread();
1812     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1813     returnChunkToFreeList(ffc);
1814     split(size, rem_sz);
1815     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1816   } else {
1817     returnChunkToDictionary(ffc);
1818     split(size, rem_sz);
1819   }
1820   chunk->set_size(new_size);
1821   return chunk;
1822 }
1823 
1824 void
1825 CompactibleFreeListSpace::sweep_completed() {
1826   // Now that space is probably plentiful, refill linear
1827   // allocation blocks as needed.
1828   refillLinearAllocBlocksIfNeeded();
1829 }
1830 
1831 void
1832 CompactibleFreeListSpace::gc_prologue() {
1833   assert_locked();
1834   reportFreeListStatistics("Before GC:");
1835   refillLinearAllocBlocksIfNeeded();
1836 }
1837 
1838 void
1839 CompactibleFreeListSpace::gc_epilogue() {
1840   assert_locked();
1841   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1842   _promoInfo.stopTrackingPromotions();
1843   repairLinearAllocationBlocks();
1844   reportFreeListStatistics("After GC:");
1845 }
1846 
1847 // Iteration support, mostly delegated from a CMS generation
1848 
1849 void CompactibleFreeListSpace::save_marks() {
1850   assert(Thread::current()->is_VM_thread(),
1851          "Global variable should only be set when single-threaded");
1852   // Mark the "end" of the used space at the time of this call;
1853   // note, however, that promoted objects from this point
1854   // on are tracked in the _promoInfo below.
1855   set_saved_mark_word(unallocated_block());
1856 #ifdef ASSERT
1857   // Check the sanity of save_marks() etc.
1858   MemRegion ur    = used_region();
1859   MemRegion urasm = used_region_at_save_marks();
1860   assert(ur.contains(urasm),
1861          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1862          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1863          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1864 #endif
1865   // inform allocator that promotions should be tracked.
1866   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1867   _promoInfo.startTrackingPromotions();
1868 }
1869 
1870 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1871   assert(_promoInfo.tracking(), "No preceding save_marks?");
1872   return _promoInfo.noPromotions();
1873 }
1874 
1875 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1876                                                                             \
1877 void CompactibleFreeListSpace::                                             \
1878 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1879   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1880   /*                                                                        \
1881    * This also restores any displaced headers and removes the elements from \
1882    * the iteration set as they are processed, so that we have a clean slate \
1883    * at the end of the iteration. Note, thus, that if new objects are       \
1884    * promoted as a result of the iteration they are iterated over as well.  \
1885    */                                                                       \
1886   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1887 }
1888 
1889 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1890 
1891 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1892   return _smallLinearAllocBlock._word_size == 0;
1893 }
1894 
1895 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1896   // Fix up linear allocation blocks to look like free blocks
1897   repairLinearAllocBlock(&_smallLinearAllocBlock);
1898 }
1899 
1900 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1901   assert_locked();
1902   if (blk->_ptr != NULL) {
1903     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1904            "Minimum block size requirement");
1905     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1906     fc->set_size(blk->_word_size);
1907     fc->link_prev(NULL);   // mark as free
1908     fc->dontCoalesce();
1909     assert(fc->is_free(), "just marked it free");
1910     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1911   }
1912 }
1913 
1914 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1915   assert_locked();
1916   if (_smallLinearAllocBlock._ptr == NULL) {
1917     assert(_smallLinearAllocBlock._word_size == 0,
1918       "Size of linAB should be zero if the ptr is NULL");
1919     // Reset the linAB refill and allocation size limit.
1920     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1921   }
1922   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1923 }
1924 
1925 void
1926 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1927   assert_locked();
1928   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1929          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1930          "blk invariant");
1931   if (blk->_ptr == NULL) {
1932     refillLinearAllocBlock(blk);
1933   }
1934   if (PrintMiscellaneous && Verbose) {
1935     if (blk->_word_size == 0) {
1936       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
1937     }
1938   }
1939 }
1940 
1941 void
1942 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1943   assert_locked();
1944   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1945          "linear allocation block should be empty");
1946   FreeChunk* fc;
1947   if (blk->_refillSize < SmallForDictionary &&
1948       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1949     // A linAB's strategy might be to use small sizes to reduce
1950     // fragmentation but still get the benefits of allocation from a
1951     // linAB.
1952   } else {
1953     fc = getChunkFromDictionary(blk->_refillSize);
1954   }
1955   if (fc != NULL) {
1956     blk->_ptr  = (HeapWord*)fc;
1957     blk->_word_size = fc->size();
1958     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1959   }
1960 }
1961 
1962 // Support for compaction
1963 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1964   scan_and_forward(this, cp);
1965   // prepare_for_compaction() uses the space between live objects
1966   // so that later phases can skip dead space quickly.  As a result,
1967   // verification of the free lists does not work afterwards.
1968 }
1969 
1970 void CompactibleFreeListSpace::adjust_pointers() {
1971   // In other versions of adjust_pointers(), a bail out
1972   // based on the amount of live data in the generation
1973   // (i.e., if 0, bail out) may be used.
1974   // Cannot test used() == 0 here because the free lists have already
1975   // been mangled by the compaction.
1976 
1977   scan_and_adjust_pointers(this);
1978   // See note about verification in prepare_for_compaction().
1979 }
1980 
1981 void CompactibleFreeListSpace::compact() {
1982   scan_and_compact(this);
1983 }
1984 
1985 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1986 // where fbs is free block sizes
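// For example, a single free block of size 100 gives
// 1 - (100*100)/(100*100) = 0 (no fragmentation), while ten free blocks
// of size 10 give 1 - (10*10*10)/(100*100) = 0.9.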
1987 double CompactibleFreeListSpace::flsFrag() const {
1988   size_t itabFree = totalSizeInIndexedFreeLists();
1989   double frag = 0.0;
1990   size_t i;
1991 
1992   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1993     double sz  = i;
1994     frag      += _indexedFreeList[i].count() * (sz * sz);
1995   }
1996 
1997   double totFree = itabFree +
1998                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1999   if (totFree > 0) {
2000     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2001             (totFree * totFree));
2002     frag = (double)1.0  - frag;
2003   } else {
2004     assert(frag == 0.0, "Follows from totFree == 0");
2005   }
2006   return frag;
2007 }
2008 
2009 void CompactibleFreeListSpace::beginSweepFLCensus(
2010   float inter_sweep_current,
2011   float inter_sweep_estimate,
2012   float intra_sweep_estimate) {
2013   assert_locked();
2014   size_t i;
2015   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2016     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2017     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2018     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2019     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2020     fl->set_before_sweep(fl->count());
2021     fl->set_bfr_surp(fl->surplus());
2022   }
2023   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2024                                     inter_sweep_current,
2025                                     inter_sweep_estimate,
2026                                     intra_sweep_estimate);
2027 }
2028 
2029 void CompactibleFreeListSpace::setFLSurplus() {
2030   assert_locked();
2031   size_t i;
2032   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2033     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2034     fl->set_surplus(fl->count() -
2035                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2036   }
2037 }
2038 
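// Walk the indexed lists from largest to smallest, recording for each size
// the nearest larger size that currently has a surplus; bestFitSmall()
// follows this chain of hints when splitting a larger chunk to satisfy a
// small request.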
2039 void CompactibleFreeListSpace::setFLHints() {
2040   assert_locked();
2041   size_t i;
2042   size_t h = IndexSetSize;
2043   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2044     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2045     fl->set_hint(h);
2046     if (fl->surplus() > 0) {
2047       h = i;
2048     }
2049   }
2050 }
2051 
2052 void CompactibleFreeListSpace::clearFLCensus() {
2053   assert_locked();
2054   size_t i;
2055   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2056     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2057     fl->set_prev_sweep(fl->count());
2058     fl->set_coal_births(0);
2059     fl->set_coal_deaths(0);
2060     fl->set_split_births(0);
2061     fl->set_split_deaths(0);
2062   }
2063 }
2064 
2065 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2066   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2067   setFLSurplus();
2068   setFLHints();
2069   printFLCensus(sweep_count);
2070   clearFLCensus();
2071   assert_locked();
2072   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2073 }
2074 
2075 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2076   if (size < SmallForDictionary) {
2077     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2078     return (fl->coal_desired() < 0) ||
2079            ((int)fl->count() > fl->coal_desired());
2080   } else {
2081     return dictionary()->coal_dict_over_populated(size);
2082   }
2083 }
2084 
2085 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2086   assert(size < SmallForDictionary, "Size too large for indexed list");
2087   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2088   fl->increment_coal_births();
2089   fl->increment_surplus();
2090 }
2091 
2092 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2093   assert(size < SmallForDictionary, "Size too large for indexed list");
2094   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2095   fl->increment_coal_deaths();
2096   fl->decrement_surplus();
2097 }
2098 
2099 void CompactibleFreeListSpace::coalBirth(size_t size) {
2100   if (size  < SmallForDictionary) {
2101     smallCoalBirth(size);
2102   } else {
2103     dictionary()->dict_census_update(size,
2104                                    false /* split */,
2105                                    true /* birth */);
2106   }
2107 }
2108 
2109 void CompactibleFreeListSpace::coalDeath(size_t size) {
2110   if(size  < SmallForDictionary) {
2111     smallCoalDeath(size);
2112   } else {
2113     dictionary()->dict_census_update(size,
2114                                    false /* split */,
2115                                    false /* birth */);
2116   }
2117 }
2118 
2119 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2120   assert(size < SmallForDictionary, "Size too large for indexed list");
2121   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2122   fl->increment_split_births();
2123   fl->increment_surplus();
2124 }
2125 
2126 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2127   assert(size < SmallForDictionary, "Size too large for indexed list");
2128   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2129   fl->increment_split_deaths();
2130   fl->decrement_surplus();
2131 }
2132 
2133 void CompactibleFreeListSpace::split_birth(size_t size) {
2134   if (size  < SmallForDictionary) {
2135     smallSplitBirth(size);
2136   } else {
2137     dictionary()->dict_census_update(size,
2138                                    true /* split */,
2139                                    true /* birth */);
2140   }
2141 }
2142 
2143 void CompactibleFreeListSpace::splitDeath(size_t size) {
2144   if (size  < SmallForDictionary) {
2145     smallSplitDeath(size);
2146   } else {
2147     dictionary()->dict_census_update(size,
2148                                    true /* split */,
2149                                    false /* birth */);
2150   }
2151 }
2152 
2153 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2154   size_t to2 = from - to1;
2155   splitDeath(from);
2156   split_birth(to1);
2157   split_birth(to2);
2158 }
2159 
2160 void CompactibleFreeListSpace::print() const {
2161   print_on(tty);
2162 }
2163 
2164 void CompactibleFreeListSpace::prepare_for_verify() {
2165   assert_locked();
2166   repairLinearAllocationBlocks();
2167   // Verify that the SpoolBlocks look like free blocks of
2168   // appropriate sizes... To be done ...
2169 }
2170 
2171 class VerifyAllBlksClosure: public BlkClosure {
2172  private:
2173   const CompactibleFreeListSpace* _sp;
2174   const MemRegion                 _span;
2175   HeapWord*                       _last_addr;
2176   size_t                          _last_size;
2177   bool                            _last_was_obj;
2178   bool                            _last_was_live;
2179 
2180  public:
2181   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2182     MemRegion span) :  _sp(sp), _span(span),
2183                        _last_addr(NULL), _last_size(0),
2184                        _last_was_obj(false), _last_was_live(false) { }
2185 
2186   virtual size_t do_blk(HeapWord* addr) {
2187     size_t res;
2188     bool   was_obj  = false;
2189     bool   was_live = false;
2190     if (_sp->block_is_obj(addr)) {
2191       was_obj = true;
2192       oop p = oop(addr);
2193       guarantee(p->is_oop(), "Should be an oop");
2194       res = _sp->adjustObjectSize(p->size());
2195       if (_sp->obj_is_alive(addr)) {
2196         was_live = true;
2197         p->verify();
2198       }
2199     } else {
2200       FreeChunk* fc = (FreeChunk*)addr;
2201       res = fc->size();
2202       if (FLSVerifyLists && !fc->cantCoalesce()) {
2203         guarantee(_sp->verify_chunk_in_free_list(fc),
2204                   "Chunk should be on a free list");
2205       }
2206     }
2207     if (res == 0) {
2208       LogHandle(gc, verify) log;
2209       log.error("Livelock: no rank reduction!");
2210       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2211                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2212         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2213         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2214       ResourceMark rm;
2215       _sp->print_on(log.error_stream());
2216       guarantee(false, "Verification failed.");
2217     }
2218     _last_addr = addr;
2219     _last_size = res;
2220     _last_was_obj  = was_obj;
2221     _last_was_live = was_live;
2222     return res;
2223   }
2224 };
2225 
2226 class VerifyAllOopsClosure: public OopClosure {
2227  private:
2228   const CMSCollector*             _collector;
2229   const CompactibleFreeListSpace* _sp;
2230   const MemRegion                 _span;
2231   const bool                      _past_remark;
2232   const CMSBitMap*                _bit_map;
2233 
2234  protected:
2235   void do_oop(void* p, oop obj) {
2236     if (_span.contains(obj)) { // the interior oop points into CMS heap
2237       if (!_span.contains(p)) { // reference from outside CMS heap
2238         // Should be a valid object; the first disjunct below allows
2239         // us to sidestep an assertion in block_is_obj() that insists
2240         // that p be in _sp. Note that several generations (and spaces)
2241         // are spanned by _span (CMS heap) above.
2242         guarantee(!_sp->is_in_reserved(obj) ||
2243                   _sp->block_is_obj((HeapWord*)obj),
2244                   "Should be an object");
2245         guarantee(obj->is_oop(), "Should be an oop");
2246         obj->verify();
2247         if (_past_remark) {
2248           // Remark has been completed, the object should be marked
2249           _bit_map->isMarked((HeapWord*)obj);
2250         }
2251       } else { // reference within CMS heap
2252         if (_past_remark) {
2253           // Remark has been completed -- so the referent should have
2254           // been marked, if referring object is.
2255           if (_bit_map->isMarked(_collector->block_start(p))) {
2256             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2257           }
2258         }
2259       }
2260     } else if (_sp->is_in_reserved(p)) {
2261       // the reference is from FLS, and points out of FLS
2262       guarantee(obj->is_oop(), "Should be an oop");
2263       obj->verify();
2264     }
2265   }
2266 
2267   template <class T> void do_oop_work(T* p) {
2268     T heap_oop = oopDesc::load_heap_oop(p);
2269     if (!oopDesc::is_null(heap_oop)) {
2270       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2271       do_oop(p, obj);
2272     }
2273   }
2274 
2275  public:
2276   VerifyAllOopsClosure(const CMSCollector* collector,
2277     const CompactibleFreeListSpace* sp, MemRegion span,
2278     bool past_remark, CMSBitMap* bit_map) :
2279     _collector(collector), _sp(sp), _span(span),
2280     _past_remark(past_remark), _bit_map(bit_map) { }
2281 
2282   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2283   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2284 };
2285 
2286 void CompactibleFreeListSpace::verify() const {
2287   assert_lock_strong(&_freelistLock);
2288   verify_objects_initialized();
2289   MemRegion span = _collector->_span;
2290   bool past_remark = (_collector->abstract_state() ==
2291                       CMSCollector::Sweeping);
2292 
2293   ResourceMark rm;
2294   HandleMark  hm;
2295 
2296   // Check integrity of CFL data structures
2297   _promoInfo.verify();
2298   _dictionary->verify();
2299   if (FLSVerifyIndexTable) {
2300     verifyIndexedFreeLists();
2301   }
2302   // Check integrity of all objects and free blocks in space
2303   {
2304     VerifyAllBlksClosure cl(this, span);
2305     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2306   }
2307   // Check that all references in the heap to FLS
2308   // are to valid objects in FLS or that references in
2309   // FLS are to valid objects elsewhere in the heap
2310   if (FLSVerifyAllHeapReferences)
2311   {
2312     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2313       _collector->markBitMap());
2314 
2315     // Iterate over all oops in the heap. Uses the _no_header version
2316     // since we are not interested in following the klass pointers.
2317     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2318   }
2319 
2320   if (VerifyObjectStartArray) {
2321     // Verify the block offset table
2322     _bt.verify();
2323   }
2324 }
2325 
2326 #ifndef PRODUCT
2327 void CompactibleFreeListSpace::verifyFreeLists() const {
2328   if (FLSVerifyLists) {
2329     _dictionary->verify();
2330     verifyIndexedFreeLists();
2331   } else {
2332     if (FLSVerifyDictionary) {
2333       _dictionary->verify();
2334     }
2335     if (FLSVerifyIndexTable) {
2336       verifyIndexedFreeLists();
2337     }
2338   }
2339 }
2340 #endif
2341 
2342 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2343   size_t i = 0;
2344   for (; i < IndexSetStart; i++) {
2345     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2346   }
2347   for (; i < IndexSetSize; i++) {
2348     verifyIndexedFreeList(i);
2349   }
2350 }
2351 
2352 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2353   FreeChunk* fc   =  _indexedFreeList[size].head();
2354   FreeChunk* tail =  _indexedFreeList[size].tail();
2355   size_t    num = _indexedFreeList[size].count();
2356   size_t      n = 0;
2357   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2358             "Slot should have been empty");
2359   for (; fc != NULL; fc = fc->next(), n++) {
2360     guarantee(fc->size() == size, "Size inconsistency");
2361     guarantee(fc->is_free(), "!free?");
2362     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2363     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2364   }
2365   guarantee(n == num, "Incorrect count");
2366 }
2367 
2368 #ifndef PRODUCT
2369 void CompactibleFreeListSpace::check_free_list_consistency() const {
2370   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2371     "Some sizes can't be allocated without recourse to"
2372     " linear allocation buffers");
2373   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2374     "else MIN_TREE_CHUNK_SIZE is wrong");
2375   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2376   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2377 }
2378 #endif
2379 
2380 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2381   assert_lock_strong(&_freelistLock);
2382   LogHandle(gc, freelist, census) log;
2383   if (!log.is_debug()) {
2384     return;
2385   }
2386   AdaptiveFreeList<FreeChunk> total;
2387   log.debug("end sweep# " SIZE_FORMAT, sweep_count);
2388   ResourceMark rm;
2389   outputStream* out = log.debug_stream();
2390   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2391   size_t total_free = 0;
2392   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2393     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2394     total_free += fl->count() * fl->size();
2395     if (i % (40*IndexSetStride) == 0) {
2396       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2397     }
2398     fl->print_on(out);
2399     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2400     total.set_surplus(    total.surplus()     + fl->surplus()    );
2401     total.set_desired(    total.desired()     + fl->desired()    );
2402     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2403     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2404     total.set_count(      total.count()       + fl->count()      );
2405     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2406     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2407     total.set_split_births(total.split_births() + fl->split_births());
2408     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2409   }
2410   total.print_on(out, "TOTAL");
2411   log.debug("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2412   log.debug("growth: %8.5f  deficit: %8.5f",
2413             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2414                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2415             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2416   _dictionary->print_dict_census(out);
2417 }
2418 
2419 ///////////////////////////////////////////////////////////////////////////
2420 // CompactibleFreeListSpaceLAB
2421 ///////////////////////////////////////////////////////////////////////////
2422 
2423 #define VECTOR_257(x)                                                                                  \
2424   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2425   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2426      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2427      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2428      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2429      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2430      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2431      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2432      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2433      x }
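// VECTOR_257 expands to a 257-element initializer, one slot per indexed
// free list size (cf. the IndexSetSize == 257 assert in the
// CompactibleFreeListSpaceLAB constructor below).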
2434 
2435 // Initialize with default setting for CMS, _not_
2436 // generic OldPLABSize, whose static default is different; if overridden at the
2437 // command-line, this will get reinitialized via a call to
2438 // modify_initialization() below.
2439 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2440   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2441 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2442 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2443 
2444 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2445   _cfls(cfls)
2446 {
2447   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2448   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2449        i < CompactibleFreeListSpace::IndexSetSize;
2450        i += CompactibleFreeListSpace::IndexSetStride) {
2451     _indexedFreeList[i].set_size(i);
2452     _num_blocks[i] = 0;
2453   }
2454 }
2455 
2456 static bool _CFLS_LAB_modified = false;
2457 
2458 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2459   assert(!_CFLS_LAB_modified, "Call only once");
2460   _CFLS_LAB_modified = true;
2461   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2462        i < CompactibleFreeListSpace::IndexSetSize;
2463        i += CompactibleFreeListSpace::IndexSetStride) {
2464     _blocks_to_claim[i].modify(n, wt, true /* force */);
2465   }
2466 }
2467 
2468 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2469   FreeChunk* res;
2470   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2471   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2472     // This locking manages sync with other large object allocations.
2473     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2474                     Mutex::_no_safepoint_check_flag);
2475     res = _cfls->getChunkFromDictionaryExact(word_sz);
2476     if (res == NULL) return NULL;
2477   } else {
2478     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2479     if (fl->count() == 0) {
2480       // Attempt to refill this local free list.
2481       get_from_global_pool(word_sz, fl);
2482       // If it didn't work, give up.
2483       if (fl->count() == 0) return NULL;
2484     }
2485     res = fl->get_chunk_at_head();
2486     assert(res != NULL, "Why was count non-zero?");
2487   }
2488   res->markNotFree();
2489   assert(!res->is_free(), "shouldn't be marked free");
2490   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2491   // mangle a just allocated object with a distinct pattern.
2492   debug_only(res->mangleAllocated(word_sz));
2493   return (HeapWord*)res;
2494 }
2495 
2496 // Get a chunk of blocks of the right size and update related
2497 // book-keeping stats
2498 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2499   // Get the #blocks we want to claim
2500   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2501   assert(n_blks > 0, "Error");
2502   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2503   // In some cases, when the application has a phase change,
2504   // there may be a sudden and sharp shift in the object survival
2505   // profile, and updating the counts at the end of a scavenge
2506   // may not be quick enough, giving rise to large scavenge pauses
2507   // during these phase changes. It is beneficial to detect such
2508   // changes on-the-fly during a scavenge and avoid such a phase-change
2509   // pothole. The following code is a heuristic attempt to do that.
2510   // It is protected by a product flag until we have gained
2511   // enough experience with this heuristic and fine-tuned its behavior.
2512   // WARNING: This might increase fragmentation if we overreact to
2513   // small spikes, so some kind of historical smoothing based on
2514   // previous experience with the greater reactivity might be useful.
2515   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2516   // default.
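  //
  // Sketch of the scaling below (with hypothetical flag values): if
  // CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4 and
  // n_blks == 16, then "multiple" becomes non-zero only after more than
  // 256 blocks of this size have been handed out during the current
  // scavenge, at which point n_blks is scaled up by a factor of
  // (1 + CMSOldPLABReactivityFactor * multiple), capped at CMSOldPLABMax.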
2517   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2518     //
2519     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2520     // which is why there is a cast to double.
2521     //
2522     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2523     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2524     n_blks = MIN2(n_blks, CMSOldPLABMax);
2525   }
2526   assert(n_blks > 0, "Error");
2527   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2528   // Update stats table entry for this block size
2529   _num_blocks[word_sz] += fl->count();
2530 }
2531 
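// Recompute, for each block size, how many blocks a GC worker should claim
// on its next refill: the demand observed this cycle
// (_global_num_blocks[i] / (_global_num_workers[i] * CMSOldPLABNumRefills)),
// clamped to [CMSOldPLABMin, CMSOldPLABMax] and folded into the historical
// average kept in _blocks_to_claim[i].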
2532 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2533   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2534        i < CompactibleFreeListSpace::IndexSetSize;
2535        i += CompactibleFreeListSpace::IndexSetStride) {
2536     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2537            "Counter inconsistency");
2538     if (_global_num_workers[i] > 0) {
2539       // Need to smooth wrt historical average
2540       if (ResizeOldPLAB) {
2541         _blocks_to_claim[i].sample(
2542           MAX2(CMSOldPLABMin,
2543           MIN2(CMSOldPLABMax,
2544                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2545       }
2546       // Reset counters for next round
2547       _global_num_workers[i] = 0;
2548       _global_num_blocks[i] = 0;
2549       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2550     }
2551   }
2552 }
2553 
2554 // If this is changed in the future to allow parallel
2555 // access, one would need to take the FL locks and,
2556 // depending on how it is used, stagger access from
2557 // parallel threads to reduce contention.
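     // An illustrative example of the bookkeeping done below (numbers invented):
     // if this thread obtained _num_blocks[i] == 40 blocks of size i during the
     // scavenge and 5 of them are still sitting unused on its local list, then
     // num_retire == 5, the 5 unused chunks are prepended back onto the shared
     // _indexedFreeList[i], and 40 - 5 == 35 is added to _global_num_blocks[i]
     // for the next compute_desired_plab_size() round.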
2558 void CompactibleFreeListSpaceLAB::retire(int tid) {
2559   // We run this single-threaded, with the world stopped,
2560   // so no locking is needed.
2561   NOT_PRODUCT(Thread* t = Thread::current();)
2562   assert(Thread::current()->is_VM_thread(), "Error");
2563   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2564        i < CompactibleFreeListSpace::IndexSetSize;
2565        i += CompactibleFreeListSpace::IndexSetStride) {
2566     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2567            "Can't retire more than what we obtained");
2568     if (_num_blocks[i] > 0) {
2569       size_t num_retire = _indexedFreeList[i].count();
2570       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2571       {
2572         // Not needed while retire() runs single-threaded at a safepoint;
2573         // parallel access would require:
2574         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i], Mutex::_no_safepoint_check_flag);
2575         // Update global stats for num_blocks used
2576         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2577         _global_num_workers[i]++;
2578         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2579         if (num_retire > 0) {
2580           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2581           // Reset this list.
2582           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2583           _indexedFreeList[i].set_size(i);
2584         }
2585       }
2586       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2587                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2588       // Reset stats for next round
2589       _num_blocks[i]         = 0;
2590     }
2591   }
2592 }
2593 
2594 // Used by par_get_chunk_of_blocks() for the chunks from the
2595 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2596 // of "word_sz" and, if one is found, splits it into "word_sz"-sized
2597 // chunks that are added to the free list "fl".  "n" is the maximum
2598 // number of chunks to be added to "fl".
2599 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2600 
2601   // We'll try all multiples of word_sz in the indexed set, starting with
2602   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples.
2603   // If nothing suitable is found, the caller falls back to the dictionary.
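       // A worked example, with sizes invented purely for illustration: for
       // word_sz == 8 and n == 64, k == 1 looks at the size-8 list; if that
       // list is empty and CMSSplitIndexedFreeListBlocks is set, k == 2 looks
       // at the size-16 list, from which up to MAX2(64/2, 1) == 32 chunks are
       // taken and each split into two 8-word blocks, and so on for k == 3,
       // 4, ... as long as k * word_sz stays below IndexSetSize.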
2604   {
2605     bool found;
2606     int  k;
2607     size_t cur_sz;
2608     for (k = 1, cur_sz = k * word_sz, found = false;
2609          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2610          (CMSSplitIndexedFreeListBlocks || k <= 1);
2611          k++, cur_sz = k * word_sz) {
2612       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2613       fl_for_cur_sz.set_size(cur_sz);
2614       {
2615         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2616                         Mutex::_no_safepoint_check_flag);
2617         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2618         if (gfl->count() != 0) {
2619           // nn is the number of chunks of size cur_sz that
2620           // we'd need to split k-ways each, in order to create
2621           // "n" chunks of size word_sz each.
2622           const size_t nn = MAX2(n/k, (size_t)1);
2623           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2624           found = true;
2625           if (k > 1) {
2626             // Update split death stats for the cur_sz-size blocks list:
2627             // we increment the split death count by the number of blocks
2628             // we just took from the cur_sz-size blocks list and which
2629             // we will be splitting below.
2630             ssize_t deaths = gfl->split_deaths() +
2631                              fl_for_cur_sz.count();
2632             gfl->set_split_deaths(deaths);
2633           }
2634         }
2635       }
2636       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2637       if (found) {
2638         if (k == 1) {
2639           fl->prepend(&fl_for_cur_sz);
2640         } else {
2641           // Divide each block on fl_for_cur_sz up k ways.
2642           FreeChunk* fc;
2643           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2644             // Must do this in reverse order, so that anybody attempting to
2645             // access the main chunk sees it as a single free block until we
2646             // change it.
2647             size_t fc_size = fc->size();
2648             assert(fc->is_free(), "Error");
2649             for (int i = k-1; i >= 0; i--) {
2650               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2651               assert((i != 0) ||
2652                         ((fc == ffc) && ffc->is_free() &&
2653                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2654                         "Counting error");
2655               ffc->set_size(word_sz);
2656               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2657               ffc->link_next(NULL);
2658               // Above must occur before BOT is updated below.
2659               OrderAccess::storestore();
2660               // Splitting from the right; at this point fc_size == (i + 1) * word_sz.
2661               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2662               fc_size -= word_sz;
2663               assert(fc_size == i*word_sz, "Error");
2664               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2665               _bt.verify_single_block((HeapWord*)fc, fc_size);
2666               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2667               // Push this on "fl".
2668               fl->return_chunk_at_head(ffc);
2669             }
2670             // Sanity check: the list being built must remain well-formed.
2671             assert(fl->tail()->next() == NULL, "List invariant.");
2672           }
2673         }
2674         // Update birth stats for this block size.
2675         size_t num = fl->count();
2676         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2677                         Mutex::_no_safepoint_check_flag);
2678         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2679         _indexedFreeList[word_sz].set_split_births(births);
2680         return true;
2681       }
2682     }
2683     return found;
2684   }
2685 }
2686 
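     // Carve a chunk of (roughly) n * word_sz words out of the dictionary for
     // splitting by the caller.  n may be reduced if no sufficiently large
     // chunk is available, or if the leftover tail would be smaller than
     // MinChunkSize; any remainder is returned to the dictionary or to the
     // indexed free lists.  Returns a free chunk of exactly n * word_sz words
     // for the (possibly reduced) n, or NULL if not even one block can be cut.
     //
     // A worked example, with sizes invented for illustration and assuming
     // MinChunkSize <= 20 < IndexSetSize: for word_sz == 8 and n == 10, a
     // 100-word dictionary chunk yields rem = 100 - 10 * 8 = 20 words; the
     // 20-word remainder is split off and returned to _indexedFreeList[20],
     // and the remaining 80-word chunk is handed back to the caller.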
2687 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2688 
2689   FreeChunk* fc = NULL;
2690   FreeChunk* rem_fc = NULL;
2691   size_t rem;
2692   {
2693     MutexLockerEx x(parDictionaryAllocLock(),
2694                     Mutex::_no_safepoint_check_flag);
2695     while (n > 0) {
2696       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2697                                   FreeBlockDictionary<FreeChunk>::atLeast);
2698       if (fc != NULL) {
2699         break;
2700       } else {
2701         n--;
2702       }
2703     }
2704     if (fc == NULL) return NULL;
2705     // Otherwise, split up that block.
2706     assert((ssize_t)n >= 1, "Control point invariant");
2707     assert(fc->is_free(), "Error: should be a free block");
2708     _bt.verify_single_block((HeapWord*)fc, fc->size());
2709     const size_t nn = fc->size() / word_sz;
2710     n = MIN2(nn, n);
2711     assert((ssize_t)n >= 1, "Control point invariant");
2712     rem = fc->size() - n * word_sz;
2713     // If there is a remainder, and it's too small, allocate one fewer.
2714     if (rem > 0 && rem < MinChunkSize) {
2715       n--; rem += word_sz;
2716     }
2717     // Note that at this point we may have n == 0.
2718     assert((ssize_t)n >= 0, "Control point invariant");
2719 
2720     // If n is 0, the chunk fc that was found is not large
2721     // enough to leave a viable remainder.  We are unable to
2722     // allocate even one block.  Return fc to the dictionary
2723     // and return NULL; the caller's free list is left empty.
2724     if (n == 0) {
2725       returnChunkToDictionary(fc);
2726       return NULL;
2727     }
2728 
2729     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2730     dictionary()->dict_census_update(fc->size(),
2731                                      true /*split*/,
2732                                      false /*birth*/);
2733 
2734     // First return the remainder, if any.
2735     // Note that we hold the lock until we decide whether the remainder goes
2736     // back to the dictionary, since a concurrent allocation might otherwise
2737     // find the dictionary short of space that is about to be returned.  (We
2738     // accept that window when a small remainder goes to the indexed lists.)
2739     if (rem > 0) {
2740       size_t prefix_size = n * word_sz;
2741       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2742       rem_fc->set_size(rem);
2743       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2744       rem_fc->link_next(NULL);
2745       // Above must occur before BOT is updated below.
2746       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2747       OrderAccess::storestore();
2748       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2749       assert(fc->is_free(), "Error");
2750       fc->set_size(prefix_size);
2751       if (rem >= IndexSetSize) {
2752         returnChunkToDictionary(rem_fc);
2753         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2754         rem_fc = NULL;
2755       }
2756       // Otherwise, return it to the small list below.
2757     }
2758   }
2759   if (rem_fc != NULL) {
2760     MutexLockerEx x(_indexedFreeListParLocks[rem],
2761                     Mutex::_no_safepoint_check_flag);
2762     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2763     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2764     smallSplitBirth(rem);
2765   }
2766   assert(n * word_sz == fc->size(),
2767          "Chunk size " SIZE_FORMAT " is not exactly divisible into "
2768          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2769          fc->size(), n, word_sz);
2770   return fc;
2771 }
2772 
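     // Used by par_get_chunk_of_blocks() when the indexed free lists cannot
     // satisfy the request: carve a large chunk out of the dictionary via
     // get_n_way_chunk_to_split() and split it, right to left, into blocks of
     // "word_sz" words that are pushed onto "fl".  Fewer than
     // "targeted_number_of_chunks" blocks may be produced if the dictionary
     // cannot supply a large enough chunk.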
2773 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2774 
2775   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2776 
2777   if (fc == NULL) {
2778     return;
2779   }
2780 
2781   size_t n = fc->size() / word_sz;
2782 
2783   assert((ssize_t)n > 0, "Consistency");
2784   // Now do the splitting up.
2785   // Must do this in reverse order, so that anybody attempting to
2786   // access the main chunk sees it as a single free block until we
2787   // change it.
2788   size_t fc_size = n * word_sz;
2789   // All but first chunk in this loop
2790   for (ssize_t i = n-1; i > 0; i--) {
2791     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2792     ffc->set_size(word_sz);
2793     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2794     ffc->link_next(NULL);
2795     // Above must occur before BOT is updated below.
2796     OrderAccess::storestore();
2797     // Splitting from the right; at this point fc_size == (i + 1) * word_sz.
2798     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2799     fc_size -= word_sz;
2800     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2801     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2802     _bt.verify_single_block((HeapWord*)fc, fc_size);
2803     // Push this on "fl".
2804     fl->return_chunk_at_head(ffc);
2805   }
2806   // First chunk
2807   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2808   // The blocks above should show their new sizes before the first block below
2809   fc->set_size(word_sz);
2810   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2811   fc->link_next(NULL);
2812   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2813   _bt.verify_single_block((HeapWord*)fc, fc->size());
2814   fl->return_chunk_at_head(fc);
2815 
2816   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2817   {
2818     // Update the stats for this block size.
2819     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2820                     Mutex::_no_safepoint_check_flag);
2821     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2822     _indexedFreeList[word_sz].set_split_births(births);
2823     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2824     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2825   }
2826 
2827   // Sanity check: the list we just built must be well-formed.
2828   assert(fl->tail()->next() == NULL, "List invariant.");
2829 }
2830 
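     // Claim "n" blocks of "word_sz" words each, e.g. for an old-gen PLAB
     // refill (see get_from_global_pool() above): first try to take, and if
     // necessary split, chunks from the indexed free lists; only if that fails
     // fall back to splitting a single large chunk from the dictionary.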
2831 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2832   assert(fl->count() == 0, "Precondition.");
2833   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2834          "Precondition");
2835 
2836   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2837     // Got it
2838     return;
2839   }
2840 
2841   // Otherwise, we'll split a block from the dictionary.
2842   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2843 }
2844 
2845 // Set up the space's par_seq_tasks structure for work claiming
2846 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2847 // XXX Need to suitably abstract and generalize this and the next
2848 // method into one.
2849 void
2850 CompactibleFreeListSpace::
2851 initialize_sequential_subtasks_for_rescan(int n_threads) {
2852   // The "size" of each task is fixed according to rescan_task_size.
2853   assert(n_threads > 0, "Unexpected n_threads argument");
2854   const size_t task_size = rescan_task_size();
2855   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2856   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2857   assert(n_tasks == 0 ||
2858          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2859           (used_region().start() + n_tasks*task_size >= used_region().end())),
2860          "n_tasks calculation incorrect");
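       // A worked example with sizes invented purely for illustration: a used
       // region of 1000 words with task_size == 256 gives
       //   n_tasks = (1000 + 256 - 1) / 256 = 4,
       // so the last task covers the partial tail [768, 1000) of the region.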
2861   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2862   assert(!pst->valid(), "Clobbering existing data?");
2863   // Sets the condition for completion of the subtask (how many threads
2864   // need to finish in order to be done).
2865   pst->set_n_threads(n_threads);
2866   pst->set_n_tasks((int)n_tasks);
2867 }
2868 
2869 // Set up the space's par_seq_tasks structure for work claiming
2870 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2871 void
2872 CompactibleFreeListSpace::
2873 initialize_sequential_subtasks_for_marking(int n_threads,
2874                                            HeapWord* low) {
2875   // The "size" of each task is fixed according to marking_task_size.
2876   assert(n_threads > 0, "Unexpected n_threads argument");
2877   const size_t task_size = marking_task_size();
2878   assert(task_size > CardTableModRefBS::card_size_in_words &&
2879          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2880          "Otherwise arithmetic below would be incorrect");
2881   MemRegion span = _old_gen->reserved();
2882   if (low != NULL) {
2883     if (span.contains(low)) {
2884       // Align low down to a card boundary so that
2885       // we can use block_offset_careful() on span boundaries.
2886       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2887                                  CardTableModRefBS::card_size);
2888       // Clip span prefix at aligned_low
2889       span = span.intersection(MemRegion(aligned_low, span.end()));
2890     } else if (low > span.end()) {
2891       span = MemRegion(low, low);  // Null region
2892     } // else use entire span
2893   }
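       // For example (addresses invented for illustration, assuming the usual
       // 512-byte cards): a "low" that is 200 bytes past a card boundary is
       // aligned down by 200 bytes, so the span prefix is clipped at that card
       // boundary and the card-alignment assert below holds.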
2894   assert(span.is_empty() ||
2895          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2896         "span should start at a card boundary");
2897   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2898   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2899   assert(n_tasks == 0 ||
2900          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2901           (span.start() + n_tasks*task_size >= span.end())),
2902          "n_tasks calculation incorrect");
2903   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2904   assert(!pst->valid(), "Clobbering existing data?");
2905   // Sets the condition for completion of the subtask (how many threads
2906   // need to finish in order to be done).
2907   pst->set_n_threads(n_threads);
2908   pst->set_n_tasks((int)n_tasks);
2909 }