1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/liveRange.hpp"
  34 #include "gc/shared/space.inline.hpp"
  35 #include "gc/shared/spaceDecorator.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
  52 // Highest rank used for the free list locks.
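     // Each CompactibleFreeListSpace constructor consumes one value from
     // _lockRank (note the post-decrement in the constructor below), so
     // spaces constructed later receive lower-ranked free list locks.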
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
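       // For example, if sizeof(FreeChunk) occupies three machine words and
       // MinObjAlignmentInBytes equals the word size, the rounded-up size is
       // unchanged and MinChunkSize works out to three heap words (the actual
       // values are platform-dependent).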
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  77   _bt(bs, mr),
  78   // free list locks are in the range of values taken by _lockRank
  79   // This range currently is [_leaf+2, _leaf+3]
  80   // Note: this requires that CFLspace c'tors
  81   // are called serially in the order in which the locks are
  82   // acquired in the program text. This is true today.
  83   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  84                 Monitor::_safepoint_check_sometimes),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true,
  87                           Monitor::_safepoint_check_never),
  88   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  89                     CMSRescanMultiple),
  90   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  91                     CMSConcMarkMultiple),
  92   _collector(NULL),
  93   _preconsumptionDirtyCardClosure(NULL)
  94 {
  95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  96          "FreeChunk is larger than expected");
  97   _bt.set_space(this);
  98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  99 
 100   _dictionary = new AFLBinaryTreeDictionary(mr);
 101 
 102   assert(_dictionary != NULL, "CMS dictionary initialization");
 103   // The indexed free lists are initially all empty and are lazily
 104   // filled in on demand. Initialize the array elements to NULL.
 105   initializeIndexedFreeListArray();
 106 
 107   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 108                              SmallForLinearAlloc);
 109 
 110   // CMSIndexedFreeListReplenish should be at least 1
 111   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 112   _promoInfo.setSpace(this);
 113   if (UseCMSBestFit) {
 114     _fitStrategy = FreeBlockBestFitFirst;
 115   } else {
 116     _fitStrategy = FreeBlockStrategyNone;
 117   }
 118   check_free_list_consistency();
 119 
 120   // Initialize locks for parallel case.
 121   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 122     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 123                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 124     DEBUG_ONLY(
 125       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 126     )
 127   }
 128   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 129 }
 130 
 131 // Like CompactibleSpace::forward() but always calls cross_threshold() to
 132 // update the block offset table.  Removed initialize_threshold call because
 133 // CFLS does not use a block offset array for contiguous spaces.
 134 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 135                                     CompactPoint* cp, HeapWord* compact_top) {
 136   // q is alive
 137   // First check if we should switch compaction space
 138   assert(this == cp->space, "'this' should be current compaction space.");
 139   size_t compaction_max_size = pointer_delta(end(), compact_top);
 140   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 141     "virtual adjustObjectSize_v() method is not correct");
 142   size_t adjusted_size = adjustObjectSize(size);
 143   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 144          "no small fragments allowed");
 145   assert(minimum_free_block_size() == MinChunkSize,
 146          "for de-virtualized reference below");
 147   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
 148   if (adjusted_size + MinChunkSize > compaction_max_size &&
 149       adjusted_size != compaction_max_size) {
 150     do {
 151       // switch to next compaction space
 152       cp->space->set_compaction_top(compact_top);
 153       cp->space = cp->space->next_compaction_space();
 154       if (cp->space == NULL) {
 155         cp->gen = GenCollectedHeap::heap()->young_gen();
 156         assert(cp->gen != NULL, "compaction must succeed");
 157         cp->space = cp->gen->first_compaction_space();
 158         assert(cp->space != NULL, "generation must have a first compaction space");
 159       }
 160       compact_top = cp->space->bottom();
 161       cp->space->set_compaction_top(compact_top);
 162       // The correct adjusted_size may not be the same as that for this method
 163       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 164       // Use the virtual method here; above we avoided it to save the cost of
 165       // a virtual dispatch.
 166       adjusted_size = cp->space->adjust_object_size_v(size);
 167       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 168       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 169     } while (adjusted_size > compaction_max_size);
 170   }
 171 
 172   // store the forwarding pointer into the mark word
 173   if ((HeapWord*)q != compact_top) {
 174     q->forward_to(oop(compact_top));
 175     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 176   } else {
 177     // if the object isn't moving we can just set the mark to the default
 178     // mark and handle it specially later on.
 179     q->init_mark();
 180     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 181   }
 182 
 183   compact_top += adjusted_size;
 184 
 185   // we need to update the offset table so that the beginnings of objects can be
 186   // found during scavenge.  Note that we are updating the offset table based on
 187   // where the object will be once the compaction phase finishes.
 188 
 189   // Always call cross_threshold().  A contiguous space can only call it when
 190   // the compaction_top exceeds the current threshold, but not for a
 191   // non-contiguous space.
 192   cp->threshold =
 193     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 194   return compact_top;
 195 }
 196 
 197 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 198 // and use of single_block instead of alloc_block.  The name here is not really
 199 // appropriate - maybe a more general name could be invented for both the
 200 // contiguous and noncontiguous spaces.
 201 
 202 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 203   _bt.single_block(start, the_end);
 204   return end();
 205 }
 206 
 207 // Initialize the indexed free lists so they are all empty (heads/tails NULL).
 208 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 209   for (size_t i = 0; i < IndexSetSize; i++) {
 210     // Note that on platforms where objects are double word aligned,
 211     // the odd array elements are not used.  It is convenient, however,
 212     // to map directly from the object size to the array element.
 213     _indexedFreeList[i].reset(IndexSetSize);
 214     _indexedFreeList[i].set_size(i);
 215     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 216     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 217     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 219   }
 220 }
 221 
 222 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 223   return adjustObjectSize(oop(addr)->size());
 224 }
 225 
 226 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 227   for (size_t i = 1; i < IndexSetSize; i++) {
 228     assert(_indexedFreeList[i].size() == (size_t) i,
 229       "Indexed free list sizes are incorrect");
 230     _indexedFreeList[i].reset(IndexSetSize);
 231     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 232     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 233     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 234     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 235   }
 236 }
 237 
 238 void CompactibleFreeListSpace::reset(MemRegion mr) {
 239   resetIndexedFreeListArray();
 240   dictionary()->reset();
 241   if (BlockOffsetArrayUseUnallocatedBlock) {
 242     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 243     // Everything's allocated until proven otherwise.
 244     _bt.set_unallocated_block(end());
 245   }
 246   if (!mr.is_empty()) {
 247     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 248     _bt.single_block(mr.start(), mr.word_size());
 249     FreeChunk* fc = (FreeChunk*) mr.start();
 250     fc->set_size(mr.word_size());
 251     if (mr.word_size() >= IndexSetSize) {
 252       returnChunkToDictionary(fc);
 253     } else {
 254       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 255       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 256     }
 257     coalBirth(mr.word_size());
 258   }
 259   _promoInfo.reset();
 260   _smallLinearAllocBlock._ptr = NULL;
 261   _smallLinearAllocBlock._word_size = 0;
 262 }
 263 
 264 void CompactibleFreeListSpace::reset_after_compaction() {
 265   // Reset the space to the new reality - one free chunk.
 266   MemRegion mr(compaction_top(), end());
 267   reset(mr);
 268   // Now refill the linear allocation block(s) if possible.
 269   refillLinearAllocBlocksIfNeeded();
 270 }
 271 
 272 // Walks the entire dictionary, returning a coterminal
 273 // chunk, if it exists. Use with caution since it involves
 274 // a potentially complete walk of a potentially large tree.
 275 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 276 
 277   assert_lock_strong(&_freelistLock);
 278 
 279   return dictionary()->find_chunk_ends_at(end());
 280 }
 281 
 282 
 283 #ifndef PRODUCT
 284 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 285   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 286     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 287   }
 288 }
 289 
 290 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 291   size_t sum = 0;
 292   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 293     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 294   }
 295   return sum;
 296 }
 297 
 298 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 299   size_t count = 0;
 300   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 301     debug_only(
 302       ssize_t total_list_count = 0;
 303       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 304          fc = fc->next()) {
 305         total_list_count++;
 306       }
 307       assert(total_list_count ==  _indexedFreeList[i].count(),
 308         "Count in list is incorrect");
 309     )
 310     count += _indexedFreeList[i].count();
 311   }
 312   return count;
 313 }
 314 
 315 size_t CompactibleFreeListSpace::totalCount() {
 316   size_t num = totalCountInIndexedFreeLists();
 317   num +=  dictionary()->total_count();
 318   if (_smallLinearAllocBlock._word_size != 0) {
 319     num++;
 320   }
 321   return num;
 322 }
 323 #endif
 324 
 325 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 326   FreeChunk* fc = (FreeChunk*) p;
 327   return fc->is_free();
 328 }
 329 
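     // Note: used() is derived from free(), so it inherits the
     // "MT-safe, but not MT-precise" caveat documented in free() below.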
 330 size_t CompactibleFreeListSpace::used() const {
 331   return capacity() - free();
 332 }
 333 
 334 size_t CompactibleFreeListSpace::free() const {
 335   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 336   // if you do this while the structures are in flux you
 337   // may get an approximate answer only; for instance
 338   // because there is concurrent allocation either
 339   // directly by mutators or for promotion during a GC.
 340   // It's "MT-safe", however, in the sense that you are guaranteed
 341   // not to crash and burn, for instance, because of walking
 342   // pointers that could disappear as you were walking them.
 343   // The approximation is because the various components
 344   // that are read below are not read atomically (and
 345   // further, the computation of totalSizeInIndexedFreeLists()
 346   // is itself a non-atomic computation). The normal use of
 347   // this is during a resize operation at the end of GC
 348   // and at that time you are guaranteed to get the
 349   // correct actual value. However, for instance, this is
 350   // also read completely asynchronously by the "perf-sampler"
 351   // that supports jvmstat, and you are apt to see the values
 352   // flicker in such cases.
 353   assert(_dictionary != NULL, "No _dictionary?");
 354   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 355           totalSizeInIndexedFreeLists() +
 356           _smallLinearAllocBlock._word_size) * HeapWordSize;
 357 }
 358 
 359 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 360   assert(_dictionary != NULL, "No _dictionary?");
 361   assert_locked();
 362   size_t res = _dictionary->max_chunk_size();
 363   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 364                        (size_t) SmallForLinearAlloc - 1));
 365   // XXX the following could potentially be pretty slow;
 366   // should one, pessimistically for the rare cases when res
 367   // calculated above is less than IndexSetSize,
 368   // just return res calculated above? My reasoning was that
 369   // those cases will be so rare that the extra time spent doesn't
 370   // really matter....
 371   // Note: do not change the loop test i >= res + IndexSetStride
 372   // to i > res below, because i is unsigned and res may be zero.
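       // (With res == 0, the weaker test could let the unsigned index
       // underflow and wrap around, indexing outside the table.)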
 373   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 374        i -= IndexSetStride) {
 375     if (_indexedFreeList[i].head() != NULL) {
 376       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 377       return i;
 378     }
 379   }
 380   return res;
 381 }
 382 
 383 void LinearAllocBlock::print_on(outputStream* st) const {
 384   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 385             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 386             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 387 }
 388 
 389 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 390   st->print_cr("COMPACTIBLE FREELIST SPACE");
 391   st->print_cr(" Space:");
 392   Space::print_on(st);
 393 
 394   st->print_cr("promoInfo:");
 395   _promoInfo.print_on(st);
 396 
 397   st->print_cr("_smallLinearAllocBlock");
 398   _smallLinearAllocBlock.print_on(st);
 399 
 400   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 401 
 402   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 403 }
 404 
 405 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 406 const {
 407   reportIndexedFreeListStatistics(st);
 408   st->print_cr("Layout of Indexed Freelists");
 409   st->print_cr("---------------------------");
 410   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 411   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 412     _indexedFreeList[i].print_on(st);
 413     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 414       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 415                    p2i(fc), p2i((HeapWord*)fc + i),
 416                    fc->cantCoalesce() ? "\t CC" : "");
 417     }
 418   }
 419 }
 420 
 421 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 422 const {
 423   _promoInfo.print_on(st);
 424 }
 425 
 426 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 427 const {
 428   _dictionary->report_statistics(st);
 429   st->print_cr("Layout of Freelists in Tree");
 430   st->print_cr("---------------------------");
 431   _dictionary->print_free_lists(st);
 432 }
 433 
 434 class BlkPrintingClosure: public BlkClosure {
 435   const CMSCollector*             _collector;
 436   const CompactibleFreeListSpace* _sp;
 437   const CMSBitMap*                _live_bit_map;
 438   const bool                      _post_remark;
 439   outputStream*                   _st;
 440 public:
 441   BlkPrintingClosure(const CMSCollector* collector,
 442                      const CompactibleFreeListSpace* sp,
 443                      const CMSBitMap* live_bit_map,
 444                      outputStream* st):
 445     _collector(collector),
 446     _sp(sp),
 447     _live_bit_map(live_bit_map),
 448     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 449     _st(st) { }
 450   size_t do_blk(HeapWord* addr);
 451 };
 452 
 453 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 454   size_t sz = _sp->block_size_no_stall(addr, _collector);
 455   assert(sz != 0, "Should always be able to compute a size");
 456   if (_sp->block_is_obj(addr)) {
 457     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 458     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 459       p2i(addr),
 460       dead ? "dead" : "live",
 461       sz,
 462       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 463     if (CMSPrintObjectsInDump && !dead) {
 464       oop(addr)->print_on(_st);
 465       _st->print_cr("--------------------------------------");
 466     }
 467   } else { // free block
 468     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 469       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 470     if (CMSPrintChunksInDump) {
 471       ((FreeChunk*)addr)->print_on(_st);
 472       _st->print_cr("--------------------------------------");
 473     }
 474   }
 475   return sz;
 476 }
 477 
 478 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 479   st->print_cr("=========================");
 480   st->print_cr("Block layout in CMS Heap:");
 481   st->print_cr("=========================");
 482   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 483   blk_iterate(&bpcl);
 484 
 485   st->print_cr("=======================================");
 486   st->print_cr("Order & Layout of Promotion Info Blocks");
 487   st->print_cr("=======================================");
 488   print_promo_info_blocks(st);
 489 
 490   st->print_cr("===========================");
 491   st->print_cr("Order of Indexed Free Lists");
 492   st->print_cr("===========================");
 493   print_indexed_free_lists(st);
 494 
 495   st->print_cr("=================================");
 496   st->print_cr("Order of Free Lists in Dictionary");
 497   st->print_cr("=================================");
 498   print_dictionary_free_lists(st);
 499 }
 500 
 501 
 502 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 503   assert_lock_strong(&_freelistLock);
 504   LogHandle(gc, freelist, stats) log;
 505   if (!log.is_debug()) {
 506     return;
 507   }
 508   log.debug("%s", title);
 509   _dictionary->report_statistics(log.debug_stream());
 510   if (log.is_trace()) {
 511     ResourceMark rm;
 512     reportIndexedFreeListStatistics(log.trace_stream());
 513     size_t total_size = totalSizeInIndexedFreeLists() +
 514                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 515     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 516   }
 517 }
 518 
 519 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 520   assert_lock_strong(&_freelistLock);
 521   st->print_cr("Statistics for IndexedFreeLists:");
 522   st->print_cr("--------------------------------");
 523   size_t total_size = totalSizeInIndexedFreeLists();
 524   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 525   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 526   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 527   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 528   if (free_blocks != 0) {
 529     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 530   }
 531 }
 532 
 533 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 534   size_t res = 0;
 535   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 536     debug_only(
 537       ssize_t recount = 0;
 538       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 539          fc = fc->next()) {
 540         recount += 1;
 541       }
 542       assert(recount == _indexedFreeList[i].count(),
 543         "Incorrect count in list");
 544     )
 545     res += _indexedFreeList[i].count();
 546   }
 547   return res;
 548 }
 549 
 550 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 551   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 552     if (_indexedFreeList[i].head() != NULL) {
 553       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 554       return (size_t)i;
 555     }
 556   }
 557   return 0;
 558 }
 559 
 560 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 561   HeapWord* prevEnd = end();
 562   assert(prevEnd != value, "unnecessary set_end call");
 563   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 564         "New end is below unallocated block");
 565   _end = value;
 566   if (prevEnd != NULL) {
 567     // Resize the underlying block offset table.
 568     _bt.resize(pointer_delta(value, bottom()));
 569     if (value <= prevEnd) {
 570       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 571              "New end is below unallocated block");
 572     } else {
 573       // Now, take this new chunk and add it to the free blocks.
 574       // Note that the BOT has not yet been updated for this block.
 575       size_t newFcSize = pointer_delta(value, prevEnd);
 576       // Add the block to the free lists, if possible coalescing it
 577       // with the last free block, and update the BOT and census data.
 578       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 579     }
 580   }
 581 }
 582 
 583 class FreeListSpaceDCTOC : public FilteringDCTOC {
 584   CompactibleFreeListSpace* _cfls;
 585   CMSCollector* _collector;
 586   bool _parallel;
 587 protected:
 588   // Override.
 589 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 590   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 591                                        HeapWord* bottom, HeapWord* top, \
 592                                        ClosureType* cl);                \
 593       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 594                                        HeapWord* bottom, HeapWord* top, \
 595                                        ClosureType* cl);                \
 596     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 597                                        HeapWord* bottom, HeapWord* top, \
 598                                        ClosureType* cl)
 599   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 600   walk_mem_region_with_cl_DECL(FilteringClosure);
 601 
 602 public:
 603   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 604                      CMSCollector* collector,
 605                      ExtendedOopClosure* cl,
 606                      CardTableModRefBS::PrecisionStyle precision,
 607                      HeapWord* boundary,
 608                      bool parallel) :
 609     FilteringDCTOC(sp, cl, precision, boundary),
 610     _cfls(sp), _collector(collector), _parallel(parallel) {}
 611 };
 612 
 613 // We de-virtualize the block-related calls below, since we know that our
 614 // space is a CompactibleFreeListSpace.
 615 
 616 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 617 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 618                                                  HeapWord* bottom,              \
 619                                                  HeapWord* top,                 \
 620                                                  ClosureType* cl) {             \
 621    if (_parallel) {                                                             \
 622      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 623    } else {                                                                     \
 624      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 625    }                                                                            \
 626 }                                                                               \
 627 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 628                                                      HeapWord* bottom,          \
 629                                                      HeapWord* top,             \
 630                                                      ClosureType* cl) {         \
 631   /* Skip parts that are before "mr", in case "block_start" sent us             \
 632      back too far. */                                                           \
 633   HeapWord* mr_start = mr.start();                                              \
 634   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 635   HeapWord* next = bottom + bot_size;                                           \
 636   while (next < mr_start) {                                                     \
 637     bottom = next;                                                              \
 638     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 639     next = bottom + bot_size;                                                   \
 640   }                                                                             \
 641                                                                                 \
 642   while (bottom < top) {                                                        \
 643     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 644         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 645                     oop(bottom)) &&                                             \
 646         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 647       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 648       bottom += _cfls->adjustObjectSize(word_sz);                               \
 649     } else {                                                                    \
 650       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 651     }                                                                           \
 652   }                                                                             \
 653 }                                                                               \
 654 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 655                                                        HeapWord* bottom,        \
 656                                                        HeapWord* top,           \
 657                                                        ClosureType* cl) {       \
 658   /* Skip parts that are before "mr", in case "block_start" sent us             \
 659      back too far. */                                                           \
 660   HeapWord* mr_start = mr.start();                                              \
 661   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 662   HeapWord* next = bottom + bot_size;                                           \
 663   while (next < mr_start) {                                                     \
 664     bottom = next;                                                              \
 665     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 666     next = bottom + bot_size;                                                   \
 667   }                                                                             \
 668                                                                                 \
 669   while (bottom < top) {                                                        \
 670     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 671         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 672                     oop(bottom)) &&                                             \
 673         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 674       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 675       bottom += _cfls->adjustObjectSize(word_sz);                               \
 676     } else {                                                                    \
 677       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 678     }                                                                           \
 679   }                                                                             \
 680 }
 681 
 682 // (There are only two of these, rather than N, because the split is due
 683 // only to the introduction of the FilteringClosure, a local part of the
 684 // impl of this abstraction.)
 685 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 686 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 687 
 688 DirtyCardToOopClosure*
 689 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 690                                       CardTableModRefBS::PrecisionStyle precision,
 691                                       HeapWord* boundary,
 692                                       bool parallel) {
 693   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 694 }
 695 
 696 
 697 // Note on locking for the space iteration functions:
 698 // since the collector's iteration activities are concurrent with
 699 // allocation activities by mutators, absent a suitable mutual exclusion
 700 // mechanism the iterators may go awry. For instance a block being iterated
 701 // may suddenly be allocated or divided up and part of it allocated and
 702 // so on.
 703 
 704 // Apply the given closure to each block in the space.
 705 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 706   assert_lock_strong(freelistLock());
 707   HeapWord *cur, *limit;
 708   for (cur = bottom(), limit = end(); cur < limit;
 709        cur += cl->do_blk_careful(cur));
 710 }
 711 
 712 // Apply the given closure to each block in the space.
 713 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 714   assert_lock_strong(freelistLock());
 715   HeapWord *cur, *limit;
 716   for (cur = bottom(), limit = end(); cur < limit;
 717        cur += cl->do_blk(cur));
 718 }
 719 
 720 // Apply the given closure to each oop in the space.
 721 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 722   assert_lock_strong(freelistLock());
 723   HeapWord *cur, *limit;
 724   size_t curSize;
 725   for (cur = bottom(), limit = end(); cur < limit;
 726        cur += curSize) {
 727     curSize = block_size(cur);
 728     if (block_is_obj(cur)) {
 729       oop(cur)->oop_iterate(cl);
 730     }
 731   }
 732 }
 733 
 734 // NOTE: In the following methods, in order to safely be able to
 735 // apply the closure to an object, we need to be sure that the
 736 // object has been initialized. We are guaranteed that an object
 737 // is initialized if we are holding the Heap_lock with the
 738 // world stopped.
 739 void CompactibleFreeListSpace::verify_objects_initialized() const {
 740   if (is_init_completed()) {
 741     assert_locked_or_safepoint(Heap_lock);
 742     if (Universe::is_fully_initialized()) {
 743       guarantee(SafepointSynchronize::is_at_safepoint(),
 744                 "Required for objects to be initialized");
 745     }
 746   } // else make a concession at vm start-up
 747 }
 748 
 749 // Apply the given closure to each object in the space
 750 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 751   assert_lock_strong(freelistLock());
 752   NOT_PRODUCT(verify_objects_initialized());
 753   HeapWord *cur, *limit;
 754   size_t curSize;
 755   for (cur = bottom(), limit = end(); cur < limit;
 756        cur += curSize) {
 757     curSize = block_size(cur);
 758     if (block_is_obj(cur)) {
 759       blk->do_object(oop(cur));
 760     }
 761   }
 762 }
 763 
 764 // Apply the given closure to each live object in the space
 765 //   The usage of CompactibleFreeListSpace
 766 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
 767 // objects in the space to hold references to objects that are no longer
 768 // valid.  For example, an object may reference another object
 769 // that has already been swept up (collected).  This method uses
 770 // obj_is_alive() to determine whether it is safe to apply the closure to
 771 // an object.  See obj_is_alive() for details on how liveness of an
 772 // object is decided.
 773 
 774 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 775   assert_lock_strong(freelistLock());
 776   NOT_PRODUCT(verify_objects_initialized());
 777   HeapWord *cur, *limit;
 778   size_t curSize;
 779   for (cur = bottom(), limit = end(); cur < limit;
 780        cur += curSize) {
 781     curSize = block_size(cur);
 782     if (block_is_obj(cur) && obj_is_alive(cur)) {
 783       blk->do_object(oop(cur));
 784     }
 785   }
 786 }
 787 
 788 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 789                                                   UpwardsObjectClosure* cl) {
 790   assert_locked(freelistLock());
 791   NOT_PRODUCT(verify_objects_initialized());
 792   assert(!mr.is_empty(), "Should be non-empty");
 793   // We use MemRegion(bottom(), end()) rather than used_region() below
 794   // because the two are not necessarily equal for some kinds of
 795   // spaces, in particular, certain kinds of free list spaces.
 796   // We could use the more complicated but more precise:
 797   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 798   // but the slight imprecision seems acceptable in the assertion check.
 799   assert(MemRegion(bottom(), end()).contains(mr),
 800          "Should be within used space");
 801   HeapWord* prev = cl->previous();   // max address from last time
 802   if (prev >= mr.end()) { // nothing to do
 803     return;
 804   }
 805   // This assert will not work when we go from cms space to perm
 806   // space, and use the same closure. Easy fix deferred for later. XXX YSR
 807   // assert(prev == NULL || contains(prev), "Should be within space");
 808 
 809   bool last_was_obj_array = false;
 810   HeapWord *blk_start_addr, *region_start_addr;
 811   if (prev > mr.start()) {
 812     region_start_addr = prev;
 813     blk_start_addr    = prev;
 814     // The previous invocation may have pushed "prev" beyond the
 815     // last allocated block, yet there may still be blocks
 816     // in this region due to a particular coalescing policy.
 817     // Relax the assertion so that the case where the unallocated
 818     // block is maintained and "prev" is beyond the unallocated
 819     // block does not cause the assertion to fire.
 820     assert((BlockOffsetArrayUseUnallocatedBlock &&
 821             (!is_in(prev))) ||
 822            (blk_start_addr == block_start(region_start_addr)), "invariant");
 823   } else {
 824     region_start_addr = mr.start();
 825     blk_start_addr    = block_start(region_start_addr);
 826   }
 827   HeapWord* region_end_addr = mr.end();
 828   MemRegion derived_mr(region_start_addr, region_end_addr);
 829   while (blk_start_addr < region_end_addr) {
 830     const size_t size = block_size(blk_start_addr);
 831     if (block_is_obj(blk_start_addr)) {
 832       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 833     } else {
 834       last_was_obj_array = false;
 835     }
 836     blk_start_addr += size;
 837   }
 838   if (!last_was_obj_array) {
 839     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 840            "Should be within (closed) used space");
 841     assert(blk_start_addr > prev, "Invariant");
 842     cl->set_previous(blk_start_addr); // min address for next time
 843   }
 844 }
 845 
 846 // Callers of this iterator beware: The closure application should
 847 // be robust in the face of uninitialized objects and should (always)
 848 // return a correct size so that the next addr + size below gives us a
 849 // valid block boundary. [See for instance,
 850 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 851 // in ConcurrentMarkSweepGeneration.cpp.]
 852 HeapWord*
 853 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 854   ObjectClosureCareful* cl) {
 855   assert_lock_strong(freelistLock());
 856   // Can't use used_region() below because it may not necessarily
 857   // be the same as [bottom(),end()); although we could
 858   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 859   // that appears too cumbersome, so we just do the simpler check
 860   // in the assertion below.
 861   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 862          "mr should be non-empty and within used space");
 863   HeapWord *addr, *end;
 864   size_t size;
 865   for (addr = block_start_careful(mr.start()), end  = mr.end();
 866        addr < end; addr += size) {
 867     FreeChunk* fc = (FreeChunk*)addr;
 868     if (fc->is_free()) {
 869       // Since we hold the free list lock, which protects direct
 870       // allocation in this generation by mutators, a free object
 871       // will remain free throughout this iteration code.
 872       size = fc->size();
 873     } else {
 874       // Note that the object need not necessarily be initialized,
 875       // because (for instance) the free list lock does NOT protect
 876       // object initialization. The closure application below must
 877       // therefore be correct in the face of uninitialized objects.
 878       size = cl->do_object_careful_m(oop(addr), mr);
 879       if (size == 0) {
 880         // An unparsable object found. Signal early termination.
 881         return addr;
 882       }
 883     }
 884   }
 885   return NULL;
 886 }
 887 
 888 
 889 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 890   NOT_PRODUCT(verify_objects_initialized());
 891   return _bt.block_start(p);
 892 }
 893 
 894 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 895   return _bt.block_start_careful(p);
 896 }
 897 
 898 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 899   NOT_PRODUCT(verify_objects_initialized());
 900   // This must be volatile, or else there is a danger that the compiler
 901   // will compile the code below into a sometimes-infinite loop, by keeping
 902   // the value read the first time in a register.
 903   while (true) {
 904     // We must do this until we get a consistent view of the object.
 905     if (FreeChunk::indicatesFreeChunk(p)) {
 906       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 907       size_t res = fc->size();
 908 
 909       // Bugfix for systems with weak memory model (PPC64/IA64). The
 910       // block's free bit was set and we have read the size of the
 911       // block. Acquire and check the free bit again. If the block is
 912       // still free, the read size is correct.
 913       OrderAccess::acquire();
 914 
 915       // If the object is still a free chunk, return the size, else it
 916       // has been allocated so try again.
 917       if (FreeChunk::indicatesFreeChunk(p)) {
 918         assert(res != 0, "Block size should not be 0");
 919         return res;
 920       }
 921     } else {
 922       // must read from what 'p' points to in each loop.
 923       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 924       if (k != NULL) {
 925         assert(k->is_klass(), "Should really be klass oop.");
 926         oop o = (oop)p;
 927         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 928 
 929         // Bugfix for systems with weak memory model (PPC64/IA64).
 930         // The object o may be an array. Acquire to make sure that the array
 931         // size (third word) is consistent.
 932         OrderAccess::acquire();
 933 
 934         size_t res = o->size_given_klass(k);
 935         res = adjustObjectSize(res);
 936         assert(res != 0, "Block size should not be 0");
 937         return res;
 938       }
 939     }
 940   }
 941 }
 942 
 943 // TODO: Now that is_parsable is gone, we should combine these two functions.
 944 // A variant of the above that uses the Printezis bits for
 945 // unparsable but allocated objects. This avoids any possible
 946 // stalls waiting for mutators to initialize objects, and is
 947 // thus potentially faster than the variant above. However,
 948 // this variant may return a zero size for a block that is
 949 // under mutation and for which a consistent size cannot be
 950 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 951 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 952                                                      const CMSCollector* c)
 953 const {
 954   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 955   // This must be volatile, or else there is a danger that the compiler
 956   // will compile the code below into a sometimes-infinite loop, by keeping
 957   // the value read the first time in a register.
 958   DEBUG_ONLY(uint loops = 0;)
 959   while (true) {
 960     // We must do this until we get a consistent view of the object.
 961     if (FreeChunk::indicatesFreeChunk(p)) {
 962       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 963       size_t res = fc->size();
 964 
 965       // Bugfix for systems with weak memory model (PPC64/IA64). The
 966       // free bit of the block was set and we have read the size of
 967       // the block. Acquire and check the free bit again. If the
 968       // block is still free, the read size is correct.
 969       OrderAccess::acquire();
 970 
 971       if (FreeChunk::indicatesFreeChunk(p)) {
 972         assert(res != 0, "Block size should not be 0");
 973         assert(loops == 0, "Should be 0");
 974         return res;
 975       }
 976     } else {
 977       // must read from what 'p' points to in each loop.
 978       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 979       // We trust the size of any object that has a non-NULL
 980       // klass and (for those in the perm gen) is parsable
 981       // -- irrespective of its conc_safe-ty.
 982       if (k != NULL) {
 983         assert(k->is_klass(), "Should really be klass oop.");
 984         oop o = (oop)p;
 985         assert(o->is_oop(), "Should be an oop");
 986 
 987         // Bugfix for systems with weak memory model (PPC64/IA64).
 988         // The object o may be an array. Acquire to make sure that the array
 989         // size (third word) is consistent.
 990         OrderAccess::acquire();
 991 
 992         size_t res = o->size_given_klass(k);
 993         res = adjustObjectSize(res);
 994         assert(res != 0, "Block size should not be 0");
 995         return res;
 996       } else {
 997         // May return 0 if P-bits not present.
 998         return c->block_size_if_printezis_bits(p);
 999       }
1000     }
1001     assert(loops == 0, "Can loop at most once");
1002     DEBUG_ONLY(loops++;)
1003   }
1004 }
1005 
1006 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1007   NOT_PRODUCT(verify_objects_initialized());
1008   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1009   FreeChunk* fc = (FreeChunk*)p;
1010   if (fc->is_free()) {
1011     return fc->size();
1012   } else {
1013     // Ignore mark word because this may be a recently promoted
1014     // object whose mark word is used to chain together grey
1015     // objects (the last one would have a null value).
1016     assert(oop(p)->is_oop(true), "Should be an oop");
1017     return adjustObjectSize(oop(p)->size());
1018   }
1019 }
1020 
1021 // This implementation assumes that the property of "being an object" is
1022 // stable.  But being a free chunk may not be (because of parallel
1023 // promotion).
1024 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1025   FreeChunk* fc = (FreeChunk*)p;
1026   assert(is_in_reserved(p), "Should be in space");
1027   if (FreeChunk::indicatesFreeChunk(p)) return false;
1028   Klass* k = oop(p)->klass_or_null();
1029   if (k != NULL) {
1030     // Ignore mark word because it may have been used to
1031     // chain together promoted objects (the last one
1032     // would have a null value).
1033     assert(oop(p)->is_oop(true), "Should be an oop");
1034     return true;
1035   } else {
1036     return false;  // Was not an object at the start of collection.
1037   }
1038 }
1039 
1040 // Check if the object is alive. This fact is checked either by consulting
1041 // the main marking bitmap in the sweeping phase or, if it's a permanent
1042 // generation and we're not in the sweeping phase, by checking the
1043 // perm_gen_verify_bit_map where we store the "deadness" information if
1044 // we did not sweep the perm gen in the most recent previous GC cycle.
1045 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1046   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1047          "Else races are possible");
1048   assert(block_is_obj(p), "The address should point to an object");
1049 
1050   // If we're sweeping, we use object liveness information from the main bit map
1051   // for both perm gen and old gen.
1052   // We don't need to lock the bitmap (live_map or dead_map below), because
1053   // EITHER we are in the middle of the sweeping phase, and the
1054   // main marking bit map (live_map below) is locked,
1055   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1056   // is stable, because it's mutated only in the sweeping phase.
1057   // NOTE: This method is also used by jmap where, if class unloading is
1058   // off, the results can return "false" for legitimate perm objects,
1059   // when we are not in the midst of a sweeping phase, which can result
1060   // in jmap not reporting certain perm gen objects. This will be moot
1061   // if/when the perm gen goes away in the future.
1062   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1063     CMSBitMap* live_map = _collector->markBitMap();
1064     return live_map->par_isMarked((HeapWord*) p);
1065   }
1066   return true;
1067 }
1068 
1069 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1070   FreeChunk* fc = (FreeChunk*)p;
1071   assert(is_in_reserved(p), "Should be in space");
1072   assert(_bt.block_start(p) == p, "Should be a block boundary");
1073   if (!fc->is_free()) {
1074     // Ignore mark word because it may have been used to
1075     // chain together promoted objects (the last one
1076     // would have a null value).
1077     assert(oop(p)->is_oop(true), "Should be an oop");
1078     return true;
1079   }
1080   return false;
1081 }
1082 
1083 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1084 // approximate answer if you don't hold the freelist lock when you call this.
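     // (See free() above for a more detailed discussion of this caveat.)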
1085 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1086   size_t size = 0;
1087   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1088     debug_only(
1089       // We may be calling here without the lock in which case we
1090       // won't do this modest sanity check.
1091       if (freelistLock()->owned_by_self()) {
1092         size_t total_list_size = 0;
1093         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1094           fc = fc->next()) {
1095           total_list_size += i;
1096         }
1097         assert(total_list_size == i * _indexedFreeList[i].count(),
1098                "Count in list is incorrect");
1099       }
1100     )
1101     size += i * _indexedFreeList[i].count();
1102   }
1103   return size;
1104 }
1105 
1106 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1107   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1108   return allocate(size);
1109 }
1110 
1111 HeapWord*
1112 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1113   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1114 }
1115 
1116 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1117   assert_lock_strong(freelistLock());
1118   HeapWord* res = NULL;
1119   assert(size == adjustObjectSize(size),
1120          "use adjustObjectSize() before calling into allocate()");
1121 
1122   res = allocate_adaptive_freelists(size);
1123 
1124   if (res != NULL) {
1125     // check that res does lie in this space!
1126     assert(is_in_reserved(res), "Not in this space!");
1127     assert(is_aligned((void*)res), "alignment check");
1128 
1129     FreeChunk* fc = (FreeChunk*)res;
1130     fc->markNotFree();
1131     assert(!fc->is_free(), "shouldn't be marked free");
1132     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1133     // Verify that the block offset table shows this to
1134     // be a single block, but not one which is unallocated.
1135     _bt.verify_single_block(res, size);
1136     _bt.verify_not_unallocated(res, size);
1137     // mangle a just allocated object with a distinct pattern.
1138     debug_only(fc->mangleAllocated(size));
1139   }
1140 
1141   return res;
1142 }
1143 
1144 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1145   assert_lock_strong(freelistLock());
1146   HeapWord* res = NULL;
1147   assert(size == adjustObjectSize(size),
1148          "use adjustObjectSize() before calling into allocate()");
1149 
1150   // Strategy
1151   //   if small
1152   //     exact size from the small-object indexed list
1153   //     small or large linear allocation block (linAB) as appropriate
1154   //     take from lists of greater sized chunks
1155   //   else
1156   //     dictionary
1157   //     small or large linear allocation block if it has the space
1158   // Try allocating exact size from indexTable first
1159   if (size < IndexSetSize) {
1160     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1161     if (res != NULL) {
1162       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1163         "Not removed from free list");
1164       // no block offset table adjustment is necessary on blocks in
1165       // the indexed lists.
1166 
1167     // Try allocating from the small LinAB
1168     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1169         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1170         // if successful, the above also adjusts block offset table
1171         // Note that this call will refill the LinAB to
1172         // satisfy the request.  This is different than
1173         // evm.
1174         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1175     } else {
1176       // Raid the exact free lists larger than size, even if they are not
1177       // overpopulated.
1178       res = (HeapWord*) getChunkFromGreater(size);
1179     }
1180   } else {
1181     // Big objects get allocated directly from the dictionary.
1182     res = (HeapWord*) getChunkFromDictionaryExact(size);
1183     if (res == NULL) {
1184       // Try hard not to fail since an allocation failure will likely
1185       // trigger a synchronous GC.  Try to get the space from the
1186       // allocation blocks.
1187       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1188     }
1189   }
1190 
1191   return res;
1192 }
1193 
1194 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1195 // when promoting obj.
1196 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1197   // Depending on the object size, expansion may require refilling either a
1198   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1199   // is added because the dictionary may over-allocate to avoid fragmentation.
1200   size_t space = obj_size;
1201   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1202   return space;
1203 }
1204 
1205 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1206   FreeChunk* ret;
1207 
1208   assert(numWords >= MinChunkSize, "Size is less than minimum");
1209   assert(linearAllocationWouldFail() || bestFitFirst(),
1210     "Should not be here");
1211 
1212   size_t i;
1213   size_t currSize = numWords + MinChunkSize;
1214   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1215   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1216     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1217     if (fl->head()) {
1218       ret = getFromListGreater(fl, numWords);
1219       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1220       return ret;
1221     }
1222   }
1223 
1224   currSize = MAX2((size_t)SmallForDictionary,
1225                   (size_t)(numWords + MinChunkSize));
1226 
1227   /* Try to get a chunk that satisfies request, while avoiding
1228      fragmentation that can't be handled. */
1229   {
1230     ret =  dictionary()->get_chunk(currSize);
1231     if (ret != NULL) {
1232       assert(ret->size() - numWords >= MinChunkSize,
1233              "Chunk is too small");
1234       _bt.allocated((HeapWord*)ret, ret->size());
1235       /* Carve returned chunk. */
1236       (void) splitChunkAndReturnRemainder(ret, numWords);
1237       /* Clear the list linkage; the chunk itself remains marked free. */
1238       assert(ret->is_free(), "This chunk should be free");
1239       ret->link_prev(NULL);
1240     }
1241     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1242     return ret;
1243   }
1244   ShouldNotReachHere();
1245 }
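
// Note on getChunkFromGreater() above: the indexed-list scan starts at
// numWords + MinChunkSize rather than at numWords so that, once numWords are
// carved off the front of the donor chunk, the remainder is still at least
// MinChunkSize words and can be returned to the free lists as a valid free
// chunk; the dictionary path asserts the same invariant before splitting.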
1246 
1247 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1248   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1249   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1250 }
1251 
1252 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1253   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1254          (_smallLinearAllocBlock._word_size == fc->size()),
1255          "Linear allocation block shows incorrect size");
1256   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1257           (_smallLinearAllocBlock._word_size == fc->size()));
1258 }
1259 
1260 // Check if the purported free chunk is present either as a linear
1261 // allocation block, the size-indexed table of (smaller) free blocks,
1262 // or the larger free blocks kept in the binary tree dictionary.
1263 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1264   if (verify_chunk_is_linear_alloc_block(fc)) {
1265     return true;
1266   } else if (fc->size() < IndexSetSize) {
1267     return verifyChunkInIndexedFreeLists(fc);
1268   } else {
1269     return dictionary()->verify_chunk_in_free_list(fc);
1270   }
1271 }
1272 
1273 #ifndef PRODUCT
1274 void CompactibleFreeListSpace::assert_locked() const {
1275   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1276 }
1277 
1278 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1279   CMSLockVerifier::assert_locked(lock);
1280 }
1281 #endif
1282 
1283 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1284   // In the parallel case, the main thread holds the free list lock
1285   // on behalf of the parallel threads.
1286   FreeChunk* fc;
1287   {
1288     // If GC is parallel, this might be called by several threads.
1289     // This should be rare enough that the locking overhead won't affect
1290     // the sequential code.
1291     MutexLockerEx x(parDictionaryAllocLock(),
1292                     Mutex::_no_safepoint_check_flag);
1293     fc = getChunkFromDictionary(size);
1294   }
1295   if (fc != NULL) {
1296     fc->dontCoalesce();
1297     assert(fc->is_free(), "Should be free, but not coalescable");
1298     // Verify that the block offset table shows this to
1299     // be a single block, but not one which is unallocated.
1300     _bt.verify_single_block((HeapWord*)fc, fc->size());
1301     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1302   }
1303   return fc;
1304 }
1305 
1306 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1307   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1308   assert_locked();
1309 
1310   // If we are tracking promotions, first ensure space for
1311   // promotion (including spooling space for saving the header if necessary),
1312   // then allocate and copy, then track the promoted info if needed.
1313   // When tracking (see PromotionInfo::track()), the mark word may
1314   // be displaced and in this case restoration of the mark word
1315   // occurs in the (oop_since_save_marks_)iterate phase.
1316   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1317     return NULL;
1318   }
1319   // Call the allocate(size_t, bool) form directly to avoid the
1320   // additional call through the allocate(size_t) form.  Having
1321   // the compiler inline the call is problematic because allocate(size_t)
1322   // is a virtual method.
1323   HeapWord* res = allocate(adjustObjectSize(obj_size));
1324   if (res != NULL) {
1325     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1326     // if we should be tracking promotions, do so.
1327     if (_promoInfo.tracking()) {
1328         _promoInfo.track((PromotedObject*)res);
1329     }
1330   }
1331   return oop(res);
1332 }
1333 
1334 HeapWord*
1335 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1336   assert_locked();
1337   assert(size >= MinChunkSize, "minimum chunk size");
1338   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1339     "maximum from smallLinearAllocBlock");
1340   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1341 }
1342 
1343 HeapWord*
1344 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1345                                                        size_t size) {
1346   assert_locked();
1347   assert(size >= MinChunkSize, "too small");
1348   HeapWord* res = NULL;
1349   // Try to do linear allocation from blk, refilling it if it becomes exhausted.
1350   if (blk->_word_size == 0) {
1351     // We have probably been unable to fill this either in the prologue or
1352     // when it was exhausted at the last linear allocation. Bail out until
1353     // next time.
1354     assert(blk->_ptr == NULL, "consistency check");
1355     return NULL;
1356   }
1357   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1358   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1359   if (res != NULL) return res;
1360 
1361   // about to exhaust this linear allocation block
1362   if (blk->_word_size == size) { // exactly satisfied
1363     res = blk->_ptr;
1364     _bt.allocated(res, blk->_word_size);
1365   } else if (size + MinChunkSize <= blk->_refillSize) {
1366     size_t sz = blk->_word_size;
1367     // Update _unallocated_block if the size is such that the chunk would be
1368     // returned to the indexed free list.  All other chunks in the indexed
1369     // free lists are allocated from the dictionary, so _unallocated_block
1370     // has already been adjusted for them.  Do it here so that the cost
1371     // is incurred for all chunks added back to the indexed free lists.
1372     if (sz < SmallForDictionary) {
1373       _bt.allocated(blk->_ptr, sz);
1374     }
1375     // Return the chunk that isn't big enough, and then refill below.
1376     addChunkToFreeLists(blk->_ptr, sz);
1377     split_birth(sz);
1378     // Don't keep statistics on adding back chunk from a LinAB.
1379   } else {
1380     // A refilled block would not satisfy the request.
1381     return NULL;
1382   }
1383 
1384   blk->_ptr = NULL; blk->_word_size = 0;
1385   refillLinearAllocBlock(blk);
1386   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1387          "block was replenished");
1388   if (res != NULL) {
1389     split_birth(size);
1390     repairLinearAllocBlock(blk);
1391   } else if (blk->_ptr != NULL) {
1392     res = blk->_ptr;
1393     size_t blk_size = blk->_word_size;
1394     blk->_word_size -= size;
1395     blk->_ptr  += size;
1396     split_birth(size);
1397     repairLinearAllocBlock(blk);
1398     // Update BOT last so that other (parallel) GC threads see a consistent
1399     // view of the BOT and free blocks.
1400     // Above must occur before BOT is updated below.
1401     OrderAccess::storestore();
1402     _bt.split_block(res, blk_size, size);  // adjust block offset table
1403   }
1404   return res;
1405 }
1406 
1407 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1408                                         LinearAllocBlock* blk,
1409                                         size_t size) {
1410   assert_locked();
1411   assert(size >= MinChunkSize, "too small");
1412 
1413   HeapWord* res = NULL;
1414   // This is the common case.  Keep it simple.
1415   if (blk->_word_size >= size + MinChunkSize) {
1416     assert(blk->_ptr != NULL, "consistency check");
1417     res = blk->_ptr;
1418     // Note that the BOT is up-to-date for the linAB before allocation.  It
1419     // indicates the start of the linAB.  The split_block() updates the
1420     // BOT for the linAB after the allocation (indicates the start of the
1421     // next chunk to be allocated).
1422     size_t blk_size = blk->_word_size;
1423     blk->_word_size -= size;
1424     blk->_ptr  += size;
1425     split_birth(size);
1426     repairLinearAllocBlock(blk);
1427     // Update BOT last so that other (parallel) GC threads see a consistent
1428     // view of the BOT and free blocks.
1429     // Above must occur before BOT is updated below.
1430     OrderAccess::storestore();
1431     _bt.split_block(res, blk_size, size);  // adjust block offset table
1432     _bt.allocated(res, size);
1433   }
1434   return res;
1435 }
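
// Illustrative example for getChunkFromLinearAllocBlockRemainder() above
// (hypothetical sizes): with a 50-word linAB and a request for 20 words, the
// common case applies as long as 50 >= 20 + MinChunkSize; the first 20 words
// are returned, the block shrinks to 30 words starting 20 words further on,
// and the BOT is updated to reflect the new block boundary last.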
1436 
1437 FreeChunk*
1438 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1439   assert_locked();
1440   assert(size < SmallForDictionary, "just checking");
1441   FreeChunk* res;
1442   res = _indexedFreeList[size].get_chunk_at_head();
1443   if (res == NULL) {
1444     res = getChunkFromIndexedFreeListHelper(size);
1445   }
1446   _bt.verify_not_unallocated((HeapWord*) res, size);
1447   assert(res == NULL || res->size() == size, "Incorrect block size");
1448   return res;
1449 }
1450 
1451 FreeChunk*
1452 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1453   bool replenish) {
1454   assert_locked();
1455   FreeChunk* fc = NULL;
1456   if (size < SmallForDictionary) {
1457     assert(_indexedFreeList[size].head() == NULL ||
1458       _indexedFreeList[size].surplus() <= 0,
1459       "List for this size should be empty or underpopulated");
1460     // Try best fit in exact lists before replenishing the list
1461     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1462       // Replenish list.
1463       //
1464       // Approaches tried that did not work:
1465       //   Allocating out of the two LinABs first, before
1466       // replenishing the lists.
1467       //   Using a small linAB of size 256 (a size in the indexed lists)
1468       // and replenishing the indexed lists from that small linAB.
1469       //
1470       FreeChunk* newFc = NULL;
1471       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1472       if (replenish_size < SmallForDictionary) {
1473         // Do not replenish from an underpopulated size.
1474         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1475             _indexedFreeList[replenish_size].head() != NULL) {
1476           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1477         } else if (bestFitFirst()) {
1478           newFc = bestFitSmall(replenish_size);
1479         }
1480       }
1481       if (newFc == NULL && replenish_size > size) {
1482         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1483         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1484       }
1485       // Note: The stats update regarding the split-death of the block
1486       // obtained above is recorded below, precisely when we know we are
1487       // actually going to split it into more than one piece.
1488       if (newFc != NULL) {
1489         if  (replenish || CMSReplenishIntermediate) {
1490           // Replenish this list and return one block to caller.
1491           size_t i;
1492           FreeChunk *curFc, *nextFc;
1493           size_t num_blk = newFc->size() / size;
1494           assert(num_blk >= 1, "Smaller than requested?");
1495           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1496           if (num_blk > 1) {
1497             // we are sure we will be splitting the block just obtained
1498             // into multiple pieces; record the split-death of the original
1499             splitDeath(replenish_size);
1500           }
1501           // carve up and link blocks 0, ..., num_blk - 2
1502           // The last chunk is not added to the lists but is returned as the
1503           // free chunk.
1504           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1505                i = 0;
1506                i < (num_blk - 1);
1507                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1508                i++) {
1509             curFc->set_size(size);
1510             // Don't record this as a return in order to try and
1511             // determine the "returns" from a GC.
1512             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1513             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1514             _bt.mark_block((HeapWord*)curFc, size);
1515             split_birth(size);
1516             // Don't record the initial population of the indexed list
1517             // as a split birth.
1518           }
1519 
1520           // check that the arithmetic was OK above
1521           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1522             "inconsistency in carving newFc");
1523           curFc->set_size(size);
1524           _bt.mark_block((HeapWord*)curFc, size);
1525           split_birth(size);
1526           fc = curFc;
1527         } else {
1528           // Return entire block to caller
1529           fc = newFc;
1530         }
1531       }
1532     }
1533   } else {
1534     // Get a free chunk from the free chunk dictionary; it is returned to
1535     // the caller to replenish the indexed free list.
1536     fc = getChunkFromDictionaryExact(size);
1537   }
1538   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1539   return fc;
1540 }
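
// Illustrative replenish example for getChunkFromIndexedFreeListHelper()
// above (hypothetical values): for a request of size 8 with
// CMSIndexedFreeListReplenish == 4, replenish_size is 32.  If a 32-word chunk
// is obtained, num_blk == 4: three 8-word pieces are linked onto
// _indexedFreeList[8] and the fourth piece is returned to the caller.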
1541 
1542 FreeChunk*
1543 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1544   assert_locked();
1545   FreeChunk* fc = _dictionary->get_chunk(size,
1546                                          FreeBlockDictionary<FreeChunk>::atLeast);
1547   if (fc == NULL) {
1548     return NULL;
1549   }
1550   _bt.allocated((HeapWord*)fc, fc->size());
1551   if (fc->size() >= size + MinChunkSize) {
1552     fc = splitChunkAndReturnRemainder(fc, size);
1553   }
1554   assert(fc->size() >= size, "chunk too small");
1555   assert(fc->size() < size + MinChunkSize, "chunk too big");
1556   _bt.verify_single_block((HeapWord*)fc, fc->size());
1557   return fc;
1558 }
1559 
1560 FreeChunk*
1561 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1562   assert_locked();
1563   FreeChunk* fc = _dictionary->get_chunk(size,
1564                                          FreeBlockDictionary<FreeChunk>::atLeast);
1565   if (fc == NULL) {
1566     return fc;
1567   }
1568   _bt.allocated((HeapWord*)fc, fc->size());
1569   if (fc->size() == size) {
1570     _bt.verify_single_block((HeapWord*)fc, size);
1571     return fc;
1572   }
1573   assert(fc->size() > size, "get_chunk() guarantee");
1574   if (fc->size() < size + MinChunkSize) {
1575     // Return the chunk to the dictionary and go get a bigger one.
1576     returnChunkToDictionary(fc);
1577     fc = _dictionary->get_chunk(size + MinChunkSize,
1578                                 FreeBlockDictionary<FreeChunk>::atLeast);
1579     if (fc == NULL) {
1580       return NULL;
1581     }
1582     _bt.allocated((HeapWord*)fc, fc->size());
1583   }
1584   assert(fc->size() >= size + MinChunkSize, "tautology");
1585   fc = splitChunkAndReturnRemainder(fc, size);
1586   assert(fc->size() == size, "chunk is wrong size");
1587   _bt.verify_single_block((HeapWord*)fc, size);
1588   return fc;
1589 }
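
// Note on the exact-fit logic in getChunkFromDictionaryExact() above: if the
// dictionary hands back a chunk whose size lies strictly between size and
// size + MinChunkSize, the leftover after an exact split would be too small
// to stand alone as a free chunk, so the chunk is returned to the dictionary
// and a chunk of at least size + MinChunkSize is requested instead.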
1590 
1591 void
1592 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1593   assert_locked();
1594 
1595   size_t size = chunk->size();
1596   _bt.verify_single_block((HeapWord*)chunk, size);
1597   // adjust _unallocated_block downward, as necessary
1598   _bt.freed((HeapWord*)chunk, size);
1599   _dictionary->return_chunk(chunk);
1600 #ifndef PRODUCT
1601   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1602     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1603     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1604     tl->verify_stats();
1605   }
1606 #endif // PRODUCT
1607 }
1608 
1609 void
1610 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1611   assert_locked();
1612   size_t size = fc->size();
1613   _bt.verify_single_block((HeapWord*) fc, size);
1614   _bt.verify_not_unallocated((HeapWord*) fc, size);
1615   _indexedFreeList[size].return_chunk_at_tail(fc);
1616 #ifndef PRODUCT
1617   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1618      _indexedFreeList[size].verify_stats();
1619   }
1620 #endif // PRODUCT
1621 }
1622 
1623 // Append the chunk to the free block that precedes it -- but only if
1624 // that block is the largest one in the dictionary -- and update the BOT
1625 // and census data. We would of course have preferred to coalesce it with the
1626 // last block, but it's currently less expensive to find the
1627 // largest block than it is to find the last.
1628 void
1629 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1630   HeapWord* chunk, size_t     size) {
1631   // check that the chunk does lie in this space!
1632   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1633   // One of the parallel gc task threads may be here
1634   // whilst others are allocating.
1635   Mutex* lock = &_parDictionaryAllocLock;
1636   FreeChunk* ec;
1637   {
1638     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1639     ec = dictionary()->find_largest_dict();  // get largest block
1640     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1641       // It's a coterminal block - we can coalesce.
1642       size_t old_size = ec->size();
1643       coalDeath(old_size);
1644       removeChunkFromDictionary(ec);
1645       size += old_size;
1646     } else {
1647       ec = (FreeChunk*)chunk;
1648     }
1649   }
1650   ec->set_size(size);
1651   debug_only(ec->mangleFreed(size));
1652   if (size < SmallForDictionary) {
1653     lock = _indexedFreeListParLocks[size];
1654   }
1655   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1656   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1657   // record the birth under the lock since the recording involves
1658   // manipulation of the list on which the chunk lives and
1659   // if the chunk is allocated and is the last on the list,
1660   // the list can go away.
1661   coalBirth(size);
1662 }
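
// Illustrative example for addChunkToFreeListsAtEndRecordingStats() above
// (hypothetical layout): if the largest dictionary block ends exactly where
// the incoming chunk begins, the two are merged -- a coalesce death is
// recorded at the old size, the old block leaves the dictionary, and the
// combined block is added back with a coalesce birth at the combined size.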
1663 
1664 void
1665 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1666                                               size_t     size) {
1667   // check that the chunk does lie in this space!
1668   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1669   assert_locked();
1670   _bt.verify_single_block(chunk, size);
1671 
1672   FreeChunk* fc = (FreeChunk*) chunk;
1673   fc->set_size(size);
1674   debug_only(fc->mangleFreed(size));
1675   if (size < SmallForDictionary) {
1676     returnChunkToFreeList(fc);
1677   } else {
1678     returnChunkToDictionary(fc);
1679   }
1680 }
1681 
1682 void
1683 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1684   size_t size, bool coalesced) {
1685   assert_locked();
1686   assert(chunk != NULL, "null chunk");
1687   if (coalesced) {
1688     // repair BOT
1689     _bt.single_block(chunk, size);
1690   }
1691   addChunkToFreeLists(chunk, size);
1692 }
1693 
1694 // We _must_ find the purported chunk on our free lists;
1695 // we assert if we don't.
1696 void
1697 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1698   size_t size = fc->size();
1699   assert_locked();
1700   debug_only(verifyFreeLists());
1701   if (size < SmallForDictionary) {
1702     removeChunkFromIndexedFreeList(fc);
1703   } else {
1704     removeChunkFromDictionary(fc);
1705   }
1706   _bt.verify_single_block((HeapWord*)fc, size);
1707   debug_only(verifyFreeLists());
1708 }
1709 
1710 void
1711 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1712   size_t size = fc->size();
1713   assert_locked();
1714   assert(fc != NULL, "null chunk");
1715   _bt.verify_single_block((HeapWord*)fc, size);
1716   _dictionary->remove_chunk(fc);
1717   // adjust _unallocated_block upward, as necessary
1718   _bt.allocated((HeapWord*)fc, size);
1719 }
1720 
1721 void
1722 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1723   assert_locked();
1724   size_t size = fc->size();
1725   _bt.verify_single_block((HeapWord*)fc, size);
1726   NOT_PRODUCT(
1727     if (FLSVerifyIndexTable) {
1728       verifyIndexedFreeList(size);
1729     }
1730   )
1731   _indexedFreeList[size].remove_chunk(fc);
1732   NOT_PRODUCT(
1733     if (FLSVerifyIndexTable) {
1734       verifyIndexedFreeList(size);
1735     }
1736   )
1737 }
1738 
1739 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1740   /* A hint is the next larger size that has a surplus.
1741      Start the search at a size large enough to guarantee that
1742      the excess is >= MinChunkSize. */
1743   size_t start = align_object_size(numWords + MinChunkSize);
1744   if (start < IndexSetSize) {
1745     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1746     size_t    hint = _indexedFreeList[start].hint();
1747     while (hint < IndexSetSize) {
1748       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1749       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1750       if (fl->surplus() > 0 && fl->head() != NULL) {
1751         // Found a list with surplus, reset original hint
1752         // and split out a free chunk which is returned.
1753         _indexedFreeList[start].set_hint(hint);
1754         FreeChunk* res = getFromListGreater(fl, numWords);
1755         assert(res == NULL || res->is_free(),
1756           "Should be returning a free chunk");
1757         return res;
1758       }
1759       hint = fl->hint(); /* keep looking */
1760     }
1761     /* None found. */
1762     it[start].set_hint(IndexSetSize);
1763   }
1764   return NULL;
1765 }
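
// Illustrative example of the hint chain in bestFitSmall() above
// (hypothetical sizes): suppose the search starts at list "start" and that
// list's hint is 24.  If _indexedFreeList[24] has a surplus and a non-empty
// head, the start list's hint is reset to 24 and the requested chunk is split
// out of a 24-word block; otherwise the walk follows successive hints until
// one has a surplus or IndexSetSize is reached, in which case NULL is returned.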
1766 
1767 /* Requires fl->size >= numWords + MinChunkSize */
1768 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1769   size_t numWords) {
1770   FreeChunk *curr = fl->head();
1771   assert(curr != NULL, "List is empty");
1772   assert(numWords >= MinChunkSize, "Word size is too small");
1773   size_t oldNumWords = curr->size();
1774   assert(oldNumWords >= numWords + MinChunkSize,
1775         "Size of chunks in the list is too small");
1776 
1777   fl->remove_chunk(curr);
1778   // recorded indirectly by splitChunkAndReturnRemainder -
1779   // smallSplit(oldNumWords, numWords);
1780   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1781   // Does anything have to be done for the remainder in terms of
1782   // fixing the card table?
1783   assert(new_chunk == NULL || new_chunk->is_free(),
1784     "Should be returning a free chunk");
1785   return new_chunk;
1786 }
1787 
1788 FreeChunk*
1789 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1790   size_t new_size) {
1791   assert_locked();
1792   size_t size = chunk->size();
1793   assert(size > new_size, "Split from a smaller block?");
1794   assert(is_aligned(chunk), "alignment problem");
1795   assert(size == adjustObjectSize(size), "alignment problem");
1796   size_t rem_sz = size - new_size;
1797   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1798   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1799   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1800   assert(is_aligned(ffc), "alignment problem");
1801   ffc->set_size(rem_sz);
1802   ffc->link_next(NULL);
1803   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1804   // Above must occur before BOT is updated below.
1805   // adjust block offset table
1806   OrderAccess::storestore();
1807   assert(chunk->is_free() && ffc->is_free(), "Error");
1808   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1809   if (rem_sz < SmallForDictionary) {
1810     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1811     bool is_par = Thread::current()->is_GC_task_thread();
1812     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1813     returnChunkToFreeList(ffc);
1814     split(size, rem_sz);
1815     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1816   } else {
1817     returnChunkToDictionary(ffc);
1818     split(size, rem_sz);
1819   }
1820   chunk->set_size(new_size);
1821   return chunk;
1822 }
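
// Illustrative example for splitChunkAndReturnRemainder() above (hypothetical
// sizes): splitting a 100-word free chunk with new_size == 60 creates a
// 40-word remainder starting 60 words in.  The remainder is marked free, the
// BOT learns about the new boundary, the remainder goes to the indexed lists
// (if 40 < SmallForDictionary) or the dictionary, split(100, 40) updates the
// census, and the original chunk is returned with its size trimmed to 60.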
1823 
1824 void
1825 CompactibleFreeListSpace::sweep_completed() {
1826   // Now that space is probably plentiful, refill linear
1827   // allocation blocks as needed.
1828   refillLinearAllocBlocksIfNeeded();
1829 }
1830 
1831 void
1832 CompactibleFreeListSpace::gc_prologue() {
1833   assert_locked();
1834   reportFreeListStatistics("Before GC:");
1835   refillLinearAllocBlocksIfNeeded();
1836 }
1837 
1838 void
1839 CompactibleFreeListSpace::gc_epilogue() {
1840   assert_locked();
1841   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1842   _promoInfo.stopTrackingPromotions();
1843   repairLinearAllocationBlocks();
1844   reportFreeListStatistics("After GC:");
1845 }
1846 
1847 // Iteration support, mostly delegated from a CMS generation
1848 
1849 void CompactibleFreeListSpace::save_marks() {
1850   assert(Thread::current()->is_VM_thread(),
1851          "Global variable should only be set when single-threaded");
1852   // Mark the "end" of the used space at the time of this call;
1853   // note, however, that promoted objects from this point
1854   // on are tracked in the _promoInfo below.
1855   set_saved_mark_word(unallocated_block());
1856 #ifdef ASSERT
1857   // Check the sanity of save_marks() etc.
1858   MemRegion ur    = used_region();
1859   MemRegion urasm = used_region_at_save_marks();
1860   assert(ur.contains(urasm),
1861          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1862          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1863          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1864 #endif
1865   // inform allocator that promotions should be tracked.
1866   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1867   _promoInfo.startTrackingPromotions();
1868 }
1869 
1870 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1871   assert(_promoInfo.tracking(), "No preceding save_marks?");
1872   return _promoInfo.noPromotions();
1873 }
1874 
1875 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1876                                                                             \
1877 void CompactibleFreeListSpace::                                             \
1878 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1879   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1880   /*                                                                        \
1881    * This also restores any displaced headers and removes the elements from \
1882    * the iteration set as they are processed, so that we have a clean slate \
1883    * at the end of the iteration. Note, thus, that if new objects are       \
1884    * promoted as a result of the iteration they are iterated over as well.  \
1885    */                                                                       \
1886   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1887 }
1888 
1889 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1890 
1891 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1892   return _smallLinearAllocBlock._word_size == 0;
1893 }
1894 
1895 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1896   // Fix up linear allocation blocks to look like free blocks
1897   repairLinearAllocBlock(&_smallLinearAllocBlock);
1898 }
1899 
1900 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1901   assert_locked();
1902   if (blk->_ptr != NULL) {
1903     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1904            "Minimum block size requirement");
1905     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1906     fc->set_size(blk->_word_size);
1907     fc->link_prev(NULL);   // mark as free
1908     fc->dontCoalesce();
1909     assert(fc->is_free(), "just marked it free");
1910     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1911   }
1912 }
1913 
1914 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1915   assert_locked();
1916   if (_smallLinearAllocBlock._ptr == NULL) {
1917     assert(_smallLinearAllocBlock._word_size == 0,
1918       "Size of linAB should be zero if the ptr is NULL");
1919     // Reset the linAB refill and allocation size limit.
1920     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1921   }
1922   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1923 }
1924 
1925 void
1926 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1927   assert_locked();
1928   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1929          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1930          "blk invariant");
1931   if (blk->_ptr == NULL) {
1932     refillLinearAllocBlock(blk);
1933   }
1934 }
1935 
1936 void
1937 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1938   assert_locked();
1939   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1940          "linear allocation block should be empty");
1941   FreeChunk* fc;
1942   if (blk->_refillSize < SmallForDictionary &&
1943       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1944     // A linAB's strategy might be to use small sizes to reduce
1945     // fragmentation but still get the benefits of allocation from a
1946     // linAB.
1947   } else {
1948     fc = getChunkFromDictionary(blk->_refillSize);
1949   }
1950   if (fc != NULL) {
1951     blk->_ptr  = (HeapWord*)fc;
1952     blk->_word_size = fc->size();
1953     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1954   }
1955 }
1956 
1957 // Support for compaction
1958 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1959   scan_and_forward(this, cp);
1960   // prepare_for_compaction() uses the space between live objects
1961   // so that a later phase can skip dead space quickly.  As a result,
1962   // verification of the free lists doesn't work afterwards.
1963 }
1964 
1965 void CompactibleFreeListSpace::adjust_pointers() {
1966   // In other versions of adjust_pointers(), a bail out
1967   // based on the amount of live data in the generation
1968   // (i.e., if 0, bail out) may be used.
1969   // Cannot test used() == 0 here because the free lists have already
1970   // been mangled by the compaction.
1971 
1972   scan_and_adjust_pointers(this);
1973   // See note about verification in prepare_for_compaction().
1974 }
1975 
1976 void CompactibleFreeListSpace::compact() {
1977   scan_and_compact(this);
1978 }
1979 
1980 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1981 // where fbs is free block sizes
1982 double CompactibleFreeListSpace::flsFrag() const {
1983   size_t itabFree = totalSizeInIndexedFreeLists();
1984   double frag = 0.0;
1985   size_t i;
1986 
1987   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1988     double sz  = i;
1989     frag      += _indexedFreeList[i].count() * (sz * sz);
1990   }
1991 
1992   double totFree = itabFree +
1993                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1994   if (totFree > 0) {
1995     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1996             (totFree * totFree));
1997     frag = (double)1.0  - frag;
1998   } else {
1999     assert(frag == 0.0, "Follows from totFree == 0");
2000   }
2001   return frag;
2002 }
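
// A small worked example of the fragmentation metric computed by flsFrag()
// above (illustrative only): with free blocks of sizes {2, 2, 4} words,
// sum of fbs**2 = 4 + 4 + 16 = 24 and (sum of fbs)**2 = 8 * 8 = 64, so
// flsFrag() = 1 - 24/64 = 0.625.  A single 8-word free block instead gives
// 1 - 64/64 = 0: many small blocks push the metric towards 1, while one
// large block pushes it towards 0.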
2003 
2004 void CompactibleFreeListSpace::beginSweepFLCensus(
2005   float inter_sweep_current,
2006   float inter_sweep_estimate,
2007   float intra_sweep_estimate) {
2008   assert_locked();
2009   size_t i;
2010   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2011     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2012     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2013     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2014     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2015     fl->set_before_sweep(fl->count());
2016     fl->set_bfr_surp(fl->surplus());
2017   }
2018   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2019                                     inter_sweep_current,
2020                                     inter_sweep_estimate,
2021                                     intra_sweep_estimate);
2022 }
2023 
2024 void CompactibleFreeListSpace::setFLSurplus() {
2025   assert_locked();
2026   size_t i;
2027   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2028     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2029     fl->set_surplus(fl->count() -
2030                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2031   }
2032 }
2033 
2034 void CompactibleFreeListSpace::setFLHints() {
2035   assert_locked();
2036   size_t i;
2037   size_t h = IndexSetSize;
2038   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2039     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2040     fl->set_hint(h);
2041     if (fl->surplus() > 0) {
2042       h = i;
2043     }
2044   }
2045 }
2046 
2047 void CompactibleFreeListSpace::clearFLCensus() {
2048   assert_locked();
2049   size_t i;
2050   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2051     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2052     fl->set_prev_sweep(fl->count());
2053     fl->set_coal_births(0);
2054     fl->set_coal_deaths(0);
2055     fl->set_split_births(0);
2056     fl->set_split_deaths(0);
2057   }
2058 }
2059 
2060 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2061   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2062   setFLSurplus();
2063   setFLHints();
2064   printFLCensus(sweep_count);
2065   clearFLCensus();
2066   assert_locked();
2067   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2068 }
2069 
2070 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2071   if (size < SmallForDictionary) {
2072     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2073     return (fl->coal_desired() < 0) ||
2074            ((int)fl->count() > fl->coal_desired());
2075   } else {
2076     return dictionary()->coal_dict_over_populated(size);
2077   }
2078 }
2079 
2080 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2081   assert(size < SmallForDictionary, "Size too large for indexed list");
2082   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2083   fl->increment_coal_births();
2084   fl->increment_surplus();
2085 }
2086 
2087 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2088   assert(size < SmallForDictionary, "Size too large for indexed list");
2089   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2090   fl->increment_coal_deaths();
2091   fl->decrement_surplus();
2092 }
2093 
2094 void CompactibleFreeListSpace::coalBirth(size_t size) {
2095   if (size  < SmallForDictionary) {
2096     smallCoalBirth(size);
2097   } else {
2098     dictionary()->dict_census_update(size,
2099                                    false /* split */,
2100                                    true /* birth */);
2101   }
2102 }
2103 
2104 void CompactibleFreeListSpace::coalDeath(size_t size) {
2105   if (size  < SmallForDictionary) {
2106     smallCoalDeath(size);
2107   } else {
2108     dictionary()->dict_census_update(size,
2109                                    false /* split */,
2110                                    false /* birth */);
2111   }
2112 }
2113 
2114 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2115   assert(size < SmallForDictionary, "Size too large for indexed list");
2116   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2117   fl->increment_split_births();
2118   fl->increment_surplus();
2119 }
2120 
2121 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2122   assert(size < SmallForDictionary, "Size too large for indexed list");
2123   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2124   fl->increment_split_deaths();
2125   fl->decrement_surplus();
2126 }
2127 
2128 void CompactibleFreeListSpace::split_birth(size_t size) {
2129   if (size  < SmallForDictionary) {
2130     smallSplitBirth(size);
2131   } else {
2132     dictionary()->dict_census_update(size,
2133                                    true /* split */,
2134                                    true /* birth */);
2135   }
2136 }
2137 
2138 void CompactibleFreeListSpace::splitDeath(size_t size) {
2139   if (size  < SmallForDictionary) {
2140     smallSplitDeath(size);
2141   } else {
2142     dictionary()->dict_census_update(size,
2143                                    true /* split */,
2144                                    false /* birth */);
2145   }
2146 }
2147 
2148 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2149   size_t to2 = from - to1;
2150   splitDeath(from);
2151   split_birth(to1);
2152   split_birth(to2);
2153 }
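
// Census bookkeeping example for split() above (illustrative only): splitting
// a 100-word chunk to satisfy a 60-word request records one split death at
// size 100 and one split birth each at sizes 60 and 40 (to2 = 100 - 60).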
2154 
2155 void CompactibleFreeListSpace::print() const {
2156   print_on(tty);
2157 }
2158 
2159 void CompactibleFreeListSpace::prepare_for_verify() {
2160   assert_locked();
2161   repairLinearAllocationBlocks();
2162   // Verify that the SpoolBlocks look like free blocks of
2163   // appropriate sizes... To be done ...
2164 }
2165 
2166 class VerifyAllBlksClosure: public BlkClosure {
2167  private:
2168   const CompactibleFreeListSpace* _sp;
2169   const MemRegion                 _span;
2170   HeapWord*                       _last_addr;
2171   size_t                          _last_size;
2172   bool                            _last_was_obj;
2173   bool                            _last_was_live;
2174 
2175  public:
2176   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2177     MemRegion span) :  _sp(sp), _span(span),
2178                        _last_addr(NULL), _last_size(0),
2179                        _last_was_obj(false), _last_was_live(false) { }
2180 
2181   virtual size_t do_blk(HeapWord* addr) {
2182     size_t res;
2183     bool   was_obj  = false;
2184     bool   was_live = false;
2185     if (_sp->block_is_obj(addr)) {
2186       was_obj = true;
2187       oop p = oop(addr);
2188       guarantee(p->is_oop(), "Should be an oop");
2189       res = _sp->adjustObjectSize(p->size());
2190       if (_sp->obj_is_alive(addr)) {
2191         was_live = true;
2192         p->verify();
2193       }
2194     } else {
2195       FreeChunk* fc = (FreeChunk*)addr;
2196       res = fc->size();
2197       if (FLSVerifyLists && !fc->cantCoalesce()) {
2198         guarantee(_sp->verify_chunk_in_free_list(fc),
2199                   "Chunk should be on a free list");
2200       }
2201     }
2202     if (res == 0) {
2203       LogHandle(gc, verify) log;
2204       log.error("Livelock: no rank reduction!");
2205       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2206                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2207         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2208         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2209       ResourceMark rm;
2210       _sp->print_on(log.error_stream());
2211       guarantee(false, "Verification failed.");
2212     }
2213     _last_addr = addr;
2214     _last_size = res;
2215     _last_was_obj  = was_obj;
2216     _last_was_live = was_live;
2217     return res;
2218   }
2219 };
2220 
2221 class VerifyAllOopsClosure: public OopClosure {
2222  private:
2223   const CMSCollector*             _collector;
2224   const CompactibleFreeListSpace* _sp;
2225   const MemRegion                 _span;
2226   const bool                      _past_remark;
2227   const CMSBitMap*                _bit_map;
2228 
2229  protected:
2230   void do_oop(void* p, oop obj) {
2231     if (_span.contains(obj)) { // the interior oop points into CMS heap
2232       if (!_span.contains(p)) { // reference from outside CMS heap
2233         // Should be a valid object; the first disjunct below allows
2234         // us to sidestep an assertion in block_is_obj() that insists
2235         // that p be in _sp. Note that several generations (and spaces)
2236         // are spanned by _span (CMS heap) above.
2237         guarantee(!_sp->is_in_reserved(obj) ||
2238                   _sp->block_is_obj((HeapWord*)obj),
2239                   "Should be an object");
2240         guarantee(obj->is_oop(), "Should be an oop");
2241         obj->verify();
2242         if (_past_remark) {
2243           // Remark has been completed, the object should be marked
2244           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2245         }
2246       } else { // reference within CMS heap
2247         if (_past_remark) {
2248           // Remark has been completed -- so the referent should have
2249           // been marked, if referring object is.
2250           if (_bit_map->isMarked(_collector->block_start(p))) {
2251             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2252           }
2253         }
2254       }
2255     } else if (_sp->is_in_reserved(p)) {
2256       // the reference is from FLS, and points out of FLS
2257       guarantee(obj->is_oop(), "Should be an oop");
2258       obj->verify();
2259     }
2260   }
2261 
2262   template <class T> void do_oop_work(T* p) {
2263     T heap_oop = oopDesc::load_heap_oop(p);
2264     if (!oopDesc::is_null(heap_oop)) {
2265       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2266       do_oop(p, obj);
2267     }
2268   }
2269 
2270  public:
2271   VerifyAllOopsClosure(const CMSCollector* collector,
2272     const CompactibleFreeListSpace* sp, MemRegion span,
2273     bool past_remark, CMSBitMap* bit_map) :
2274     _collector(collector), _sp(sp), _span(span),
2275     _past_remark(past_remark), _bit_map(bit_map) { }
2276 
2277   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2278   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2279 };
2280 
2281 void CompactibleFreeListSpace::verify() const {
2282   assert_lock_strong(&_freelistLock);
2283   verify_objects_initialized();
2284   MemRegion span = _collector->_span;
2285   bool past_remark = (_collector->abstract_state() ==
2286                       CMSCollector::Sweeping);
2287 
2288   ResourceMark rm;
2289   HandleMark  hm;
2290 
2291   // Check integrity of CFL data structures
2292   _promoInfo.verify();
2293   _dictionary->verify();
2294   if (FLSVerifyIndexTable) {
2295     verifyIndexedFreeLists();
2296   }
2297   // Check integrity of all objects and free blocks in space
2298   {
2299     VerifyAllBlksClosure cl(this, span);
2300     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2301   }
2302   // Check that all references in the heap to FLS
2303   // are to valid objects in FLS or that references in
2304   // FLS are to valid objects elsewhere in the heap
2305   if (FLSVerifyAllHeapReferences)
2306   {
2307     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2308       _collector->markBitMap());
2309 
2310     // Iterate over all oops in the heap. Uses the _no_header version
2311     // since we are not interested in following the klass pointers.
2312     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2313   }
2314 
2315   if (VerifyObjectStartArray) {
2316     // Verify the block offset table
2317     _bt.verify();
2318   }
2319 }
2320 
2321 #ifndef PRODUCT
2322 void CompactibleFreeListSpace::verifyFreeLists() const {
2323   if (FLSVerifyLists) {
2324     _dictionary->verify();
2325     verifyIndexedFreeLists();
2326   } else {
2327     if (FLSVerifyDictionary) {
2328       _dictionary->verify();
2329     }
2330     if (FLSVerifyIndexTable) {
2331       verifyIndexedFreeLists();
2332     }
2333   }
2334 }
2335 #endif
2336 
2337 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2338   size_t i = 0;
2339   for (; i < IndexSetStart; i++) {
2340     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2341   }
2342   for (; i < IndexSetSize; i++) {
2343     verifyIndexedFreeList(i);
2344   }
2345 }
2346 
2347 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2348   FreeChunk* fc   =  _indexedFreeList[size].head();
2349   FreeChunk* tail =  _indexedFreeList[size].tail();
2350   size_t    num = _indexedFreeList[size].count();
2351   size_t      n = 0;
2352   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2353             "Slot should have been empty");
2354   for (; fc != NULL; fc = fc->next(), n++) {
2355     guarantee(fc->size() == size, "Size inconsistency");
2356     guarantee(fc->is_free(), "!free?");
2357     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2358     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2359   }
2360   guarantee(n == num, "Incorrect count");
2361 }
2362 
2363 #ifndef PRODUCT
2364 void CompactibleFreeListSpace::check_free_list_consistency() const {
2365   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2366     "Some sizes can't be allocated without recourse to"
2367     " linear allocation buffers");
2368   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2369     "else MIN_TREE_CHUNK_SIZE is wrong");
2370   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2371   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2372 }
2373 #endif
2374 
2375 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2376   assert_lock_strong(&_freelistLock);
2377   LogHandle(gc, freelist, census) log;
2378   if (!log.is_debug()) {
2379     return;
2380   }
2381   AdaptiveFreeList<FreeChunk> total;
2382   log.debug("end sweep# " SIZE_FORMAT, sweep_count);
2383   ResourceMark rm;
2384   outputStream* out = log.debug_stream();
2385   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2386   size_t total_free = 0;
2387   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2388     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2389     total_free += fl->count() * fl->size();
2390     if (i % (40*IndexSetStride) == 0) {
2391       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2392     }
2393     fl->print_on(out);
2394     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2395     total.set_surplus(    total.surplus()     + fl->surplus()    );
2396     total.set_desired(    total.desired()     + fl->desired()    );
2397     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2398     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2399     total.set_count(      total.count()       + fl->count()      );
2400     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2401     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2402     total.set_split_births(total.split_births() + fl->split_births());
2403     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2404   }
2405   total.print_on(out, "TOTAL");
2406   log.debug("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2407   log.debug("growth: %8.5f  deficit: %8.5f",
2408             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2409                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2410             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2411   _dictionary->print_dict_census(out);
2412 }
2413 
2414 ///////////////////////////////////////////////////////////////////////////
2415 // CompactibleFreeListSpaceLAB
2416 ///////////////////////////////////////////////////////////////////////////
2417 
2418 #define VECTOR_257(x)                                                                                  \
2419   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2420   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2421      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2422      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2423      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2424      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2425      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2426      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2427      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2428      x }
2429 
2430 // Initialize with the default setting for CMS, _not_ the
2431 // generic OldPLABSize, whose static default is different; if overridden on the
2432 // command line, this will get reinitialized via a call to
2433 // modify_initialization() below.
2434 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2435   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2436 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2437 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2438 
2439 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2440   _cfls(cfls)
2441 {
2442   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2443   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2444        i < CompactibleFreeListSpace::IndexSetSize;
2445        i += CompactibleFreeListSpace::IndexSetStride) {
2446     _indexedFreeList[i].set_size(i);
2447     _num_blocks[i] = 0;
2448   }
2449 }
2450 
2451 static bool _CFLS_LAB_modified = false;
2452 
2453 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2454   assert(!_CFLS_LAB_modified, "Call only once");
2455   _CFLS_LAB_modified = true;
2456   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2457        i < CompactibleFreeListSpace::IndexSetSize;
2458        i += CompactibleFreeListSpace::IndexSetStride) {
2459     _blocks_to_claim[i].modify(n, wt, true /* force */);
2460   }
2461 }
2462 
2463 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2464   FreeChunk* res;
2465   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2466   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2467     // This locking manages sync with other large object allocations.
2468     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2469                     Mutex::_no_safepoint_check_flag);
2470     res = _cfls->getChunkFromDictionaryExact(word_sz);
2471     if (res == NULL) return NULL;
2472   } else {
2473     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2474     if (fl->count() == 0) {
2475       // Attempt to refill this local free list.
2476       get_from_global_pool(word_sz, fl);
2477       // If it didn't work, give up.
2478       if (fl->count() == 0) return NULL;
2479     }
2480     res = fl->get_chunk_at_head();
2481     assert(res != NULL, "Why was count non-zero?");
2482   }
2483   res->markNotFree();
2484   assert(!res->is_free(), "shouldn't be marked free");
2485   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2486   // mangle a just allocated object with a distinct pattern.
2487   debug_only(res->mangleAllocated(word_sz));
2488   return (HeapWord*)res;
2489 }
2490 
2491 // Get a chunk of blocks of the right size and update related
2492 // book-keeping stats
2493 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2494   // Get the #blocks we want to claim
2495   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2496   assert(n_blks > 0, "Error");
2497   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2498   // In some cases, when the application has a phase change,
2499   // there may be a sudden and sharp shift in the object survival
2500   // profile, and updating the counts at the end of a scavenge
2501   // may not be quick enough, giving rise to large scavenge pauses
2502   // during these phase changes. It is beneficial to detect such
2503   // changes on-the-fly during a scavenge and avoid such a phase-change
2504   // pothole. The following code is a heuristic attempt to do that.
2505   // It is protected by a product flag until we have gained
2506   // enough experience with this heuristic and fine-tuned its behavior.
2507   // WARNING: This might increase fragmentation if we overreact to
2508   // small spikes, so some kind of historical smoothing based on
2509   // previous experience with the greater reactivity might be useful.
2510   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2511   // default.
2512   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2513     //
2514     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2515     // which is why there is a cast to double.
2516     //
2517     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2518     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2519     n_blks = MIN2(n_blks, CMSOldPLABMax);
2520   }
2521   assert(n_blks > 0, "Error");
2522   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2523   // Update stats table entry for this block size
2524   _num_blocks[word_sz] += fl->count();
2525 }
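
// Worked example of the resizing heuristic in get_from_global_pool() above
// (all numbers hypothetical, not the flag defaults): with n_blks == 50,
// _num_blocks[word_sz] == 2000, CMSOldPLABToleranceFactor == 4,
// CMSOldPLABNumRefills == 4 and CMSOldPLABReactivityFactor == 2, the
// denominator is 4 * 4 * 50 == 800, so multiple == 2 and n_blks grows by
// 2 * 2 * 50 == 200 to 250 (subject to the CMSOldPLABMax clamp).  Demand well
// above the tolerated level thus inflates the claim size during the scavenge
// rather than waiting for the end-of-scavenge update.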
2526 
2527 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2528   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2529        i < CompactibleFreeListSpace::IndexSetSize;
2530        i += CompactibleFreeListSpace::IndexSetStride) {
2531     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2532            "Counter inconsistency");
2533     if (_global_num_workers[i] > 0) {
2534       // Need to smooth wrt historical average
2535       if (ResizeOldPLAB) {
2536         _blocks_to_claim[i].sample(
2537           MAX2(CMSOldPLABMin,
2538           MIN2(CMSOldPLABMax,
2539                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2540       }
2541       // Reset counters for next round
2542       _global_num_workers[i] = 0;
2543       _global_num_blocks[i] = 0;
2544       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2545     }
2546   }
2547 }
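
// Worked example for the sampling in compute_desired_plab_size() above
// (hypothetical numbers, not flag defaults): if workers together consumed
// 1200 blocks of size i during the last scavenge with 4 contributing workers
// and CMSOldPLABNumRefills == 4, the new sample is 1200 / 4 / 4 == 75 blocks
// per refill, clamped to [CMSOldPLABMin, CMSOldPLABMax] and folded into the
// weighted average that get_from_global_pool() consults on the next refill.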
2548 
2549 // If this is changed in the future to allow parallel
2550 // access, one would need to take the FL locks and,
2551 // depending on how it is used, stagger access from
2552 // parallel threads to reduce contention.
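     // retire() gives back to the space any blocks this LAB obtained but did not
     // use: the unused blocks still sitting on the per-thread indexed free lists
     // are prepended to the space's corresponding lists, while the counts of
     // blocks actually consumed are folded into the global per-size statistics
     // that compute_desired_plab_size() later uses for resizing.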
2553 void CompactibleFreeListSpaceLAB::retire(int tid) {
2554   // We run this single threaded with the world stopped;
2555   // so no need for locks and such.
2556   NOT_PRODUCT(Thread* t = Thread::current();)
2557   assert(t->is_VM_thread(), "Error");
2558   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2559        i < CompactibleFreeListSpace::IndexSetSize;
2560        i += CompactibleFreeListSpace::IndexSetStride) {
2561     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2562            "Can't retire more than what we obtained");
2563     if (_num_blocks[i] > 0) {
2564       size_t num_retire =  _indexedFreeList[i].count();
2565       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2566       {
2567         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2568         //                Mutex::_no_safepoint_check_flag);
2569 
2570         // Update globals stats for num_blocks used
2571         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2572         _global_num_workers[i]++;
2573         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2574         if (num_retire > 0) {
2575           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2576           // Reset this list.
2577           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2578           _indexedFreeList[i].set_size(i);
2579         }
2580       }
2581       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2582                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2583       // Reset stats for next round
2584       _num_blocks[i]         = 0;
2585     }
2586   }
2587 }
2588 
2589 // Used by par_get_chunk_of_blocks() for the chunks from the
2590 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2591 // of "word_sz" and, if found, splits it into "word_sz"-sized chunks,
2592 // adding them to the free list "fl".  "n" is the maximum number of
2593 // chunks to be added to "fl".
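     // For example (hypothetical request), with word_sz == 4 and n == 100 we first
     // try the size-4 list; if it is empty and CMSSplitIndexedFreeListBlocks is
     // set, we try the size-8 list (k == 2), taking up to nn == 50 chunks and
     // splitting each of them in two, then the size-12 list (k == 3), and so on
     // while k*word_sz still fits in the indexed set.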
2594 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2595 
2596   // We'll try all multiples of word_sz in the indexed set, starting with
2597   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples;
2598   // if that fails, the caller falls back to splitting a big dictionary chunk.
2599   {
2600     bool found;
2601     int  k;
2602     size_t cur_sz;
2603     for (k = 1, cur_sz = k * word_sz, found = false;
2604          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2605          (CMSSplitIndexedFreeListBlocks || k <= 1);
2606          k++, cur_sz = k * word_sz) {
2607       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2608       fl_for_cur_sz.set_size(cur_sz);
2609       {
2610         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2611                         Mutex::_no_safepoint_check_flag);
2612         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2613         if (gfl->count() != 0) {
2614           // nn is the number of chunks of size cur_sz that
2615           // we'd need to split k-ways each, in order to create
2616           // "n" chunks of size word_sz each.
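               // For example (illustrative), with n == 100 and k == 4 we take
               // nn == 25 chunks of size 4*word_sz and split each of them 4 ways.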
2617           const size_t nn = MAX2(n/k, (size_t)1);
2618           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2619           found = true;
2620           if (k > 1) {
2621             // Update split death stats for the cur_sz-size blocks list:
2622             // we increment the split death count by the number of blocks
2623             // we just took from the cur_sz-size blocks list and which
2624             // we will be splitting below.
2625             ssize_t deaths = gfl->split_deaths() +
2626                              fl_for_cur_sz.count();
2627             gfl->set_split_deaths(deaths);
2628           }
2629         }
2630       }
2631       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2632       if (found) {
2633         if (k == 1) {
2634           fl->prepend(&fl_for_cur_sz);
2635         } else {
2636           // Divide each block on fl_for_cur_sz up k ways.
2637           FreeChunk* fc;
2638           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2639             // Must do this in reverse order, so that anybody attempting to
2640             // access the main chunk sees it as a single free block until we
2641             // change it.
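                 // That is, we carve blocks off the high-address end first, so the
                 // original chunk's header continues to describe one large free
                 // block until its own size is finally reduced at i == 0.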
2642             size_t fc_size = fc->size();
2643             assert(fc->is_free(), "Error");
2644             for (int i = k-1; i >= 0; i--) {
2645               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2646               assert((i != 0) ||
2647                         ((fc == ffc) && ffc->is_free() &&
2648                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2649                         "Counting error");
2650               ffc->set_size(word_sz);
2651               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2652               ffc->link_next(NULL);
2653               // Above must occur before BOT is updated below.
2654               OrderAccess::storestore();
2655               // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2656               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2657               fc_size -= word_sz;
2658               assert(fc_size == i*word_sz, "Error");
2659               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2660               _bt.verify_single_block((HeapWord*)fc, fc_size);
2661               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2662               // Push this on "fl".
2663               fl->return_chunk_at_head(ffc);
2664             }
2665             // TRAP
2666             assert(fl->tail()->next() == NULL, "List invariant.");
2667           }
2668         }
2669         // Update birth stats for this block size.
2670         size_t num = fl->count();
2671         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2672                         Mutex::_no_safepoint_check_flag);
2673         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2674         _indexedFreeList[word_sz].set_split_births(births);
2675         return true;
2676       }
2677     }
2678     return found;
2679   }
2680 }
2681 
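     // Fetch from the dictionary a free chunk large enough to be split into "n"
     // blocks of "word_sz" words, settling for fewer blocks if no such chunk is
     // available.  Any remainder is either returned to the dictionary (if it is
     // large) or, after the dictionary lock is released, to the appropriate
     // indexed free list.  Returns a free chunk of exactly n*word_sz words, or
     // NULL if nothing suitable was found.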
2682 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2683 
2684   FreeChunk* fc = NULL;
2685   FreeChunk* rem_fc = NULL;
2686   size_t rem;
2687   {
2688     MutexLockerEx x(parDictionaryAllocLock(),
2689                     Mutex::_no_safepoint_check_flag);
2690     while (n > 0) {
2691       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2692                                   FreeBlockDictionary<FreeChunk>::atLeast);
2693       if (fc != NULL) {
2694         break;
2695       } else {
2696         n--;
2697       }
2698     }
2699     if (fc == NULL) return NULL;
2700     // Otherwise, split up that block.
2701     assert((ssize_t)n >= 1, "Control point invariant");
2702     assert(fc->is_free(), "Error: should be a free block");
2703     _bt.verify_single_block((HeapWord*)fc, fc->size());
2704     const size_t nn = fc->size() / word_sz;
2705     n = MIN2(nn, n);
2706     assert((ssize_t)n >= 1, "Control point invariant");
2707     rem = fc->size() - n * word_sz;
2708     // If there is a remainder, and it's too small, allocate one fewer.
2709     if (rem > 0 && rem < MinChunkSize) {
2710       n--; rem += word_sz;
2711     }
2712     // Note that at this point we may have n == 0.
2713     assert((ssize_t)n >= 0, "Control point invariant");
2714 
2715     // If n is 0, the chunk fc that was found is not large
2716     // enough to supply even a single block of word_sz words
2717     // while leaving a viable remainder.  Return fc to the
2718     // dictionary and return NULL; the caller's list stays empty.
2719     if (n == 0) {
2720       returnChunkToDictionary(fc);
2721       return NULL;
2722     }
2723 
2724     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2725     dictionary()->dict_census_update(fc->size(),
2726                                      true /*split*/,
2727                                      false /*birth*/);
2728 
2729     // First return the remainder, if any.
2730     // Note that we hold the lock until we decide if we're going to give
2731     // back the remainder to the dictionary, since a concurrent allocation
2732     // may otherwise see the heap as empty.  (We're willing to take that
2733     // hit if the block is a small block.)
2734     if (rem > 0) {
2735       size_t prefix_size = n * word_sz;
2736       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2737       rem_fc->set_size(rem);
2738       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2739       rem_fc->link_next(NULL);
2740       // Above must occur before BOT is updated below.
2741       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2742       OrderAccess::storestore();
2743       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2744       assert(fc->is_free(), "Error");
2745       fc->set_size(prefix_size);
2746       if (rem >= IndexSetSize) {
2747         returnChunkToDictionary(rem_fc);
2748         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2749         rem_fc = NULL;
2750       }
2751       // Otherwise, return it to the small list below.
2752     }
2753   }
2754   if (rem_fc != NULL) {
2755     MutexLockerEx x(_indexedFreeListParLocks[rem],
2756                     Mutex::_no_safepoint_check_flag);
2757     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2758     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2759     smallSplitBirth(rem);
2760   }
2761   assert(n * word_sz == fc->size(),
2762          "Chunk size " SIZE_FORMAT " is not exactly splittable into "
2763          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2764          fc->size(), n, word_sz);
2765   return fc;
2766 }
2767 
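     // Used by par_get_chunk_of_blocks() when the indexed free lists could not
     // satisfy the request: obtain an n-way splittable chunk from the dictionary
     // via get_n_way_chunk_to_split(), carve it into blocks of "word_sz" words,
     // push them onto "fl", and record the resulting split births.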
2768 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2769 
2770   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2771 
2772   if (fc == NULL) {
2773     return;
2774   }
2775 
2776   size_t n = fc->size() / word_sz;
2777 
2778   assert((ssize_t)n > 0, "Consistency");
2779   // Now do the splitting up.
2780   // Must do this in reverse order, so that anybody attempting to
2781   // access the main chunk sees it as a single free block until we
2782   // change it.
2783   size_t fc_size = n * word_sz;
2784   // All but first chunk in this loop
2785   for (ssize_t i = n-1; i > 0; i--) {
2786     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2787     ffc->set_size(word_sz);
2788     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2789     ffc->link_next(NULL);
2790     // Above must occur before BOT is updated below.
2791     OrderAccess::storestore();
2792     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2793     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2794     fc_size -= word_sz;
2795     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2796     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2797     _bt.verify_single_block((HeapWord*)fc, fc_size);
2798     // Push this on "fl".
2799     fl->return_chunk_at_head(ffc);
2800   }
2801   // First chunk
2802   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2803   // The blocks above should show their new sizes before the first block below
2804   fc->set_size(word_sz);
2805   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2806   fc->link_next(NULL);
2807   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2808   _bt.verify_single_block((HeapWord*)fc, fc->size());
2809   fl->return_chunk_at_head(fc);
2810 
2811   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2812   {
2813     // Update the stats for this block size.
2814     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2815                     Mutex::_no_safepoint_check_flag);
2816     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2817     _indexedFreeList[word_sz].set_split_births(births);
2818     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2819     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2820   }
2821 
2822   // TRAP
2823   assert(fl->tail()->next() == NULL, "List invariant.");
2824 }
2825 
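     // Claim "n" blocks of "word_sz" words to refill a parallel worker's old-gen
     // PLAB: first try to assemble them from the indexed free lists; failing
     // that, split a large block taken from the dictionary.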
2826 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2827   assert(fl->count() == 0, "Precondition.");
2828   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2829          "Precondition");
2830 
2831   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2832     // Got it
2833     return;
2834   }
2835 
2836   // Otherwise, we'll split a block from the dictionary.
2837   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2838 }
2839 
2840 // Set up the space's par_seq_tasks structure for work claiming
2841 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2842 // XXX Need to suitably abstract and generalize this and the next
2843 // method into one.
2844 void
2845 CompactibleFreeListSpace::
2846 initialize_sequential_subtasks_for_rescan(int n_threads) {
2847   // The "size" of each task is fixed according to rescan_task_size.
2848   assert(n_threads > 0, "Unexpected n_threads argument");
2849   const size_t task_size = rescan_task_size();
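       // Round up so that any partial task at the end of the region is counted;
       // e.g. (illustrative numbers), a used region of 1,000,000 words with a
       // task_size of 4096 words yields (1,000,000 + 4095)/4096 == 245 tasks.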
2850   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2851   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2852   assert(n_tasks == 0 ||
2853          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2854           (used_region().start() + n_tasks*task_size >= used_region().end())),
2855          "n_tasks calculation incorrect");
2856   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2857   assert(!pst->valid(), "Clobbering existing data?");
2858   // Sets the condition for completion of the subtask (how many threads
2859   // need to finish in order to be done).
2860   pst->set_n_threads(n_threads);
2861   pst->set_n_tasks((int)n_tasks);
2862 }
2863 
2864 // Set up the space's par_seq_tasks structure for work claiming
2865 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2866 void
2867 CompactibleFreeListSpace::
2868 initialize_sequential_subtasks_for_marking(int n_threads,
2869                                            HeapWord* low) {
2870   // The "size" of each task is fixed according to marking_task_size.
2871   assert(n_threads > 0, "Unexpected n_threads argument");
2872   const size_t task_size = marking_task_size();
2873   assert(task_size > CardTableModRefBS::card_size_in_words &&
2874          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2875          "Otherwise arithmetic below would be incorrect");
2876   MemRegion span = _old_gen->reserved();
2877   if (low != NULL) {
2878     if (span.contains(low)) {
2879       // Align low down to a card boundary so that
2880       // we can use block_offset_careful() on span boundaries.
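           // For instance, with a 512-byte card size, a "low" of 0x7f3a12345678
           // would be rounded down to 0x7f3a12345600 (an illustrative address,
           // shown only to make the alignment concrete).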
2881       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2882                                  CardTableModRefBS::card_size);
2883       // Clip span prefix at aligned_low
2884       span = span.intersection(MemRegion(aligned_low, span.end()));
2885     } else if (low > span.end()) {
2886       span = MemRegion(low, low);  // Null region
2887     } // else use entire span
2888   }
2889   assert(span.is_empty() ||
2890          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2891         "span should start at a card boundary");
2892   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2893   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2894   assert(n_tasks == 0 ||
2895          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2896           (span.start() + n_tasks*task_size >= span.end())),
2897          "n_tasks calculation incorrect");
2898   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2899   assert(!pst->valid(), "Clobbering existing data?");
2900   // Sets the condition for completion of the subtask (how many threads
2901   // need to finish in order to be done).
2902   pst->set_n_threads(n_threads);
2903   pst->set_n_tasks((int)n_tasks);
2904 }