1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/space.inline.hpp"
  34 #include "gc/shared/spaceDecorator.hpp"
  35 #include "logging/logStream.inline.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
// Highest rank taken by the free list locks.
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
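// Example for set_cms_values() above (illustrative values only, not asserted
// by this code): if sizeof(FreeChunk) were 24 bytes on a VM with
// MinObjAlignmentInBytes == 8 and BytesPerWord == 8, min_chunk_size_in_bytes
// would round up to 24, giving MinChunkSize == 3 words, IndexSetStart == 3
// and IndexSetStride == MinObjAlignment.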
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  77   _bt(bs, mr),
  // Free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  83   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  84                 Monitor::_safepoint_check_sometimes),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true,
  87                           Monitor::_safepoint_check_never),
  88   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  89                     CMSRescanMultiple),
  90   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  91                     CMSConcMarkMultiple),
  92   _collector(NULL),
  93   _preconsumptionDirtyCardClosure(NULL)
  94 {
  95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  96          "FreeChunk is larger than expected");
  97   _bt.set_space(this);
  98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  99 
 100   _dictionary = new AFLBinaryTreeDictionary(mr);
 101 
 102   assert(_dictionary != NULL, "CMS dictionary initialization");
 103   // The indexed free lists are initially all empty and are lazily
 104   // filled in on demand. Initialize the array elements to NULL.
 105   initializeIndexedFreeListArray();
 106 
 107   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 108                              SmallForLinearAlloc);
 109 
 110   // CMSIndexedFreeListReplenish should be at least 1
 111   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 112   _promoInfo.setSpace(this);
 113   if (UseCMSBestFit) {
 114     _fitStrategy = FreeBlockBestFitFirst;
 115   } else {
 116     _fitStrategy = FreeBlockStrategyNone;
 117   }
 118   check_free_list_consistency();
 119 
 120   // Initialize locks for parallel case.
 121   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 123                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 124     DEBUG_ONLY(
 125       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 126     )
 127   }
 128   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 129 }
 130 
 131 // Like CompactibleSpace forward() but always calls cross_threshold() to
 132 // update the block offset table.  Removed initialize_threshold call because
 133 // CFLS does not use a block offset array for contiguous spaces.
 134 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 135                                     CompactPoint* cp, HeapWord* compact_top) {
 136   // q is alive
 137   // First check if we should switch compaction space
 138   assert(this == cp->space, "'this' should be current compaction space.");
 139   size_t compaction_max_size = pointer_delta(end(), compact_top);
 140   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 141     "virtual adjustObjectSize_v() method is not correct");
 142   size_t adjusted_size = adjustObjectSize(size);
 143   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 144          "no small fragments allowed");
 145   assert(minimum_free_block_size() == MinChunkSize,
 146          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 148   if (adjusted_size + MinChunkSize > compaction_max_size &&
 149       adjusted_size != compaction_max_size) {
 150     do {
 151       // switch to next compaction space
 152       cp->space->set_compaction_top(compact_top);
 153       cp->space = cp->space->next_compaction_space();
 154       if (cp->space == NULL) {
 155         cp->gen = GenCollectedHeap::heap()->young_gen();
 156         assert(cp->gen != NULL, "compaction must succeed");
 157         cp->space = cp->gen->first_compaction_space();
 158         assert(cp->space != NULL, "generation must have a first compaction space");
 159       }
 160       compact_top = cp->space->bottom();
 161       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; it was not used above in order to save
      // the virtual dispatch.
 166       adjusted_size = cp->space->adjust_object_size_v(size);
 167       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 168       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 169     } while (adjusted_size > compaction_max_size);
 170   }
 171 
 172   // store the forwarding pointer into the mark word
 173   if ((HeapWord*)q != compact_top) {
 174     q->forward_to(oop(compact_top));
 175     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 176   } else {
 177     // if the object isn't moving we can just set the mark to the default
 178     // mark and handle it specially later on.
 179     q->init_mark();
 180     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 181   }
 182 
 183   compact_top += adjusted_size;
 184 
 185   // we need to update the offset table so that the beginnings of objects can be
 186   // found during scavenge.  Note that we are updating the offset table based on
 187   // where the object will be once the compaction phase finishes.
 188 
  // Always call cross_threshold().  A contiguous space calls it only when
  // compact_top exceeds the current threshold, but that does not apply to a
  // non-contiguous space.
 192   cp->threshold =
 193     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 194   return compact_top;
 195 }
 196 
 197 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 198 // and use of single_block instead of alloc_block.  The name here is not really
 199 // appropriate - maybe a more general name could be invented for both the
 200 // contiguous and noncontiguous spaces.
 201 
 202 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 203   _bt.single_block(start, the_end);
 204   return end();
 205 }
 206 
// Initialize the indexed free lists; all are initially empty.
 208 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 209   for (size_t i = 0; i < IndexSetSize; i++) {
 210     // Note that on platforms where objects are double word aligned,
 211     // the odd array elements are not used.  It is convenient, however,
 212     // to map directly from the object size to the array element.
 213     _indexedFreeList[i].reset(IndexSetSize);
 214     _indexedFreeList[i].set_size(i);
 215     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 216     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 217     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 219   }
 220 }
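// Note on the mapping above (illustrative, assuming IndexSetStride == 2 as on
// platforms with double-word object alignment): a free chunk of k words lives
// on _indexedFreeList[k], so only every IndexSetStride-th element is ever
// populated and the skipped slots simply remain empty.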
 221 
 222 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 223   return adjustObjectSize(oop(addr)->size());
 224 }
 225 
 226 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 227   for (size_t i = 1; i < IndexSetSize; i++) {
 228     assert(_indexedFreeList[i].size() == (size_t) i,
 229       "Indexed free list sizes are incorrect");
 230     _indexedFreeList[i].reset(IndexSetSize);
 231     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 232     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 233     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 234     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 235   }
 236 }
 237 
 238 void CompactibleFreeListSpace::reset(MemRegion mr) {
 239   resetIndexedFreeListArray();
 240   dictionary()->reset();
 241   if (BlockOffsetArrayUseUnallocatedBlock) {
 242     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 243     // Everything's allocated until proven otherwise.
 244     _bt.set_unallocated_block(end());
 245   }
 246   if (!mr.is_empty()) {
 247     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 248     _bt.single_block(mr.start(), mr.word_size());
 249     FreeChunk* fc = (FreeChunk*) mr.start();
 250     fc->set_size(mr.word_size());
    if (mr.word_size() >= IndexSetSize) {
 252       returnChunkToDictionary(fc);
 253     } else {
 254       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 255       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 256     }
 257     coalBirth(mr.word_size());
 258   }
 259   _promoInfo.reset();
 260   _smallLinearAllocBlock._ptr = NULL;
 261   _smallLinearAllocBlock._word_size = 0;
 262 }
 263 
 264 void CompactibleFreeListSpace::reset_after_compaction() {
 265   // Reset the space to the new reality - one free chunk.
 266   MemRegion mr(compaction_top(), end());
 267   reset(mr);
 268   // Now refill the linear allocation block(s) if possible.
 269   refillLinearAllocBlocksIfNeeded();
 270 }
 271 
 272 // Walks the entire dictionary, returning a coterminal
 273 // chunk, if it exists. Use with caution since it involves
 274 // a potentially complete walk of a potentially large tree.
 275 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 276 
 277   assert_lock_strong(&_freelistLock);
 278 
 279   return dictionary()->find_chunk_ends_at(end());
 280 }
 281 
 282 
 283 #ifndef PRODUCT
 284 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 285   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 286     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 287   }
 288 }
 289 
 290 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 291   size_t sum = 0;
 292   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 293     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 294   }
 295   return sum;
 296 }
 297 
 298 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 299   size_t count = 0;
 300   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 301     debug_only(
 302       ssize_t total_list_count = 0;
 303       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 304          fc = fc->next()) {
 305         total_list_count++;
 306       }
 307       assert(total_list_count ==  _indexedFreeList[i].count(),
 308         "Count in list is incorrect");
 309     )
 310     count += _indexedFreeList[i].count();
 311   }
 312   return count;
 313 }
 314 
 315 size_t CompactibleFreeListSpace::totalCount() {
 316   size_t num = totalCountInIndexedFreeLists();
 317   num +=  dictionary()->total_count();
 318   if (_smallLinearAllocBlock._word_size != 0) {
 319     num++;
 320   }
 321   return num;
 322 }
 323 #endif
 324 
 325 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 326   FreeChunk* fc = (FreeChunk*) p;
 327   return fc->is_free();
 328 }
 329 
 330 size_t CompactibleFreeListSpace::used() const {
 331   return capacity() - free();
 332 }
 333 
 334 size_t CompactibleFreeListSpace::free() const {
 335   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 336   // if you do this while the structures are in flux you
 337   // may get an approximate answer only; for instance
 338   // because there is concurrent allocation either
 339   // directly by mutators or for promotion during a GC.
 340   // It's "MT-safe", however, in the sense that you are guaranteed
 341   // not to crash and burn, for instance, because of walking
 342   // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and,
  // further, the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 347   // this is during a resize operation at the end of GC
 348   // and at that time you are guaranteed to get the
 349   // correct actual value. However, for instance, this is
 350   // also read completely asynchronously by the "perf-sampler"
 351   // that supports jvmstat, and you are apt to see the values
 352   // flicker in such cases.
 353   assert(_dictionary != NULL, "No _dictionary?");
 354   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 355           totalSizeInIndexedFreeLists() +
 356           _smallLinearAllocBlock._word_size) * HeapWordSize;
 357 }
 358 
 359 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 360   assert(_dictionary != NULL, "No _dictionary?");
 361   assert_locked();
 362   size_t res = _dictionary->max_chunk_size();
 363   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 364                        (size_t) SmallForLinearAlloc - 1));
 365   // XXX the following could potentially be pretty slow;
 366   // should one, pessimistically for the rare cases when res
 367   // calculated above is less than IndexSetSize,
 368   // just return res calculated above? My reasoning was that
 369   // those cases will be so rare that the extra time spent doesn't
 370   // really matter....
 371   // Note: do not change the loop test i >= res + IndexSetStride
 372   // to i > res below, because i is unsigned and res may be zero.
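  // (For example, with res == 0 and IndexSetStride == 2, if i ever reached
  // the value 1, the relaxed test "i > res" would pass and the subsequent
  // "i -= IndexSetStride" would wrap i around to a huge unsigned value.)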
 373   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 374        i -= IndexSetStride) {
 375     if (_indexedFreeList[i].head() != NULL) {
 376       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 377       return i;
 378     }
 379   }
 380   return res;
 381 }
 382 
 383 void LinearAllocBlock::print_on(outputStream* st) const {
 384   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 385             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 386             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 387 }
 388 
 389 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 390   st->print_cr("COMPACTIBLE FREELIST SPACE");
 391   st->print_cr(" Space:");
 392   Space::print_on(st);
 393 
 394   st->print_cr("promoInfo:");
 395   _promoInfo.print_on(st);
 396 
 397   st->print_cr("_smallLinearAllocBlock");
 398   _smallLinearAllocBlock.print_on(st);
 399 
 400   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 401 
 402   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 403 }
 404 
 405 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 406 const {
 407   reportIndexedFreeListStatistics(st);
 408   st->print_cr("Layout of Indexed Freelists");
 409   st->print_cr("---------------------------");
 410   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 411   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 412     _indexedFreeList[i].print_on(st);
 413     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 414       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 415                    p2i(fc), p2i((HeapWord*)fc + i),
 416                    fc->cantCoalesce() ? "\t CC" : "");
 417     }
 418   }
 419 }
 420 
 421 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 422 const {
 423   _promoInfo.print_on(st);
 424 }
 425 
 426 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 427 const {
 428   _dictionary->report_statistics(st);
 429   st->print_cr("Layout of Freelists in Tree");
 430   st->print_cr("---------------------------");
 431   _dictionary->print_free_lists(st);
 432 }
 433 
 434 class BlkPrintingClosure: public BlkClosure {
 435   const CMSCollector*             _collector;
 436   const CompactibleFreeListSpace* _sp;
 437   const CMSBitMap*                _live_bit_map;
 438   const bool                      _post_remark;
 439   outputStream*                   _st;
 440 public:
 441   BlkPrintingClosure(const CMSCollector* collector,
 442                      const CompactibleFreeListSpace* sp,
 443                      const CMSBitMap* live_bit_map,
 444                      outputStream* st):
 445     _collector(collector),
 446     _sp(sp),
 447     _live_bit_map(live_bit_map),
 448     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 449     _st(st) { }
 450   size_t do_blk(HeapWord* addr);
 451 };
 452 
 453 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 454   size_t sz = _sp->block_size_no_stall(addr, _collector);
 455   assert(sz != 0, "Should always be able to compute a size");
 456   if (_sp->block_is_obj(addr)) {
 457     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 458     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 459       p2i(addr),
 460       dead ? "dead" : "live",
 461       sz,
 462       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 463     if (CMSPrintObjectsInDump && !dead) {
 464       oop(addr)->print_on(_st);
 465       _st->print_cr("--------------------------------------");
 466     }
 467   } else { // free block
 468     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 469       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 470     if (CMSPrintChunksInDump) {
 471       ((FreeChunk*)addr)->print_on(_st);
 472       _st->print_cr("--------------------------------------");
 473     }
 474   }
 475   return sz;
 476 }
 477 
 478 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 479   st->print_cr("=========================");
 480   st->print_cr("Block layout in CMS Heap:");
 481   st->print_cr("=========================");
 482   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 483   blk_iterate(&bpcl);
 484 
 485   st->print_cr("=======================================");
 486   st->print_cr("Order & Layout of Promotion Info Blocks");
 487   st->print_cr("=======================================");
 488   print_promo_info_blocks(st);
 489 
 490   st->print_cr("===========================");
 491   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 493   print_indexed_free_lists(st);
 494 
 495   st->print_cr("=================================");
 496   st->print_cr("Order of Free Lists in Dictionary");
 497   st->print_cr("=================================");
 498   print_dictionary_free_lists(st);
 499 }
 500 
 501 
 502 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 503   assert_lock_strong(&_freelistLock);
 504   Log(gc, freelist, stats) log;
 505   if (!log.is_debug()) {
 506     return;
 507   }
 508   log.debug("%s", title);
 509 
 510   LogStream out(log.debug());
 511   _dictionary->report_statistics(&out);
 512 
 513   if (log.is_trace()) {
 514     ResourceMark rm;
 515     reportIndexedFreeListStatistics(log.trace_stream());
 516     size_t total_size = totalSizeInIndexedFreeLists() +
 517                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 518     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 519   }
 520 }
 521 
 522 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 523   assert_lock_strong(&_freelistLock);
 524   st->print_cr("Statistics for IndexedFreeLists:");
 525   st->print_cr("--------------------------------");
 526   size_t total_size = totalSizeInIndexedFreeLists();
 527   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 528   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 529   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 530   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 531   if (free_blocks != 0) {
 532     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 533   }
 534 }
 535 
 536 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 537   size_t res = 0;
 538   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 539     debug_only(
 540       ssize_t recount = 0;
 541       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 542          fc = fc->next()) {
 543         recount += 1;
 544       }
 545       assert(recount == _indexedFreeList[i].count(),
 546         "Incorrect count in list");
 547     )
 548     res += _indexedFreeList[i].count();
 549   }
 550   return res;
 551 }
 552 
 553 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 554   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 555     if (_indexedFreeList[i].head() != NULL) {
 556       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 557       return (size_t)i;
 558     }
 559   }
 560   return 0;
 561 }
 562 
 563 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 564   HeapWord* prevEnd = end();
 565   assert(prevEnd != value, "unnecessary set_end call");
 566   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 567         "New end is below unallocated block");
 568   _end = value;
 569   if (prevEnd != NULL) {
 570     // Resize the underlying block offset table.
 571     _bt.resize(pointer_delta(value, bottom()));
 572     if (value <= prevEnd) {
 573       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 574              "New end is below unallocated block");
 575     } else {
 576       // Now, take this new chunk and add it to the free blocks.
 577       // Note that the BOT has not yet been updated for this block.
 578       size_t newFcSize = pointer_delta(value, prevEnd);
 579       // Add the block to the free lists, if possible coalescing it
 580       // with the last free block, and update the BOT and census data.
 581       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 582     }
 583   }
 584 }
 585 
 586 class FreeListSpaceDCTOC : public FilteringDCTOC {
 587   CompactibleFreeListSpace* _cfls;
 588   CMSCollector* _collector;
 589   bool _parallel;
 590 protected:
 591   // Override.
 592 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 593   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 594                                        HeapWord* bottom, HeapWord* top, \
 595                                        ClosureType* cl);                \
 596       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 597                                        HeapWord* bottom, HeapWord* top, \
 598                                        ClosureType* cl);                \
 599     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 600                                        HeapWord* bottom, HeapWord* top, \
 601                                        ClosureType* cl)
 602   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 603   walk_mem_region_with_cl_DECL(FilteringClosure);
 604 
 605 public:
 606   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 607                      CMSCollector* collector,
 608                      ExtendedOopClosure* cl,
 609                      CardTableModRefBS::PrecisionStyle precision,
 610                      HeapWord* boundary,
 611                      bool parallel) :
 612     FilteringDCTOC(sp, cl, precision, boundary),
 613     _cfls(sp), _collector(collector), _parallel(parallel) {}
 614 };
 615 
 616 // We de-virtualize the block-related calls below, since we know that our
 617 // space is a CompactibleFreeListSpace.
 618 
 619 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 620 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 621                                                  HeapWord* bottom,              \
 622                                                  HeapWord* top,                 \
 623                                                  ClosureType* cl) {             \
 624    if (_parallel) {                                                             \
 625      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 626    } else {                                                                     \
 627      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 628    }                                                                            \
 629 }                                                                               \
 630 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 631                                                      HeapWord* bottom,          \
 632                                                      HeapWord* top,             \
 633                                                      ClosureType* cl) {         \
 634   /* Skip parts that are before "mr", in case "block_start" sent us             \
 635      back too far. */                                                           \
 636   HeapWord* mr_start = mr.start();                                              \
 637   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 638   HeapWord* next = bottom + bot_size;                                           \
 639   while (next < mr_start) {                                                     \
 640     bottom = next;                                                              \
 641     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 642     next = bottom + bot_size;                                                   \
 643   }                                                                             \
 644                                                                                 \
 645   while (bottom < top) {                                                        \
 646     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 647         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 648                     oop(bottom)) &&                                             \
 649         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 650       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 651       bottom += _cfls->adjustObjectSize(word_sz);                               \
 652     } else {                                                                    \
 653       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 654     }                                                                           \
 655   }                                                                             \
 656 }                                                                               \
 657 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 658                                                        HeapWord* bottom,        \
 659                                                        HeapWord* top,           \
 660                                                        ClosureType* cl) {       \
 661   /* Skip parts that are before "mr", in case "block_start" sent us             \
 662      back too far. */                                                           \
 663   HeapWord* mr_start = mr.start();                                              \
 664   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 665   HeapWord* next = bottom + bot_size;                                           \
 666   while (next < mr_start) {                                                     \
 667     bottom = next;                                                              \
 668     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 669     next = bottom + bot_size;                                                   \
 670   }                                                                             \
 671                                                                                 \
 672   while (bottom < top) {                                                        \
 673     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 674         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 675                     oop(bottom)) &&                                             \
 676         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 677       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 678       bottom += _cfls->adjustObjectSize(word_sz);                               \
 679     } else {                                                                    \
 680       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 681     }                                                                           \
 682   }                                                                             \
 683 }
 684 
 685 // (There are only two of these, rather than N, because the split is due
 686 // only to the introduction of the FilteringClosure, a local part of the
 687 // impl of this abstraction.)
 688 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 689 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 690 
 691 DirtyCardToOopClosure*
 692 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 693                                       CardTableModRefBS::PrecisionStyle precision,
 694                                       HeapWord* boundary,
 695                                       bool parallel) {
 696   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 697 }
 698 
 699 
 700 // Note on locking for the space iteration functions:
 701 // since the collector's iteration activities are concurrent with
 702 // allocation activities by mutators, absent a suitable mutual exclusion
 703 // mechanism the iterators may go awry. For instance a block being iterated
 704 // may suddenly be allocated or divided up and part of it allocated and
 705 // so on.
 706 
 707 // Apply the given closure to each block in the space.
 708 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 709   assert_lock_strong(freelistLock());
 710   HeapWord *cur, *limit;
 711   for (cur = bottom(), limit = end(); cur < limit;
 712        cur += cl->do_blk_careful(cur));
 713 }
 714 
 715 // Apply the given closure to each block in the space.
 716 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 717   assert_lock_strong(freelistLock());
 718   HeapWord *cur, *limit;
 719   for (cur = bottom(), limit = end(); cur < limit;
 720        cur += cl->do_blk(cur));
 721 }
 722 
 723 // Apply the given closure to each oop in the space.
 724 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 725   assert_lock_strong(freelistLock());
 726   HeapWord *cur, *limit;
 727   size_t curSize;
 728   for (cur = bottom(), limit = end(); cur < limit;
 729        cur += curSize) {
 730     curSize = block_size(cur);
 731     if (block_is_obj(cur)) {
 732       oop(cur)->oop_iterate(cl);
 733     }
 734   }
 735 }
 736 
 737 // NOTE: In the following methods, in order to safely be able to
 738 // apply the closure to an object, we need to be sure that the
 739 // object has been initialized. We are guaranteed that an object
 740 // is initialized if we are holding the Heap_lock with the
 741 // world stopped.
 742 void CompactibleFreeListSpace::verify_objects_initialized() const {
 743   if (is_init_completed()) {
 744     assert_locked_or_safepoint(Heap_lock);
 745     if (Universe::is_fully_initialized()) {
 746       guarantee(SafepointSynchronize::is_at_safepoint(),
 747                 "Required for objects to be initialized");
 748     }
 749   } // else make a concession at vm start-up
 750 }
 751 
 752 // Apply the given closure to each object in the space
 753 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 754   assert_lock_strong(freelistLock());
 755   NOT_PRODUCT(verify_objects_initialized());
 756   HeapWord *cur, *limit;
 757   size_t curSize;
 758   for (cur = bottom(), limit = end(); cur < limit;
 759        cur += curSize) {
 760     curSize = block_size(cur);
 761     if (block_is_obj(cur)) {
 762       blk->do_object(oop(cur));
 763     }
 764   }
 765 }
 766 
// Apply the given closure to each live object in the space.
//   The use of CompactibleFreeListSpace by ConcurrentMarkSweepGeneration for
// concurrent GCs means that the space may contain objects holding references
// to objects that are no longer valid.  For example, an object may reference
// another object that has already been swept up (collected).  This method
// uses obj_is_alive() to determine whether it is safe to apply the closure
// to an object.  See obj_is_alive() for details on how the liveness of an
// object is decided.
 776 
 777 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 778   assert_lock_strong(freelistLock());
 779   NOT_PRODUCT(verify_objects_initialized());
 780   HeapWord *cur, *limit;
 781   size_t curSize;
 782   for (cur = bottom(), limit = end(); cur < limit;
 783        cur += curSize) {
 784     curSize = block_size(cur);
 785     if (block_is_obj(cur) && obj_is_alive(cur)) {
 786       blk->do_object(oop(cur));
 787     }
 788   }
 789 }
 790 
 791 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 792                                                   UpwardsObjectClosure* cl) {
 793   assert_locked(freelistLock());
 794   NOT_PRODUCT(verify_objects_initialized());
 795   assert(!mr.is_empty(), "Should be non-empty");
 796   // We use MemRegion(bottom(), end()) rather than used_region() below
 797   // because the two are not necessarily equal for some kinds of
 798   // spaces, in particular, certain kinds of free list spaces.
 799   // We could use the more complicated but more precise:
 800   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 801   // but the slight imprecision seems acceptable in the assertion check.
 802   assert(MemRegion(bottom(), end()).contains(mr),
 803          "Should be within used space");
 804   HeapWord* prev = cl->previous();   // max address from last time
 805   if (prev >= mr.end()) { // nothing to do
 806     return;
 807   }
 808   // This assert will not work when we go from cms space to perm
  // space, and use the same closure. Easy fix deferred for later. XXX YSR
 810   // assert(prev == NULL || contains(prev), "Should be within space");
 811 
 812   bool last_was_obj_array = false;
 813   HeapWord *blk_start_addr, *region_start_addr;
 814   if (prev > mr.start()) {
 815     region_start_addr = prev;
 816     blk_start_addr    = prev;
 817     // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
 819     // in this region due to a particular coalescing policy.
 820     // Relax the assertion so that the case where the unallocated
 821     // block is maintained and "prev" is beyond the unallocated
 822     // block does not cause the assertion to fire.
 823     assert((BlockOffsetArrayUseUnallocatedBlock &&
 824             (!is_in(prev))) ||
 825            (blk_start_addr == block_start(region_start_addr)), "invariant");
 826   } else {
 827     region_start_addr = mr.start();
 828     blk_start_addr    = block_start(region_start_addr);
 829   }
 830   HeapWord* region_end_addr = mr.end();
 831   MemRegion derived_mr(region_start_addr, region_end_addr);
 832   while (blk_start_addr < region_end_addr) {
 833     const size_t size = block_size(blk_start_addr);
 834     if (block_is_obj(blk_start_addr)) {
 835       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 836     } else {
 837       last_was_obj_array = false;
 838     }
 839     blk_start_addr += size;
 840   }
 841   if (!last_was_obj_array) {
 842     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 843            "Should be within (closed) used space");
 844     assert(blk_start_addr > prev, "Invariant");
 845     cl->set_previous(blk_start_addr); // min address for next time
 846   }
 847 }
 848 
 849 // Callers of this iterator beware: The closure application should
 850 // be robust in the face of uninitialized objects and should (always)
 851 // return a correct size so that the next addr + size below gives us a
 852 // valid block boundary. [See for instance,
 853 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 854 // in ConcurrentMarkSweepGeneration.cpp.]
 855 HeapWord*
 856 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 857   ObjectClosureCareful* cl) {
 858   assert_lock_strong(freelistLock());
  // Can't use used_region() below because it need not
  // be the same as [bottom(),end()); although we could
 861   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 862   // that appears too cumbersome, so we just do the simpler check
 863   // in the assertion below.
 864   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 865          "mr should be non-empty and within used space");
 866   HeapWord *addr, *end;
 867   size_t size;
 868   for (addr = block_start_careful(mr.start()), end  = mr.end();
 869        addr < end; addr += size) {
 870     FreeChunk* fc = (FreeChunk*)addr;
 871     if (fc->is_free()) {
 872       // Since we hold the free list lock, which protects direct
 873       // allocation in this generation by mutators, a free object
 874       // will remain free throughout this iteration code.
 875       size = fc->size();
 876     } else {
 877       // Note that the object need not necessarily be initialized,
 878       // because (for instance) the free list lock does NOT protect
 879       // object initialization. The closure application below must
 880       // therefore be correct in the face of uninitialized objects.
 881       size = cl->do_object_careful_m(oop(addr), mr);
 882       if (size == 0) {
 883         // An unparsable object found. Signal early termination.
 884         return addr;
 885       }
 886     }
 887   }
 888   return NULL;
 889 }
 890 
 891 
 892 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 893   NOT_PRODUCT(verify_objects_initialized());
 894   return _bt.block_start(p);
 895 }
 896 
 897 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 898   return _bt.block_start_careful(p);
 899 }
 900 
 901 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 902   NOT_PRODUCT(verify_objects_initialized());
 903   // This must be volatile, or else there is a danger that the compiler
 904   // will compile the code below into a sometimes-infinite loop, by keeping
 905   // the value read the first time in a register.
 906   while (true) {
 907     // We must do this until we get a consistent view of the object.
 908     if (FreeChunk::indicatesFreeChunk(p)) {
 909       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 910       size_t res = fc->size();
 911 
 912       // Bugfix for systems with weak memory model (PPC64/IA64). The
 913       // block's free bit was set and we have read the size of the
 914       // block. Acquire and check the free bit again. If the block is
 915       // still free, the read size is correct.
 916       OrderAccess::acquire();
 917 
 918       // If the object is still a free chunk, return the size, else it
 919       // has been allocated so try again.
 920       if (FreeChunk::indicatesFreeChunk(p)) {
 921         assert(res != 0, "Block size should not be 0");
 922         return res;
 923       }
 924     } else {
 925       // must read from what 'p' points to in each loop.
 926       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 927       if (k != NULL) {
 928         assert(k->is_klass(), "Should really be klass oop.");
 929         oop o = (oop)p;
 930         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 931 
 932         // Bugfix for systems with weak memory model (PPC64/IA64).
 933         // The object o may be an array. Acquire to make sure that the array
 934         // size (third word) is consistent.
 935         OrderAccess::acquire();
 936 
 937         size_t res = o->size_given_klass(k);
 938         res = adjustObjectSize(res);
 939         assert(res != 0, "Block size should not be 0");
 940         return res;
 941       }
 942     }
 943   }
 944 }
 945 
 946 // TODO: Now that is_parsable is gone, we should combine these two functions.
 947 // A variant of the above that uses the Printezis bits for
 948 // unparsable but allocated objects. This avoids any possible
 949 // stalls waiting for mutators to initialize objects, and is
 950 // thus potentially faster than the variant above. However,
 951 // this variant may return a zero size for a block that is
 952 // under mutation and for which a consistent size cannot be
 953 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 954 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 955                                                      const CMSCollector* c)
 956 const {
 957   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 958   // This must be volatile, or else there is a danger that the compiler
 959   // will compile the code below into a sometimes-infinite loop, by keeping
 960   // the value read the first time in a register.
 961   DEBUG_ONLY(uint loops = 0;)
 962   while (true) {
 963     // We must do this until we get a consistent view of the object.
 964     if (FreeChunk::indicatesFreeChunk(p)) {
 965       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 966       size_t res = fc->size();
 967 
 968       // Bugfix for systems with weak memory model (PPC64/IA64). The
 969       // free bit of the block was set and we have read the size of
 970       // the block. Acquire and check the free bit again. If the
 971       // block is still free, the read size is correct.
 972       OrderAccess::acquire();
 973 
 974       if (FreeChunk::indicatesFreeChunk(p)) {
 975         assert(res != 0, "Block size should not be 0");
 976         assert(loops == 0, "Should be 0");
 977         return res;
 978       }
 979     } else {
 980       // must read from what 'p' points to in each loop.
 981       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 982       // We trust the size of any object that has a non-NULL
 983       // klass and (for those in the perm gen) is parsable
 984       // -- irrespective of its conc_safe-ty.
 985       if (k != NULL) {
 986         assert(k->is_klass(), "Should really be klass oop.");
 987         oop o = (oop)p;
 988         assert(o->is_oop(), "Should be an oop");
 989 
 990         // Bugfix for systems with weak memory model (PPC64/IA64).
 991         // The object o may be an array. Acquire to make sure that the array
 992         // size (third word) is consistent.
 993         OrderAccess::acquire();
 994 
 995         size_t res = o->size_given_klass(k);
 996         res = adjustObjectSize(res);
 997         assert(res != 0, "Block size should not be 0");
 998         return res;
 999       } else {
1000         // May return 0 if P-bits not present.
1001         return c->block_size_if_printezis_bits(p);
1002       }
1003     }
1004     assert(loops == 0, "Can loop at most once");
1005     DEBUG_ONLY(loops++;)
1006   }
1007 }
1008 
1009 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1010   NOT_PRODUCT(verify_objects_initialized());
1011   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1012   FreeChunk* fc = (FreeChunk*)p;
1013   if (fc->is_free()) {
1014     return fc->size();
1015   } else {
1016     // Ignore mark word because this may be a recently promoted
1017     // object whose mark word is used to chain together grey
1018     // objects (the last one would have a null value).
1019     assert(oop(p)->is_oop(true), "Should be an oop");
1020     return adjustObjectSize(oop(p)->size());
1021   }
1022 }
1023 
1024 // This implementation assumes that the property of "being an object" is
1025 // stable.  But being a free chunk may not be (because of parallel
// promotion).
1027 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1028   FreeChunk* fc = (FreeChunk*)p;
1029   assert(is_in_reserved(p), "Should be in space");
1030   if (FreeChunk::indicatesFreeChunk(p)) return false;
1031   Klass* k = oop(p)->klass_or_null();
1032   if (k != NULL) {
1033     // Ignore mark word because it may have been used to
1034     // chain together promoted objects (the last one
1035     // would have a null value).
1036     assert(oop(p)->is_oop(true), "Should be an oop");
1037     return true;
1038   } else {
1039     return false;  // Was not an object at the start of collection.
1040   }
1041 }
1042 
1043 // Check if the object is alive. This fact is checked either by consulting
1044 // the main marking bitmap in the sweeping phase or, if it's a permanent
1045 // generation and we're not in the sweeping phase, by checking the
1046 // perm_gen_verify_bit_map where we store the "deadness" information if
1047 // we did not sweep the perm gen in the most recent previous GC cycle.
1048 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1049   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1050          "Else races are possible");
1051   assert(block_is_obj(p), "The address should point to an object");
1052 
1053   // If we're sweeping, we use object liveness information from the main bit map
1054   // for both perm gen and old gen.
1055   // We don't need to lock the bitmap (live_map or dead_map below), because
1056   // EITHER we are in the middle of the sweeping phase, and the
1057   // main marking bit map (live_map below) is locked,
1058   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1059   // is stable, because it's mutated only in the sweeping phase.
1060   // NOTE: This method is also used by jmap where, if class unloading is
1061   // off, the results can return "false" for legitimate perm objects,
1062   // when we are not in the midst of a sweeping phase, which can result
1063   // in jmap not reporting certain perm gen objects. This will be moot
1064   // if/when the perm gen goes away in the future.
1065   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1066     CMSBitMap* live_map = _collector->markBitMap();
1067     return live_map->par_isMarked((HeapWord*) p);
1068   }
1069   return true;
1070 }
1071 
1072 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1073   FreeChunk* fc = (FreeChunk*)p;
1074   assert(is_in_reserved(p), "Should be in space");
1075   assert(_bt.block_start(p) == p, "Should be a block boundary");
1076   if (!fc->is_free()) {
1077     // Ignore mark word because it may have been used to
1078     // chain together promoted objects (the last one
1079     // would have a null value).
1080     assert(oop(p)->is_oop(true), "Should be an oop");
1081     return true;
1082   }
1083   return false;
1084 }
1085 
1086 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistLock() when you call this.
1088 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1089   size_t size = 0;
1090   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1091     debug_only(
1092       // We may be calling here without the lock in which case we
1093       // won't do this modest sanity check.
1094       if (freelistLock()->owned_by_self()) {
1095         size_t total_list_size = 0;
1096         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1097           fc = fc->next()) {
1098           total_list_size += i;
1099         }
1100         assert(total_list_size == i * _indexedFreeList[i].count(),
1101                "Count in list is incorrect");
1102       }
1103     )
1104     size += i * _indexedFreeList[i].count();
1105   }
1106   return size;
1107 }
1108 
1109 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1110   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1111   return allocate(size);
1112 }
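// Note: par_allocate() above acquires freelistLock() itself (without a
// safepoint check), whereas allocate() below asserts that the caller already
// holds the lock; use the variant that matches the caller's locking context.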
1113 
1114 HeapWord*
1115 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1116   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1117 }
1118 
1119 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1120   assert_lock_strong(freelistLock());
1121   HeapWord* res = NULL;
1122   assert(size == adjustObjectSize(size),
1123          "use adjustObjectSize() before calling into allocate()");
1124 
1125   res = allocate_adaptive_freelists(size);
1126 
1127   if (res != NULL) {
1128     // check that res does lie in this space!
1129     assert(is_in_reserved(res), "Not in this space!");
1130     assert(is_aligned((void*)res), "alignment check");
1131 
1132     FreeChunk* fc = (FreeChunk*)res;
1133     fc->markNotFree();
1134     assert(!fc->is_free(), "shouldn't be marked free");
1135     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1136     // Verify that the block offset table shows this to
1137     // be a single block, but not one which is unallocated.
1138     _bt.verify_single_block(res, size);
1139     _bt.verify_not_unallocated(res, size);
1140     // mangle a just allocated object with a distinct pattern.
1141     debug_only(fc->mangleAllocated(size));
1142   }
1143 
1144   return res;
1145 }
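// Usage sketch for allocate() above (hypothetical caller, for illustration
// only): the requested size must already be adjusted and freelistLock() must
// be held, e.g.
//   size_t adjusted = cfls->adjustObjectSize(word_size);
//   HeapWord* mem   = cfls->allocate(adjusted);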
1146 
1147 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1148   assert_lock_strong(freelistLock());
1149   HeapWord* res = NULL;
1150   assert(size == adjustObjectSize(size),
1151          "use adjustObjectSize() before calling into allocate()");
1152 
  // Strategy:
  //   if small
  //     exact size from small object indexed list
  //     small or large linear allocation block (linAB) as appropriate
  //     take from lists of greater sized chunks
  //   else
  //     dictionary
  //     small or large linear allocation block if it has the space
  // Try allocating the exact size from the indexed table first.
1162   if (size < IndexSetSize) {
1163     res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
1165       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1166         "Not removed from free list");
1167       // no block offset table adjustment is necessary on blocks in
1168       // the indexed lists.
1169 
1170     // Try allocating from the small LinAB
1171     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1172         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1173         // if successful, the above also adjusts block offset table
1174         // Note that this call will refill the LinAB to
        // satisfy the request.  This is different than
1176         // evm.
1177         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1178     } else {
1179       // Raid the exact free lists larger than size, even if they are not
1180       // overpopulated.
1181       res = (HeapWord*) getChunkFromGreater(size);
1182     }
1183   } else {
1184     // Big objects get allocated directly from the dictionary.
1185     res = (HeapWord*) getChunkFromDictionaryExact(size);
1186     if (res == NULL) {
1187       // Try hard not to fail since an allocation failure will likely
1188       // trigger a synchronous GC.  Try to get the space from the
1189       // allocation blocks.
1190       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1191     }
1192   }
1193 
1194   return res;
1195 }
1196 
1197 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1198 // when promoting obj.
1199 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1200   // Depending on the object size, expansion may require refilling either a
1201   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1202   // is added because the dictionary may over-allocate to avoid fragmentation.
1203   size_t space = obj_size;
1204   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1205   return space;
1206 }
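// Worked example for expansionSpaceRequired() above (illustrative numbers
// only): promoting a 10-word object when _promoInfo.refillSize() returns
// 128 words yields an estimate of 10 + 128 + 2 * MinChunkSize words.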
1207 
1208 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1209   FreeChunk* ret;
1210 
1211   assert(numWords >= MinChunkSize, "Size is less than minimum");
1212   assert(linearAllocationWouldFail() || bestFitFirst(),
1213     "Should not be here");
1214 
1215   size_t i;
1216   size_t currSize = numWords + MinChunkSize;
1217   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1218   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1219     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1220     if (fl->head()) {
1221       ret = getFromListGreater(fl, numWords);
1222       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1223       return ret;
1224     }
1225   }
1226 
1227   currSize = MAX2((size_t)SmallForDictionary,
1228                   (size_t)(numWords + MinChunkSize));
1229 
1230   /* Try to get a chunk that satisfies request, while avoiding
1231      fragmentation that can't be handled. */
1232   {
1233     ret =  dictionary()->get_chunk(currSize);
1234     if (ret != NULL) {
1235       assert(ret->size() - numWords >= MinChunkSize,
1236              "Chunk is too small");
1237       _bt.allocated((HeapWord*)ret, ret->size());
1238       /* Carve returned chunk. */
1239       (void) splitChunkAndReturnRemainder(ret, numWords);
1240       /* Label this as no longer a free chunk. */
1241       assert(ret->is_free(), "This chunk should be free");
1242       ret->link_prev(NULL);
1243     }
1244     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1245     return ret;
1246   }
1247   ShouldNotReachHere();
1248 }
1249 
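// Debugging support: check that fc is currently linked into the indexed
// free list that matches its size.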
1250 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1251   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1252   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1253 }
1254 
1255 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1256   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1257          (_smallLinearAllocBlock._word_size == fc->size()),
1258          "Linear allocation block shows incorrect size");
1259   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1260           (_smallLinearAllocBlock._word_size == fc->size()));
1261 }
1262 
1263 // Check if the purported free chunk is present either as a linear
1264 // allocation block, the size-indexed table of (smaller) free blocks,
1265 // or the larger free blocks kept in the binary tree dictionary.
1266 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1267   if (verify_chunk_is_linear_alloc_block(fc)) {
1268     return true;
1269   } else if (fc->size() < IndexSetSize) {
1270     return verifyChunkInIndexedFreeLists(fc);
1271   } else {
1272     return dictionary()->verify_chunk_in_free_list(fc);
1273   }
1274 }
1275 
1276 #ifndef PRODUCT
1277 void CompactibleFreeListSpace::assert_locked() const {
1278   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1279 }
1280 
1281 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1282   CMSLockVerifier::assert_locked(lock);
1283 }
1284 #endif
1285 
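// Allocate a chunk of the given size from the dictionary for use as scratch
// space.  The chunk remains marked free but is made non-coalescable.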
1286 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1287   // In the parallel case, the main thread holds the free list lock
1288   // on behalf of the parallel threads.
1289   FreeChunk* fc;
1290   {
1291     // If GC is parallel, this might be called by several threads.
1292     // This should be rare enough that the locking overhead won't affect
1293     // the sequential code.
1294     MutexLockerEx x(parDictionaryAllocLock(),
1295                     Mutex::_no_safepoint_check_flag);
1296     fc = getChunkFromDictionary(size);
1297   }
1298   if (fc != NULL) {
1299     fc->dontCoalesce();
1300     assert(fc->is_free(), "Should be free, but not coalescable");
1301     // Verify that the block offset table shows this to
1302     // be a single block, but not one which is unallocated.
1303     _bt.verify_single_block((HeapWord*)fc, fc->size());
1304     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1305   }
1306   return fc;
1307 }
1308 
1309 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1310   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1311   assert_locked();
1312 
1313   // if we are tracking promotions, then first ensure space for
1314   // promotion (including spooling space for saving header if necessary).
1315   // then allocate and copy, then track promoted info if needed.
1316   // When tracking (see PromotionInfo::track()), the mark word may
1317   // be displaced and in this case restoration of the mark word
1318   // occurs in the (oop_since_save_marks_)iterate phase.
1319   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1320     return NULL;
1321   }
1322   // Call the allocate(size_t, bool) form directly to avoid the
1323   // additional call through the allocate(size_t) form.  Having
1324   // the compiler inline the call is problematic because allocate(size_t)
1325   // is a virtual method.
1326   HeapWord* res = allocate(adjustObjectSize(obj_size));
1327   if (res != NULL) {
1328     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1329     // if we should be tracking promotions, do so.
1330     if (_promoInfo.tracking()) {
1331         _promoInfo.track((PromotedObject*)res);
1332     }
1333   }
1334   return oop(res);
1335 }
1336 
1337 HeapWord*
1338 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1339   assert_locked();
1340   assert(size >= MinChunkSize, "minimum chunk size");
1341   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1342     "maximum from smallLinearAllocBlock");
1343   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1344 }
1345 
1346 HeapWord*
1347 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1348                                                        size_t size) {
1349   assert_locked();
1350   assert(size >= MinChunkSize, "too small");
1351   HeapWord* res = NULL;
1352   // Try to do linear allocation from blk, first making sure that the block is not empty.
1353   if (blk->_word_size == 0) {
1354     // We have probably been unable to fill this either in the prologue or
1355     // when it was exhausted at the last linear allocation. Bail out until
1356     // next time.
1357     assert(blk->_ptr == NULL, "consistency check");
1358     return NULL;
1359   }
1360   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1361   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1362   if (res != NULL) return res;
1363 
1364   // about to exhaust this linear allocation block
1365   if (blk->_word_size == size) { // exactly satisfied
1366     res = blk->_ptr;
1367     _bt.allocated(res, blk->_word_size);
1368   } else if (size + MinChunkSize <= blk->_refillSize) {
1369     size_t sz = blk->_word_size;
1370     // Update _unallocated_block if the size is such that chunk would be
1371     // returned to the indexed free list.  All other chunks in the indexed
1372     // free lists are allocated from the dictionary so that _unallocated_block
1373     // has already been adjusted for them.  Do it here so that the cost
1374     // is paid once for all chunks added back to the indexed free lists.
1375     if (sz < SmallForDictionary) {
1376       _bt.allocated(blk->_ptr, sz);
1377     }
1378     // Return the chunk that isn't big enough, and then refill below.
1379     addChunkToFreeLists(blk->_ptr, sz);
1380     split_birth(sz);
1381     // Don't keep statistics on adding back chunk from a LinAB.
1382   } else {
1383     // A refilled block would not satisfy the request.
1384     return NULL;
1385   }
1386 
1387   blk->_ptr = NULL; blk->_word_size = 0;
1388   refillLinearAllocBlock(blk);
1389   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1390          "block was replenished");
1391   if (res != NULL) {
1392     split_birth(size);
1393     repairLinearAllocBlock(blk);
1394   } else if (blk->_ptr != NULL) {
1395     res = blk->_ptr;
1396     size_t blk_size = blk->_word_size;
1397     blk->_word_size -= size;
1398     blk->_ptr  += size;
1399     split_birth(size);
1400     repairLinearAllocBlock(blk);
1401     // Update BOT last so that other (parallel) GC threads see a consistent
1402     // view of the BOT and free blocks.
1403     // Above must occur before BOT is updated below.
1404     OrderAccess::storestore();
1405     _bt.split_block(res, blk_size, size);  // adjust block offset table
1406   }
1407   return res;
1408 }
1409 
1410 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1411                                         LinearAllocBlock* blk,
1412                                         size_t size) {
1413   assert_locked();
1414   assert(size >= MinChunkSize, "too small");
1415 
1416   HeapWord* res = NULL;
1417   // This is the common case.  Keep it simple.
1418   if (blk->_word_size >= size + MinChunkSize) {
1419     assert(blk->_ptr != NULL, "consistency check");
1420     res = blk->_ptr;
1421     // Note that the BOT is up-to-date for the linAB before allocation.  It
1422     // indicates the start of the linAB.  The split_block() updates the
1423     // BOT for the linAB after the allocation (indicates the start of the
1424     // next chunk to be allocated).
1425     size_t blk_size = blk->_word_size;
1426     blk->_word_size -= size;
1427     blk->_ptr  += size;
1428     split_birth(size);
1429     repairLinearAllocBlock(blk);
1430     // Update BOT last so that other (parallel) GC threads see a consistent
1431     // view of the BOT and free blocks.
1432     // Above must occur before BOT is updated below.
1433     OrderAccess::storestore();
1434     _bt.split_block(res, blk_size, size);  // adjust block offset table
1435     _bt.allocated(res, size);
1436   }
1437   return res;
1438 }
1439 
1440 FreeChunk*
1441 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1442   assert_locked();
1443   assert(size < SmallForDictionary, "just checking");
1444   FreeChunk* res;
1445   res = _indexedFreeList[size].get_chunk_at_head();
1446   if (res == NULL) {
1447     res = getChunkFromIndexedFreeListHelper(size);
1448   }
1449   _bt.verify_not_unallocated((HeapWord*) res, size);
1450   assert(res == NULL || res->size() == size, "Incorrect block size");
1451   return res;
1452 }
1453 
1454 FreeChunk*
1455 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1456   bool replenish) {
1457   assert_locked();
1458   FreeChunk* fc = NULL;
1459   if (size < SmallForDictionary) {
1460     assert(_indexedFreeList[size].head() == NULL ||
1461       _indexedFreeList[size].surplus() <= 0,
1462       "List for this size should be empty or under populated");
1463     // Try best fit in exact lists before replenishing the list
1464     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1465       // Replenish list.
1466       //
1467       // Things tried that failed.
1468       //   Tried allocating out of the two LinAB's first before
1469       // replenishing lists.
1470       //   Tried small linAB of size 256 (size in indexed list)
1471       // and replenishing indexed lists from the small linAB.
1472       //
1473       FreeChunk* newFc = NULL;
1474       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1475       if (replenish_size < SmallForDictionary) {
1476         // Do not replenish from an underpopulated size.
1477         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1478             _indexedFreeList[replenish_size].head() != NULL) {
1479           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1480         } else if (bestFitFirst()) {
1481           newFc = bestFitSmall(replenish_size);
1482         }
1483       }
1484       if (newFc == NULL && replenish_size > size) {
1485         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1486         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1487       }
1488       // Note: The stats update re the split-death of the block obtained above
1489       // will be recorded below, precisely when we know we are going to
1490       // be actually splitting it into more than one piece.
1491       if (newFc != NULL) {
1492         if  (replenish || CMSReplenishIntermediate) {
1493           // Replenish this list and return one block to caller.
1494           size_t i;
1495           FreeChunk *curFc, *nextFc;
1496           size_t num_blk = newFc->size() / size;
1497           assert(num_blk >= 1, "Smaller than requested?");
1498           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1499           if (num_blk > 1) {
1500             // we are sure we will be splitting the block just obtained
1501             // into multiple pieces; record the split-death of the original
1502             splitDeath(replenish_size);
1503           }
1504           // carve up and link blocks 0, ..., num_blk - 2
1505           // The last chunk is not added to the lists but is returned as the
1506           // free chunk.
1507           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1508                i = 0;
1509                i < (num_blk - 1);
1510                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1511                i++) {
1512             curFc->set_size(size);
1513             // Don't record this as a return, so that we can still
1514             // determine the "returns" from a GC.
1515             _bt.verify_not_unallocated((HeapWord*) fc, size);
1516             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1517             _bt.mark_block((HeapWord*)curFc, size);
1518             split_birth(size);
1519             // Don't record the initial population of the indexed list
1520             // as a split birth.
1521           }
1522 
1523           // check that the arithmetic was OK above
1524           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1525             "inconsistency in carving newFc");
1526           curFc->set_size(size);
1527           _bt.mark_block((HeapWord*)curFc, size);
1528           split_birth(size);
1529           fc = curFc;
1530         } else {
1531           // Return entire block to caller
1532           fc = newFc;
1533         }
1534       }
1535     }
1536   } else {
1537     // Get a free chunk from the free chunk dictionary to be returned to
1538     // replenish the indexed free list.
1539     fc = getChunkFromDictionaryExact(size);
1540   }
1541   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1542   return fc;
1543 }
1544 
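// Fetch a chunk of at least 'size' words from the dictionary.  Any excess of
// MinChunkSize or more is split off and returned to the free lists, so the
// chunk handed back satisfies size <= fc->size() < size + MinChunkSize.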
1545 FreeChunk*
1546 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1547   assert_locked();
1548   FreeChunk* fc = _dictionary->get_chunk(size,
1549                                          FreeBlockDictionary<FreeChunk>::atLeast);
1550   if (fc == NULL) {
1551     return NULL;
1552   }
1553   _bt.allocated((HeapWord*)fc, fc->size());
1554   if (fc->size() >= size + MinChunkSize) {
1555     fc = splitChunkAndReturnRemainder(fc, size);
1556   }
1557   assert(fc->size() >= size, "chunk too small");
1558   assert(fc->size() < size + MinChunkSize, "chunk too big");
1559   _bt.verify_single_block((HeapWord*)fc, fc->size());
1560   return fc;
1561 }
1562 
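// Like getChunkFromDictionary(), but the returned chunk is exactly 'size'
// words: if the dictionary hands back a chunk that is larger but too small
// to split (the excess is below MinChunkSize), it is returned to the
// dictionary and a bigger chunk is requested and split instead.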
1563 FreeChunk*
1564 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1565   assert_locked();
1566   FreeChunk* fc = _dictionary->get_chunk(size,
1567                                          FreeBlockDictionary<FreeChunk>::atLeast);
1568   if (fc == NULL) {
1569     return fc;
1570   }
1571   _bt.allocated((HeapWord*)fc, fc->size());
1572   if (fc->size() == size) {
1573     _bt.verify_single_block((HeapWord*)fc, size);
1574     return fc;
1575   }
1576   assert(fc->size() > size, "get_chunk() guarantee");
1577   if (fc->size() < size + MinChunkSize) {
1578     // Return the chunk to the dictionary and go get a bigger one.
1579     returnChunkToDictionary(fc);
1580     fc = _dictionary->get_chunk(size + MinChunkSize,
1581                                 FreeBlockDictionary<FreeChunk>::atLeast);
1582     if (fc == NULL) {
1583       return NULL;
1584     }
1585     _bt.allocated((HeapWord*)fc, fc->size());
1586   }
1587   assert(fc->size() >= size + MinChunkSize, "tautology");
1588   fc = splitChunkAndReturnRemainder(fc, size);
1589   assert(fc->size() == size, "chunk is wrong size");
1590   _bt.verify_single_block((HeapWord*)fc, size);
1591   return fc;
1592 }
1593 
1594 void
1595 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1596   assert_locked();
1597 
1598   size_t size = chunk->size();
1599   _bt.verify_single_block((HeapWord*)chunk, size);
1600   // adjust _unallocated_block downward, as necessary
1601   _bt.freed((HeapWord*)chunk, size);
1602   _dictionary->return_chunk(chunk);
1603 #ifndef PRODUCT
1604   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1605     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1606     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1607     tl->verify_stats();
1608   }
1609 #endif // PRODUCT
1610 }
1611 
1612 void
1613 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1614   assert_locked();
1615   size_t size = fc->size();
1616   _bt.verify_single_block((HeapWord*) fc, size);
1617   _bt.verify_not_unallocated((HeapWord*) fc, size);
1618   _indexedFreeList[size].return_chunk_at_tail(fc);
1619 #ifndef PRODUCT
1620   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1621      _indexedFreeList[size].verify_stats();
1622   }
1623 #endif // PRODUCT
1624 }
1625 
1626 // Add chunk to end of last block -- if it's the largest
1627 // block -- and update BOT and census data. We would
1628 // of course have preferred to coalesce it with the
1629 // last block, but it's currently less expensive to find the
1630 // largest block than it is to find the last.
1631 void
1632 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1633   HeapWord* chunk, size_t     size) {
1634   // check that the chunk does lie in this space!
1635   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1636   // One of the parallel gc task threads may be here
1637   // whilst others are allocating.
1638   Mutex* lock = &_parDictionaryAllocLock;
1639   FreeChunk* ec;
1640   {
1641     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1642     ec = dictionary()->find_largest_dict();  // get largest block
1643     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1644       // It's a coterminal block - we can coalesce.
1645       size_t old_size = ec->size();
1646       coalDeath(old_size);
1647       removeChunkFromDictionary(ec);
1648       size += old_size;
1649     } else {
1650       ec = (FreeChunk*)chunk;
1651     }
1652   }
1653   ec->set_size(size);
1654   debug_only(ec->mangleFreed(size));
1655   if (size < SmallForDictionary) {
1656     lock = _indexedFreeListParLocks[size];
1657   }
1658   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1659   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1660   // Record the birth under the lock, since the recording involves
1661   // manipulation of the list on which the chunk lives; if the chunk
1662   // is allocated and is the last on the list, the list can go away.
1664   coalBirth(size);
1665 }
1666 
1667 void
1668 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1669                                               size_t     size) {
1670   // check that the chunk does lie in this space!
1671   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1672   assert_locked();
1673   _bt.verify_single_block(chunk, size);
1674 
1675   FreeChunk* fc = (FreeChunk*) chunk;
1676   fc->set_size(size);
1677   debug_only(fc->mangleFreed(size));
1678   if (size < SmallForDictionary) {
1679     returnChunkToFreeList(fc);
1680   } else {
1681     returnChunkToDictionary(fc);
1682   }
1683 }
1684 
1685 void
1686 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1687   size_t size, bool coalesced) {
1688   assert_locked();
1689   assert(chunk != NULL, "null chunk");
1690   if (coalesced) {
1691     // repair BOT
1692     _bt.single_block(chunk, size);
1693   }
1694   addChunkToFreeLists(chunk, size);
1695 }
1696 
1697 // We _must_ find the purported chunk on our free lists;
1698 // we assert if we don't.
1699 void
1700 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1701   size_t size = fc->size();
1702   assert_locked();
1703   debug_only(verifyFreeLists());
1704   if (size < SmallForDictionary) {
1705     removeChunkFromIndexedFreeList(fc);
1706   } else {
1707     removeChunkFromDictionary(fc);
1708   }
1709   _bt.verify_single_block((HeapWord*)fc, size);
1710   debug_only(verifyFreeLists());
1711 }
1712 
1713 void
1714 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1715   size_t size = fc->size();
1716   assert_locked();
1717   assert(fc != NULL, "null chunk");
1718   _bt.verify_single_block((HeapWord*)fc, size);
1719   _dictionary->remove_chunk(fc);
1720   // adjust _unallocated_block upward, as necessary
1721   _bt.allocated((HeapWord*)fc, size);
1722 }
1723 
1724 void
1725 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1726   assert_locked();
1727   size_t size = fc->size();
1728   _bt.verify_single_block((HeapWord*)fc, size);
1729   NOT_PRODUCT(
1730     if (FLSVerifyIndexTable) {
1731       verifyIndexedFreeList(size);
1732     }
1733   )
1734   _indexedFreeList[size].remove_chunk(fc);
1735   NOT_PRODUCT(
1736     if (FLSVerifyIndexTable) {
1737       verifyIndexedFreeList(size);
1738     }
1739   )
1740 }
1741 
1742 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1743   /* A hint is the next larger size that has a surplus.
1744      Start search at a size large enough to guarantee that
1745      the excess is >= MinChunkSize. */
1746   size_t start = align_object_size(numWords + MinChunkSize);
1747   if (start < IndexSetSize) {
1748     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1749     size_t    hint = _indexedFreeList[start].hint();
1750     while (hint < IndexSetSize) {
1751       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1752       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1753       if (fl->surplus() > 0 && fl->head() != NULL) {
1754         // Found a list with surplus, reset original hint
1755         // and split out a free chunk which is returned.
1756         _indexedFreeList[start].set_hint(hint);
1757         FreeChunk* res = getFromListGreater(fl, numWords);
1758         assert(res == NULL || res->is_free(),
1759           "Should be returning a free chunk");
1760         return res;
1761       }
1762       hint = fl->hint(); /* keep looking */
1763     }
1764     /* None found. */
1765     it[start].set_hint(IndexSetSize);
1766   }
1767   return NULL;
1768 }
1769 
1770 /* Requires fl->size >= numWords + MinChunkSize */
1771 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1772   size_t numWords) {
1773   FreeChunk *curr = fl->head();
1774   size_t oldNumWords = curr->size();
1775   assert(numWords >= MinChunkSize, "Word size is too small");
1776   assert(curr != NULL, "List is empty");
1777   assert(oldNumWords >= numWords + MinChunkSize,
1778         "Size of chunks in the list is too small");
1779 
1780   fl->remove_chunk(curr);
1781   // recorded indirectly by splitChunkAndReturnRemainder -
1782   // smallSplit(oldNumWords, numWords);
1783   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1784   // Does anything have to be done for the remainder in terms of
1785   // fixing the card table?
1786   assert(new_chunk == NULL || new_chunk->is_free(),
1787     "Should be returning a free chunk");
1788   return new_chunk;
1789 }
1790 
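// Split 'chunk' in place: the trailing remainder (at least MinChunkSize
// words) is returned to the indexed free lists or to the dictionary,
// depending on its size, while the leading 'new_size'-word piece is handed
// back to the caller.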
1791 FreeChunk*
1792 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1793   size_t new_size) {
1794   assert_locked();
1795   size_t size = chunk->size();
1796   assert(size > new_size, "Split from a smaller block?");
1797   assert(is_aligned(chunk), "alignment problem");
1798   assert(size == adjustObjectSize(size), "alignment problem");
1799   size_t rem_sz = size - new_size;
1800   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1801   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1802   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1803   assert(is_aligned(ffc), "alignment problem");
1804   ffc->set_size(rem_sz);
1805   ffc->link_next(NULL);
1806   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1807   // Above must occur before BOT is updated below.
1808   // adjust block offset table
1809   OrderAccess::storestore();
1810   assert(chunk->is_free() && ffc->is_free(), "Error");
1811   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1812   if (rem_sz < SmallForDictionary) {
1813     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1814     bool is_par = Thread::current()->is_GC_task_thread();
1815     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1816     returnChunkToFreeList(ffc);
1817     split(size, rem_sz);
1818     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1819   } else {
1820     returnChunkToDictionary(ffc);
1821     split(size, rem_sz);
1822   }
1823   chunk->set_size(new_size);
1824   return chunk;
1825 }
1826 
1827 void
1828 CompactibleFreeListSpace::sweep_completed() {
1829   // Now that space is probably plentiful, refill linear
1830   // allocation blocks as needed.
1831   refillLinearAllocBlocksIfNeeded();
1832 }
1833 
1834 void
1835 CompactibleFreeListSpace::gc_prologue() {
1836   assert_locked();
1837   reportFreeListStatistics("Before GC:");
1838   refillLinearAllocBlocksIfNeeded();
1839 }
1840 
1841 void
1842 CompactibleFreeListSpace::gc_epilogue() {
1843   assert_locked();
1844   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1845   _promoInfo.stopTrackingPromotions();
1846   repairLinearAllocationBlocks();
1847   reportFreeListStatistics("After GC:");
1848 }
1849 
1850 // Iteration support, mostly delegated from a CMS generation
1851 
1852 void CompactibleFreeListSpace::save_marks() {
1853   assert(Thread::current()->is_VM_thread(),
1854          "Global variable should only be set when single-threaded");
1855   // Mark the "end" of the used space at the time of this call;
1856   // note, however, that promoted objects from this point
1857   // on are tracked in the _promoInfo below.
1858   set_saved_mark_word(unallocated_block());
1859 #ifdef ASSERT
1860   // Check the sanity of save_marks() etc.
1861   MemRegion ur    = used_region();
1862   MemRegion urasm = used_region_at_save_marks();
1863   assert(ur.contains(urasm),
1864          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1865          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1866          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1867 #endif
1868   // inform allocator that promotions should be tracked.
1869   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1870   _promoInfo.startTrackingPromotions();
1871 }
1872 
1873 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1874   assert(_promoInfo.tracking(), "No preceding save_marks?");
1875   return _promoInfo.noPromotions();
1876 }
1877 
1878 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1879                                                                             \
1880 void CompactibleFreeListSpace::                                             \
1881 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1882   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1883   /*                                                                        \
1884    * This also restores any displaced headers and removes the elements from \
1885    * the iteration set as they are processed, so that we have a clean slate \
1886    * at the end of the iteration. Note, thus, that if new objects are       \
1887    * promoted as a result of the iteration they are iterated over as well.  \
1888    */                                                                       \
1889   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1890 }
1891 
1892 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1893 
1894 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1895   return _smallLinearAllocBlock._word_size == 0;
1896 }
1897 
1898 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1899   // Fix up linear allocation blocks to look like free blocks
1900   repairLinearAllocBlock(&_smallLinearAllocBlock);
1901 }
1902 
1903 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1904   assert_locked();
1905   if (blk->_ptr != NULL) {
1906     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1907            "Minimum block size requirement");
1908     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1909     fc->set_size(blk->_word_size);
1910     fc->link_prev(NULL);   // mark as free
1911     fc->dontCoalesce();
1912     assert(fc->is_free(), "just marked it free");
1913     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1914   }
1915 }
1916 
1917 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1918   assert_locked();
1919   if (_smallLinearAllocBlock._ptr == NULL) {
1920     assert(_smallLinearAllocBlock._word_size == 0,
1921       "Size of linAB should be zero if the ptr is NULL");
1922     // Reset the linAB refill and allocation size limit.
1923     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1924   }
1925   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1926 }
1927 
1928 void
1929 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1930   assert_locked();
1931   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1932          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1933          "blk invariant");
1934   if (blk->_ptr == NULL) {
1935     refillLinearAllocBlock(blk);
1936   }
1937 }
1938 
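// Refill an empty linear allocation block, preferring a chunk from the
// indexed free lists when the refill size is small enough and falling back
// to the dictionary otherwise.  The chunk is marked non-coalescable so the
// sweeper does not reclaim it while it is being used for linear allocation.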
1939 void
1940 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1941   assert_locked();
1942   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1943          "linear allocation block should be empty");
1944   FreeChunk* fc;
1945   if (blk->_refillSize < SmallForDictionary &&
1946       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1947     // A linAB's strategy might be to use small sizes to reduce
1948     // fragmentation but still get the benefits of allocation from a
1949     // linAB.
1950   } else {
1951     fc = getChunkFromDictionary(blk->_refillSize);
1952   }
1953   if (fc != NULL) {
1954     blk->_ptr  = (HeapWord*)fc;
1955     blk->_word_size = fc->size();
1956     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1957   }
1958 }
1959 
1960 // Support for compaction
1961 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1962   scan_and_forward(this, cp);
1963   // Prepare_for_compaction() uses the space between live objects
1964   // so that a later phase can skip dead space quickly.  As a result,
1965   // verification of the free lists doesn't work after this point.
1966 }
1967 
1968 void CompactibleFreeListSpace::adjust_pointers() {
1969   // In other versions of adjust_pointers(), a bail out
1970   // based on the amount of live data in the generation
1971   // (i.e., if 0, bail out) may be used.
1972   // Cannot test used() == 0 here because the free lists have already
1973   // been mangled by the compaction.
1974 
1975   scan_and_adjust_pointers(this);
1976   // See note about verification in prepare_for_compaction().
1977 }
1978 
1979 void CompactibleFreeListSpace::compact() {
1980   scan_and_compact(this);
1981 }
1982 
1983 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1984 // where fbs is free block sizes
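// For example (illustrative sizes): two free blocks of 4 words each give
// 1 - (16 + 16)/(8 * 8) = 0.5, whereas a single free block of 8 words gives
// 1 - 64/64 = 0.0, i.e. no fragmentation.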
1985 double CompactibleFreeListSpace::flsFrag() const {
1986   size_t itabFree = totalSizeInIndexedFreeLists();
1987   double frag = 0.0;
1988   size_t i;
1989 
1990   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1991     double sz  = i;
1992     frag      += _indexedFreeList[i].count() * (sz * sz);
1993   }
1994 
1995   double totFree = itabFree +
1996                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1997   if (totFree > 0) {
1998     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1999             (totFree * totFree));
2000     frag = (double)1.0  - frag;
2001   } else {
2002     assert(frag == 0.0, "Follows from totFree == 0");
2003   }
2004   return frag;
2005 }
2006 
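// Sweep census support: at the start of a sweep, compute each indexed list's
// desired length from the inter-/intra-sweep timing estimates, derive its
// coalescing target, and snapshot the pre-sweep count and surplus; the
// dictionary maintains its own census.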
2007 void CompactibleFreeListSpace::beginSweepFLCensus(
2008   float inter_sweep_current,
2009   float inter_sweep_estimate,
2010   float intra_sweep_estimate) {
2011   assert_locked();
2012   size_t i;
2013   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2014     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2015     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2016     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2017     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2018     fl->set_before_sweep(fl->count());
2019     fl->set_bfr_surp(fl->surplus());
2020   }
2021   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2022                                     inter_sweep_current,
2023                                     inter_sweep_estimate,
2024                                     intra_sweep_estimate);
2025 }
2026 
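// Recompute each indexed list's surplus as its current count minus the
// desired count scaled by CMSSmallSplitSurplusPercent.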
2027 void CompactibleFreeListSpace::setFLSurplus() {
2028   assert_locked();
2029   size_t i;
2030   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2031     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2032     fl->set_surplus(fl->count() -
2033                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2034   }
2035 }
2036 
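// Walk the indexed lists from largest to smallest, setting each list's hint
// to the next larger size that currently has a surplus (or IndexSetSize if
// there is none); bestFitSmall() follows these hints.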
2037 void CompactibleFreeListSpace::setFLHints() {
2038   assert_locked();
2039   size_t i;
2040   size_t h = IndexSetSize;
2041   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2042     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2043     fl->set_hint(h);
2044     if (fl->surplus() > 0) {
2045       h = i;
2046     }
2047   }
2048 }
2049 
2050 void CompactibleFreeListSpace::clearFLCensus() {
2051   assert_locked();
2052   size_t i;
2053   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2054     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2055     fl->set_prev_sweep(fl->count());
2056     fl->set_coal_births(0);
2057     fl->set_coal_deaths(0);
2058     fl->set_split_births(0);
2059     fl->set_split_deaths(0);
2060   }
2061 }
2062 
2063 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2064   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2065   setFLSurplus();
2066   setFLHints();
2067   printFLCensus(sweep_count);
2068   clearFLCensus();
2069   assert_locked();
2070   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2071 }
2072 
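// A free list is considered over-populated, for coalescing purposes, when
// its current count exceeds the coalescing target computed during the sweep
// census; larger sizes defer to the dictionary's own census.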
2073 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2074   if (size < SmallForDictionary) {
2075     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2076     return (fl->coal_desired() < 0) ||
2077            ((int)fl->count() > fl->coal_desired());
2078   } else {
2079     return dictionary()->coal_dict_over_populated(size);
2080   }
2081 }
2082 
2083 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2084   assert(size < SmallForDictionary, "Size too large for indexed list");
2085   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2086   fl->increment_coal_births();
2087   fl->increment_surplus();
2088 }
2089 
2090 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2091   assert(size < SmallForDictionary, "Size too large for indexed list");
2092   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2093   fl->increment_coal_deaths();
2094   fl->decrement_surplus();
2095 }
2096 
2097 void CompactibleFreeListSpace::coalBirth(size_t size) {
2098   if (size  < SmallForDictionary) {
2099     smallCoalBirth(size);
2100   } else {
2101     dictionary()->dict_census_update(size,
2102                                    false /* split */,
2103                                    true /* birth */);
2104   }
2105 }
2106 
2107 void CompactibleFreeListSpace::coalDeath(size_t size) {
2108   if (size < SmallForDictionary) {
2109     smallCoalDeath(size);
2110   } else {
2111     dictionary()->dict_census_update(size,
2112                                    false /* split */,
2113                                    false /* birth */);
2114   }
2115 }
2116 
2117 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2118   assert(size < SmallForDictionary, "Size too large for indexed list");
2119   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2120   fl->increment_split_births();
2121   fl->increment_surplus();
2122 }
2123 
2124 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2125   assert(size < SmallForDictionary, "Size too large for indexed list");
2126   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2127   fl->increment_split_deaths();
2128   fl->decrement_surplus();
2129 }
2130 
2131 void CompactibleFreeListSpace::split_birth(size_t size) {
2132   if (size  < SmallForDictionary) {
2133     smallSplitBirth(size);
2134   } else {
2135     dictionary()->dict_census_update(size,
2136                                    true /* split */,
2137                                    true /* birth */);
2138   }
2139 }
2140 
2141 void CompactibleFreeListSpace::splitDeath(size_t size) {
2142   if (size  < SmallForDictionary) {
2143     smallSplitDeath(size);
2144   } else {
2145     dictionary()->dict_census_update(size,
2146                                    true /* split */,
2147                                    false /* birth */);
2148   }
2149 }
2150 
2151 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2152   size_t to2 = from - to1;
2153   splitDeath(from);
2154   split_birth(to1);
2155   split_birth(to2);
2156 }
2157 
2158 void CompactibleFreeListSpace::print() const {
2159   print_on(tty);
2160 }
2161 
2162 void CompactibleFreeListSpace::prepare_for_verify() {
2163   assert_locked();
2164   repairLinearAllocationBlocks();
2165   // Verify that the SpoolBlocks look like free blocks of
2166   // appropriate sizes... To be done ...
2167 }
2168 
2169 class VerifyAllBlksClosure: public BlkClosure {
2170  private:
2171   const CompactibleFreeListSpace* _sp;
2172   const MemRegion                 _span;
2173   HeapWord*                       _last_addr;
2174   size_t                          _last_size;
2175   bool                            _last_was_obj;
2176   bool                            _last_was_live;
2177 
2178  public:
2179   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2180     MemRegion span) :  _sp(sp), _span(span),
2181                        _last_addr(NULL), _last_size(0),
2182                        _last_was_obj(false), _last_was_live(false) { }
2183 
2184   virtual size_t do_blk(HeapWord* addr) {
2185     size_t res;
2186     bool   was_obj  = false;
2187     bool   was_live = false;
2188     if (_sp->block_is_obj(addr)) {
2189       was_obj = true;
2190       oop p = oop(addr);
2191       guarantee(p->is_oop(), "Should be an oop");
2192       res = _sp->adjustObjectSize(p->size());
2193       if (_sp->obj_is_alive(addr)) {
2194         was_live = true;
2195         p->verify();
2196       }
2197     } else {
2198       FreeChunk* fc = (FreeChunk*)addr;
2199       res = fc->size();
2200       if (FLSVerifyLists && !fc->cantCoalesce()) {
2201         guarantee(_sp->verify_chunk_in_free_list(fc),
2202                   "Chunk should be on a free list");
2203       }
2204     }
2205     if (res == 0) {
2206       Log(gc, verify) log;
2207       log.error("Livelock: no rank reduction!");
2208       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2209                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2210         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2211         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2212       ResourceMark rm;
2213       _sp->print_on(log.error_stream());
2214       guarantee(false, "Verification failed.");
2215     }
2216     _last_addr = addr;
2217     _last_size = res;
2218     _last_was_obj  = was_obj;
2219     _last_was_live = was_live;
2220     return res;
2221   }
2222 };
2223 
2224 class VerifyAllOopsClosure: public OopClosure {
2225  private:
2226   const CMSCollector*             _collector;
2227   const CompactibleFreeListSpace* _sp;
2228   const MemRegion                 _span;
2229   const bool                      _past_remark;
2230   const CMSBitMap*                _bit_map;
2231 
2232  protected:
2233   void do_oop(void* p, oop obj) {
2234     if (_span.contains(obj)) { // the interior oop points into CMS heap
2235       if (!_span.contains(p)) { // reference from outside CMS heap
2236         // Should be a valid object; the first disjunct below allows
2237         // us to sidestep an assertion in block_is_obj() that insists
2238         // that p be in _sp. Note that several generations (and spaces)
2239         // are spanned by _span (CMS heap) above.
2240         guarantee(!_sp->is_in_reserved(obj) ||
2241                   _sp->block_is_obj((HeapWord*)obj),
2242                   "Should be an object");
2243         guarantee(obj->is_oop(), "Should be an oop");
2244         obj->verify();
2245         if (_past_remark) {
2246           // Remark has been completed, the object should be marked
2247           _bit_map->isMarked((HeapWord*)obj);
2248         }
2249       } else { // reference within CMS heap
2250         if (_past_remark) {
2251           // Remark has been completed -- so the referent should have
2252           // been marked, if referring object is.
2253           if (_bit_map->isMarked(_collector->block_start(p))) {
2254             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2255           }
2256         }
2257       }
2258     } else if (_sp->is_in_reserved(p)) {
2259       // the reference is from FLS, and points out of FLS
2260       guarantee(obj->is_oop(), "Should be an oop");
2261       obj->verify();
2262     }
2263   }
2264 
2265   template <class T> void do_oop_work(T* p) {
2266     T heap_oop = oopDesc::load_heap_oop(p);
2267     if (!oopDesc::is_null(heap_oop)) {
2268       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2269       do_oop(p, obj);
2270     }
2271   }
2272 
2273  public:
2274   VerifyAllOopsClosure(const CMSCollector* collector,
2275     const CompactibleFreeListSpace* sp, MemRegion span,
2276     bool past_remark, CMSBitMap* bit_map) :
2277     _collector(collector), _sp(sp), _span(span),
2278     _past_remark(past_remark), _bit_map(bit_map) { }
2279 
2280   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2281   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2282 };
2283 
2284 void CompactibleFreeListSpace::verify() const {
2285   assert_lock_strong(&_freelistLock);
2286   verify_objects_initialized();
2287   MemRegion span = _collector->_span;
2288   bool past_remark = (_collector->abstract_state() ==
2289                       CMSCollector::Sweeping);
2290 
2291   ResourceMark rm;
2292   HandleMark  hm;
2293 
2294   // Check integrity of CFL data structures
2295   _promoInfo.verify();
2296   _dictionary->verify();
2297   if (FLSVerifyIndexTable) {
2298     verifyIndexedFreeLists();
2299   }
2300   // Check integrity of all objects and free blocks in space
2301   {
2302     VerifyAllBlksClosure cl(this, span);
2303     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2304   }
2305   // Check that all references in the heap to FLS
2306   // are to valid objects in FLS or that references in
2307   // FLS are to valid objects elsewhere in the heap
2308   if (FLSVerifyAllHeapReferences)
2309   {
2310     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2311       _collector->markBitMap());
2312 
2313     // Iterate over all oops in the heap. Uses the _no_header version
2314     // since we are not interested in following the klass pointers.
2315     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2316   }
2317 
2318   if (VerifyObjectStartArray) {
2319     // Verify the block offset table
2320     _bt.verify();
2321   }
2322 }
2323 
2324 #ifndef PRODUCT
2325 void CompactibleFreeListSpace::verifyFreeLists() const {
2326   if (FLSVerifyLists) {
2327     _dictionary->verify();
2328     verifyIndexedFreeLists();
2329   } else {
2330     if (FLSVerifyDictionary) {
2331       _dictionary->verify();
2332     }
2333     if (FLSVerifyIndexTable) {
2334       verifyIndexedFreeLists();
2335     }
2336   }
2337 }
2338 #endif
2339 
2340 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2341   size_t i = 0;
2342   for (; i < IndexSetStart; i++) {
2343     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2344   }
2345   for (; i < IndexSetSize; i++) {
2346     verifyIndexedFreeList(i);
2347   }
2348 }
2349 
2350 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2351   FreeChunk* fc   =  _indexedFreeList[size].head();
2352   FreeChunk* tail =  _indexedFreeList[size].tail();
2353   size_t    num = _indexedFreeList[size].count();
2354   size_t      n = 0;
2355   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2356             "Slot should have been empty");
2357   for (; fc != NULL; fc = fc->next(), n++) {
2358     guarantee(fc->size() == size, "Size inconsistency");
2359     guarantee(fc->is_free(), "!free?");
2360     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2361     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2362   }
2363   guarantee(n == num, "Incorrect count");
2364 }
2365 
2366 #ifndef PRODUCT
2367 void CompactibleFreeListSpace::check_free_list_consistency() const {
2368   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2369     "Some sizes can't be allocated without recourse to"
2370     " linear allocation buffers");
2371   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2372     "else MIN_TREE_CHUNK_SIZE is wrong");
2373   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2374   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2375 }
2376 #endif
2377 
2378 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2379   assert_lock_strong(&_freelistLock);
2380   LogTarget(Debug, gc, freelist, census) log;
2381   if (!log.is_enabled()) {
2382     return;
2383   }
2384   AdaptiveFreeList<FreeChunk> total;
2385   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2386   ResourceMark rm;
2387   outputStream* out = log.stream();
2388   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2389   size_t total_free = 0;
2390   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2391     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2392     total_free += fl->count() * fl->size();
2393     if (i % (40*IndexSetStride) == 0) {
2394       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2395     }
2396     fl->print_on(out);
2397     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2398     total.set_surplus(    total.surplus()     + fl->surplus()    );
2399     total.set_desired(    total.desired()     + fl->desired()    );
2400     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2401     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2402     total.set_count(      total.count()       + fl->count()      );
2403     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2404     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2405     total.set_split_births(total.split_births() + fl->split_births());
2406     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2407   }
2408   total.print_on(out, "TOTAL");
2409   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2410   log.print("growth: %8.5f  deficit: %8.5f",
2411             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2412                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2413             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2414   _dictionary->print_dict_census(out);
2415 }
2416 
2417 ///////////////////////////////////////////////////////////////////////////
2418 // CompactibleFreeListSpaceLAB
2419 ///////////////////////////////////////////////////////////////////////////
2420 
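// Per-GC-worker local allocation buffers layered over the shared
// CompactibleFreeListSpace: each worker keeps private indexed free lists
// that are refilled in bulk from, and retired back to, the shared space.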
2421 #define VECTOR_257(x)                                                                                  \
2422   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2423   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2424      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2425      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2426      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2427      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2428      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2429      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2430      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2431      x }
2432 
2433 // Initialize with default setting for CMS, _not_
2434 // generic OldPLABSize, whose static default is different; if overridden at the
2435 // command-line, this will get reinitialized via a call to
2436 // modify_initialization() below.
2437 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2438   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2439 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2440 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2441 
2442 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2443   _cfls(cfls)
2444 {
2445   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2446   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2447        i < CompactibleFreeListSpace::IndexSetSize;
2448        i += CompactibleFreeListSpace::IndexSetStride) {
2449     _indexedFreeList[i].set_size(i);
2450     _num_blocks[i] = 0;
2451   }
2452 }
2453 
2454 static bool _CFLS_LAB_modified = false;
2455 
2456 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2457   assert(!_CFLS_LAB_modified, "Call only once");
2458   _CFLS_LAB_modified = true;
2459   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2460        i < CompactibleFreeListSpace::IndexSetSize;
2461        i += CompactibleFreeListSpace::IndexSetStride) {
2462     _blocks_to_claim[i].modify(n, wt, true /* force */);
2463   }
2464 }
2465 
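// Allocate word_sz words on behalf of a GC worker: large requests go
// straight to the shared dictionary (under the parallel allocation lock),
// while small requests are served from this worker's local indexed free
// lists, which are refilled in bulk from the global pool as needed.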
2466 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2467   FreeChunk* res;
2468   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2469   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2470     // This locking manages sync with other large object allocations.
2471     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2472                     Mutex::_no_safepoint_check_flag);
2473     res = _cfls->getChunkFromDictionaryExact(word_sz);
2474     if (res == NULL) return NULL;
2475   } else {
2476     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2477     if (fl->count() == 0) {
2478       // Attempt to refill this local free list.
2479       get_from_global_pool(word_sz, fl);
2480       // If it didn't work, give up.
2481       if (fl->count() == 0) return NULL;
2482     }
2483     res = fl->get_chunk_at_head();
2484     assert(res != NULL, "Why was count non-zero?");
2485   }
2486   res->markNotFree();
2487   assert(!res->is_free(), "shouldn't be marked free");
2488   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2489   // mangle a just allocated object with a distinct pattern.
2490   debug_only(res->mangleAllocated(word_sz));
2491   return (HeapWord*)res;
2492 }
2493 
2494 // Get a chunk of blocks of the right size and update related
2495 // book-keeping stats
2496 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2497   // Get the #blocks we want to claim
2498   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2499   assert(n_blks > 0, "Error");
2500   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2501   // In some cases, when the application has a phase change,
2502   // there may be a sudden and sharp shift in the object survival
2503   // profile, and updating the counts at the end of a scavenge
2504   // may not be quick enough, giving rise to large scavenge pauses
2505   // during these phase changes. It is beneficial to detect such
2506   // changes on-the-fly during a scavenge and avoid such a phase-change
2507   // pothole. The following code is a heuristic attempt to do that.
2508   // It is protected by a product flag until we have gained
2509   // enough experience with this heuristic and fine-tuned its behavior.
2510   // WARNING: This might increase fragmentation if we overreact to
2511   // small spikes, so some kind of historical smoothing based on
2512   // previous experience with the greater reactivity might be useful.
2513   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2514   // default.
2515   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2516     //
2517     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2518     // which is why there is a cast to double.
2519     //
2520     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2521     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2522     n_blks = MIN2(n_blks, CMSOldPLABMax);
2523   }
2524   assert(n_blks > 0, "Error");
2525   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2526   // Update stats table entry for this block size
2527   _num_blocks[word_sz] += fl->count();
2528 }
2529 
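// Fold the per-size block usage reported by the workers into the smoothed
// _blocks_to_claim averages (when ResizeOldPLAB) and reset the global
// counters for the next collection.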
2530 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2531   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2532        i < CompactibleFreeListSpace::IndexSetSize;
2533        i += CompactibleFreeListSpace::IndexSetStride) {
2534     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2535            "Counter inconsistency");
2536     if (_global_num_workers[i] > 0) {
2537       // Need to smooth wrt historical average
2538       if (ResizeOldPLAB) {
2539         _blocks_to_claim[i].sample(
2540           MAX2(CMSOldPLABMin,
2541           MIN2(CMSOldPLABMax,
2542                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2543       }
2544       // Reset counters for next round
2545       _global_num_workers[i] = 0;
2546       _global_num_blocks[i] = 0;
2547       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2548     }
2549   }
2550 }
2551 
2552 // If this is changed in the future to allow parallel
2553 // access, one would need to take the FL locks and,
2554 // depending on how it is used, stagger access from
2555 // parallel threads to reduce contention.
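     // An illustrative pass over one size class (hypothetical numbers): if this
     // worker obtained 20 blocks of size i during the scavenge
     // (_num_blocks[i] == 20) and 3 of them are still unused on its local list,
     // then num_retire == 3, the 17 blocks that were actually used are added to
     // _global_num_blocks[i], and the 3 leftover chunks are prepended back onto
     // the shared indexed free list for that size.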
2556 void CompactibleFreeListSpaceLAB::retire(int tid) {
2557   // We run this single-threaded with the world stopped,
2558   // so there is no need for locks and such.
2560   assert(Thread::current()->is_VM_thread(), "Error");
2561   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2562        i < CompactibleFreeListSpace::IndexSetSize;
2563        i += CompactibleFreeListSpace::IndexSetStride) {
2564     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2565            "Can't retire more than what we obtained");
2566     if (_num_blocks[i] > 0) {
2567       size_t num_retire =  _indexedFreeList[i].count();
2568       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2569       {
2570         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2571         //                Mutex::_no_safepoint_check_flag);
2572 
2573         // Update global stats for num_blocks used
2574         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2575         _global_num_workers[i]++;
2576         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2577         if (num_retire > 0) {
2578           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2579           // Reset this list.
2580           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2581           _indexedFreeList[i].set_size(i);
2582         }
2583       }
2584       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2585                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2586       // Reset stats for next round
2587       _num_blocks[i]         = 0;
2588     }
2589   }
2590 }
2591 
2592 // Used by par_get_chunk_of_blocks() for the chunks from the
2593 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2594 // of "word_sz" and, if one is found, splits it into chunks of "word_sz"
2595 // words and adds them to the free list "fl".  "n" is the maximum number
2596 // of chunks to be added to "fl".
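     // A sketch with hypothetical numbers: for word_sz == 8 the loop below
     // probes the indexed lists for sizes 8, 16, 24, ...  If the first
     // non-empty list is found at cur_sz == 24 (k == 3), roughly n/3 chunks are
     // taken from it and each is carved, from the right, into three 8-word
     // chunks that are pushed onto "fl".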
2597 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2598 
2599   // We'll try all multiples of word_sz in the indexed set: word_sz itself and,
2600   // if CMSSplitIndexedFreeListBlocks, larger multiples as well.  The fallback of
2601   // splitting a big chunk from the dictionary is handled by the caller, par_get_chunk_of_blocks().
2602   {
2603     bool found;
2604     int  k;
2605     size_t cur_sz;
2606     for (k = 1, cur_sz = k * word_sz, found = false;
2607          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2608          (CMSSplitIndexedFreeListBlocks || k <= 1);
2609          k++, cur_sz = k * word_sz) {
2610       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2611       fl_for_cur_sz.set_size(cur_sz);
2612       {
2613         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2614                         Mutex::_no_safepoint_check_flag);
2615         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2616         if (gfl->count() != 0) {
2617           // nn is the number of chunks of size cur_sz that
2618           // we'd need to split k-ways each, in order to create
2619           // "n" chunks of size word_sz each.
2620           const size_t nn = MAX2(n/k, (size_t)1);
2621           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2622           found = true;
2623           if (k > 1) {
2624             // Update split death stats for the cur_sz-size blocks list:
2625             // we increment the split death count by the number of blocks
2626             // we just took from the cur_sz-size blocks list and which
2627             // we will be splitting below.
2628             ssize_t deaths = gfl->split_deaths() +
2629                              fl_for_cur_sz.count();
2630             gfl->set_split_deaths(deaths);
2631           }
2632         }
2633       }
2634       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2635       if (found) {
2636         if (k == 1) {
2637           fl->prepend(&fl_for_cur_sz);
2638         } else {
2639           // Divide each block on fl_for_cur_sz up k ways.
2640           FreeChunk* fc;
2641           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2642             // Must do this in reverse order, so that anybody attempting to
2643             // access the main chunk sees it as a single free block until we
2644             // change it.
2645             size_t fc_size = fc->size();
2646             assert(fc->is_free(), "Error");
2647             for (int i = k-1; i >= 0; i--) {
2648               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2649               assert((i != 0) ||
2650                         ((fc == ffc) && ffc->is_free() &&
2651                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2652                         "Counting error");
2653               ffc->set_size(word_sz);
2654               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2655               ffc->link_next(NULL);
2656               // Above must occur before BOT is updated below.
2657               OrderAccess::storestore();
2658               // splitting from the right: after the decrement below, fc_size == i * word_sz
2659               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2660               fc_size -= word_sz;
2661               assert(fc_size == i*word_sz, "Error");
2662               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2663               _bt.verify_single_block((HeapWord*)fc, fc_size);
2664               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2665               // Push this on "fl".
2666               fl->return_chunk_at_head(ffc);
2667             }
2668             // Sanity check the resulting list invariant.
2669             assert(fl->tail()->next() == NULL, "List invariant.");
2670           }
2671         }
2672         // Update birth stats for this block size.
2673         size_t num = fl->count();
2674         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2675                         Mutex::_no_safepoint_check_flag);
2676         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2677         _indexedFreeList[word_sz].set_split_births(births);
2678         return true;
2679       }
2680     }
2681     return found;
2682   }
2683 }
2684 
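     // Attempt to take from the dictionary a single chunk large enough to be
     // carved into (up to) "n" chunks of "word_sz" words each; returns NULL if
     // nothing suitable is found.  A sketch with hypothetical numbers: for
     // word_sz == 10 and n == 16 we ask the dictionary for at least
     // 16 * 10 == 160 words (or its minimum size, if larger).  If it hands back
     // a 175-word chunk, n stays 16 and rem == 15; if rem is at least
     // MinChunkSize it becomes a remainder chunk (returned to the indexed lists
     // or the dictionary depending on its size), otherwise n drops to 15 and
     // rem grows to 25.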
2685 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2686 
2687   FreeChunk* fc = NULL;
2688   FreeChunk* rem_fc = NULL;
2689   size_t rem;
2690   {
2691     MutexLockerEx x(parDictionaryAllocLock(),
2692                     Mutex::_no_safepoint_check_flag);
2693     while (n > 0) {
2694       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2695                                   FreeBlockDictionary<FreeChunk>::atLeast);
2696       if (fc != NULL) {
2697         break;
2698       } else {
2699         n--;
2700       }
2701     }
2702     if (fc == NULL) return NULL;
2703     // Otherwise, split up that block.
2704     assert((ssize_t)n >= 1, "Control point invariant");
2705     assert(fc->is_free(), "Error: should be a free block");
2706     _bt.verify_single_block((HeapWord*)fc, fc->size());
2707     const size_t nn = fc->size() / word_sz;
2708     n = MIN2(nn, n);
2709     assert((ssize_t)n >= 1, "Control point invariant");
2710     rem = fc->size() - n * word_sz;
2711     // If there is a remainder, and it's too small, allocate one fewer.
2712     if (rem > 0 && rem < MinChunkSize) {
2713       n--; rem += word_sz;
2714     }
2715     // Note that at this point we may have n == 0.
2716     assert((ssize_t)n >= 0, "Control point invariant");
2717 
2718     // If n is 0, the chunk fc that was found is not large
2719     // enough to leave a viable remainder.  We are unable to
2720     // allocate even one block.  Return fc to the dictionary
2721     // and return NULL; the caller's free list stays empty.
2722     if (n == 0) {
2723       returnChunkToDictionary(fc);
2724       return NULL;
2725     }
2726 
2727     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2728     dictionary()->dict_census_update(fc->size(),
2729                                      true /*split*/,
2730                                      false /*birth*/);
2731 
2732     // First return the remainder, if any.
2733     // Note that we hold the lock until we decide if we're going to give
2734     // back the remainder to the dictionary, since a concurrent allocation
2735     // may otherwise see the heap as empty.  (We're willing to take that
2736     // hit if the block is a small block.)
2737     if (rem > 0) {
2738       size_t prefix_size = n * word_sz;
2739       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2740       rem_fc->set_size(rem);
2741       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2742       rem_fc->link_next(NULL);
2743       // Above must occur before BOT is updated below.
2744       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2745       OrderAccess::storestore();
2746       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2747       assert(fc->is_free(), "Error");
2748       fc->set_size(prefix_size);
2749       if (rem >= IndexSetSize) {
2750         returnChunkToDictionary(rem_fc);
2751         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2752         rem_fc = NULL;
2753       }
2754       // Otherwise, return it to the small list below.
2755     }
2756   }
2757   if (rem_fc != NULL) {
2758     MutexLockerEx x(_indexedFreeListParLocks[rem],
2759                     Mutex::_no_safepoint_check_flag);
2760     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2761     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2762     smallSplitBirth(rem);
2763   }
2764   assert(n * word_sz == fc->size(),
2765          "Chunk size " SIZE_FORMAT " is not exactly splittable by "
2766          SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
2767          fc->size(), n, word_sz);
2768   return fc;
2769 }
2770 
2771 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2772 
2773   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2774 
2775   if (fc == NULL) {
2776     return;
2777   }
2778 
2779   size_t n = fc->size() / word_sz;
2780 
2781   assert((ssize_t)n > 0, "Consistency");
2782   // Now do the splitting up.
2783   // Must do this in reverse order, so that anybody attempting to
2784   // access the main chunk sees it as a single free block until we
2785   // change it.
2786   size_t fc_size = n * word_sz;
2787   // All but first chunk in this loop
2788   for (ssize_t i = n-1; i > 0; i--) {
2789     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2790     ffc->set_size(word_sz);
2791     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2792     ffc->link_next(NULL);
2793     // Above must occur before BOT is updated below.
2794     OrderAccess::storestore();
2795     // splitting from the right: after the decrement below, fc_size == i * word_sz
2796     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2797     fc_size -= word_sz;
2798     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2799     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2800     _bt.verify_single_block((HeapWord*)fc, fc_size);
2801     // Push this on "fl".
2802     fl->return_chunk_at_head(ffc);
2803   }
2804   // First chunk
2805   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2806   // The blocks above should show their new sizes before the first block below
2807   fc->set_size(word_sz);
2808   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2809   fc->link_next(NULL);
2810   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2811   _bt.verify_single_block((HeapWord*)fc, fc->size());
2812   fl->return_chunk_at_head(fc);
2813 
2814   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2815   {
2816     // Update the stats for this block size.
2817     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2818                     Mutex::_no_safepoint_check_flag);
2819     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2820     _indexedFreeList[word_sz].set_split_births(births);
2821     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2822     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2823   }
2824 
2825   // Sanity check the resulting list invariant.
2826   assert(fl->tail()->next() == NULL, "List invariant.");
2827 }
2828 
2829 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2830   assert(fl->count() == 0, "Precondition.");
2831   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2832          "Precondition");
2833 
2834   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2835     // Got it
2836     return;
2837   }
2838 
2839   // Otherwise, we'll split a block from the dictionary.
2840   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2841 }
2842 
2843 // Set up the space's par_seq_tasks structure for work claiming
2844 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2845 // XXX Need to suitably abstract and generalize this and the next
2846 // method into one.
2847 void
2848 CompactibleFreeListSpace::
2849 initialize_sequential_subtasks_for_rescan(int n_threads) {
2850   // The "size" of each task is fixed according to rescan_task_size.
2851   assert(n_threads > 0, "Unexpected n_threads argument");
2852   const size_t task_size = rescan_task_size();
2853   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
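       // For example (hypothetical numbers): a used region of 1,000,000 words
       // with a task_size of 65,536 words yields
       // n_tasks == (1,000,000 + 65,535) / 65,536 == 16, i.e. a ceiling division.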
2854   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2855   assert(n_tasks == 0 ||
2856          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2857           (used_region().start() + n_tasks*task_size >= used_region().end())),
2858          "n_tasks calculation incorrect");
2859   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2860   assert(!pst->valid(), "Clobbering existing data?");
2861   // Sets the condition for completion of the subtask (how many threads
2862   // need to finish in order to be done).
2863   pst->set_n_threads(n_threads);
2864   pst->set_n_tasks((int)n_tasks);
2865 }
2866 
2867 // Set up the space's par_seq_tasks structure for work claiming
2868 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2869 void
2870 CompactibleFreeListSpace::
2871 initialize_sequential_subtasks_for_marking(int n_threads,
2872                                            HeapWord* low) {
2873   // The "size" of each task is fixed according to marking_task_size.
2874   assert(n_threads > 0, "Unexpected n_threads argument");
2875   const size_t task_size = marking_task_size();
2876   assert(task_size > CardTableModRefBS::card_size_in_words &&
2877          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2878          "Otherwise arithmetic below would be incorrect");
2879   MemRegion span = _old_gen->reserved();
2880   if (low != NULL) {
2881     if (span.contains(low)) {
2882       // Align low down to a card boundary so that
2883       // we can use block_offset_careful() on span boundaries.
2884       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2885                                  CardTableModRefBS::card_size);
2886       // Clip span prefix at aligned_low
2887       span = span.intersection(MemRegion(aligned_low, span.end()));
2888     } else if (low > span.end()) {
2889       span = MemRegion(low, low);  // Null region
2890     } // else use entire span
2891   }
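       // For example, with the typical 512-byte card size a "low" at address
       // 0x20a0 inside the span is aligned down to 0x2000 before the span
       // prefix is clipped there.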
2892   assert(span.is_empty() ||
2893          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2894         "span should start at a card boundary");
2895   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2896   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2897   assert(n_tasks == 0 ||
2898          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2899           (span.start() + n_tasks*task_size >= span.end())),
2900          "n_tasks calculation incorrect");
2901   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2902   assert(!pst->valid(), "Clobbering existing data?");
2903   // Sets the condition for completion of the subtask (how many threads
2904   // need to finish in order to be done).
2905   pst->set_n_threads(n_threads);
2906   pst->set_n_tasks((int)n_tasks);
2907 }