1 /*
   2  * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/liveRange.hpp"
  34 #include "gc/shared/space.inline.hpp"
  35 #include "gc/shared/spaceDecorator.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
   52 // Highest rank used for the free list locks
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
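      // Note: each CompactibleFreeListSpace constructor consumes the current
      // value and decrements it (see the _lockRank-- in the constructor below),
      // so spaces constructed later get lower-ranked free list locks.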
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
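        // Illustrative example (values are platform dependent): with 8-byte
        // words and 8-byte object alignment, a 24-byte FreeChunk would give
        // MinChunkSize = align_size_up(24, 8) / 8 = 3 words, and hence
        // IndexSetStart = 3 and IndexSetStride = MinObjAlignment = 1.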
  73 }
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  77   _bt(bs, mr),
   78   // Free list locks are in the range of values taken by _lockRank.
   79   // This range currently is [_leaf+2, _leaf+3].
   80   // Note: this requires that CFLspace c'tors
   81   // are called serially in the order in which the locks
   82   // are acquired in the program text. This is true today.
  83   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  84                 Monitor::_safepoint_check_sometimes),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true,
  87                           Monitor::_safepoint_check_never),
  88   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  89                     CMSRescanMultiple),
  90   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  91                     CMSConcMarkMultiple),
  92   _collector(NULL),
  93   _preconsumptionDirtyCardClosure(NULL)
  94 {
  95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  96          "FreeChunk is larger than expected");
  97   _bt.set_space(this);
  98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  99 
 100   _dictionary = new AFLBinaryTreeDictionary(mr);
 101 
 102   assert(_dictionary != NULL, "CMS dictionary initialization");
 103   // The indexed free lists are initially all empty and are lazily
 104   // filled in on demand. Initialize the array elements to NULL.
 105   initializeIndexedFreeListArray();
 106 
 107   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 108                              SmallForLinearAlloc);
 109 
 110   // CMSIndexedFreeListReplenish should be at least 1
 111   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 112   _promoInfo.setSpace(this);
 113   if (UseCMSBestFit) {
 114     _fitStrategy = FreeBlockBestFitFirst;
 115   } else {
 116     _fitStrategy = FreeBlockStrategyNone;
 117   }
 118   check_free_list_consistency();
 119 
 120   // Initialize locks for parallel case.
 121   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 122     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 123                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 124     DEBUG_ONLY(
 125       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 126     )
 127   }
 128   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 129 }
 130 
 131 // Like CompactibleSpace forward() but always calls cross_threshold() to
 132 // update the block offset table.  Removed initialize_threshold call because
 133 // CFLS does not use a block offset array for contiguous spaces.
 134 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 135                                     CompactPoint* cp, HeapWord* compact_top) {
 136   // q is alive
 137   // First check if we should switch compaction space
 138   assert(this == cp->space, "'this' should be current compaction space.");
 139   size_t compaction_max_size = pointer_delta(end(), compact_top);
 140   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 141     "virtual adjustObjectSize_v() method is not correct");
 142   size_t adjusted_size = adjustObjectSize(size);
 143   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 144          "no small fragments allowed");
 145   assert(minimum_free_block_size() == MinChunkSize,
 146          "for de-virtualized reference below");
  147   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 148   if (adjusted_size + MinChunkSize > compaction_max_size &&
 149       adjusted_size != compaction_max_size) {
 150     do {
 151       // switch to next compaction space
 152       cp->space->set_compaction_top(compact_top);
 153       cp->space = cp->space->next_compaction_space();
 154       if (cp->space == NULL) {
 155         cp->gen = GenCollectedHeap::heap()->young_gen();
 156         assert(cp->gen != NULL, "compaction must succeed");
 157         cp->space = cp->gen->first_compaction_space();
 158         assert(cp->space != NULL, "generation must have a first compaction space");
 159       }
 160       compact_top = cp->space->bottom();
 161       cp->space->set_compaction_top(compact_top);
  162       // The correct adjusted_size may not be the same as that for this method
  163       // (i.e., cp->space may no longer be "this"), so adjust the size again.
  164       // Use the virtual method here; above, the non-virtual method was used
  165       // to save a virtual dispatch.
 166       adjusted_size = cp->space->adjust_object_size_v(size);
 167       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 168       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 169     } while (adjusted_size > compaction_max_size);
 170   }
 171 
 172   // store the forwarding pointer into the mark word
 173   if ((HeapWord*)q != compact_top) {
 174     q->forward_to(oop(compact_top));
 175     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 176   } else {
 177     // if the object isn't moving we can just set the mark to the default
 178     // mark and handle it specially later on.
 179     q->init_mark();
 180     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 181   }
 182 
 183   compact_top += adjusted_size;
 184 
 185   // we need to update the offset table so that the beginnings of objects can be
 186   // found during scavenge.  Note that we are updating the offset table based on
 187   // where the object will be once the compaction phase finishes.
 188 
  189   // Always call cross_threshold().  A contiguous space calls it only when
  190   // the compaction top exceeds the current threshold; that optimization
  191   // does not apply to a non-contiguous space.
 192   cp->threshold =
 193     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 194   return compact_top;
 195 }
 196 
 197 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 198 // and use of single_block instead of alloc_block.  The name here is not really
 199 // appropriate - maybe a more general name could be invented for both the
 200 // contiguous and noncontiguous spaces.
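      // Here single_block() records the whole [start, the_end) range as one
      // block in the block offset table, and end() is returned as the caller's
      // new threshold.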
 201 
 202 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 203   _bt.single_block(start, the_end);
 204   return end();
 205 }
 206 
  207 // Initialize the indexed free lists to be empty.
 208 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 209   for (size_t i = 0; i < IndexSetSize; i++) {
 210     // Note that on platforms where objects are double word aligned,
 211     // the odd array elements are not used.  It is convenient, however,
 212     // to map directly from the object size to the array element.
 213     _indexedFreeList[i].reset(IndexSetSize);
 214     _indexedFreeList[i].set_size(i);
 215     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 216     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 217     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 219   }
 220 }
 221 
 222 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 223   for (size_t i = 1; i < IndexSetSize; i++) {
 224     assert(_indexedFreeList[i].size() == (size_t) i,
 225       "Indexed free list sizes are incorrect");
 226     _indexedFreeList[i].reset(IndexSetSize);
 227     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 228     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 229     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 230     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 231   }
 232 }
 233 
 234 void CompactibleFreeListSpace::reset(MemRegion mr) {
 235   resetIndexedFreeListArray();
 236   dictionary()->reset();
 237   if (BlockOffsetArrayUseUnallocatedBlock) {
 238     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 239     // Everything's allocated until proven otherwise.
 240     _bt.set_unallocated_block(end());
 241   }
 242   if (!mr.is_empty()) {
 243     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 244     _bt.single_block(mr.start(), mr.word_size());
 245     FreeChunk* fc = (FreeChunk*) mr.start();
 246     fc->set_size(mr.word_size());
  247     if (mr.word_size() >= IndexSetSize) {
 248       returnChunkToDictionary(fc);
 249     } else {
 250       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 251       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 252     }
 253     coalBirth(mr.word_size());
 254   }
 255   _promoInfo.reset();
 256   _smallLinearAllocBlock._ptr = NULL;
 257   _smallLinearAllocBlock._word_size = 0;
 258 }
 259 
 260 void CompactibleFreeListSpace::reset_after_compaction() {
 261   // Reset the space to the new reality - one free chunk.
 262   MemRegion mr(compaction_top(), end());
 263   reset(mr);
 264   // Now refill the linear allocation block(s) if possible.
 265   refillLinearAllocBlocksIfNeeded();
 266 }
 267 
 268 // Walks the entire dictionary, returning a coterminal
 269 // chunk, if it exists. Use with caution since it involves
 270 // a potentially complete walk of a potentially large tree.
 271 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 272 
 273   assert_lock_strong(&_freelistLock);
 274 
 275   return dictionary()->find_chunk_ends_at(end());
 276 }
 277 
 278 
 279 #ifndef PRODUCT
 280 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 281   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 282     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 283   }
 284 }
 285 
 286 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 287   size_t sum = 0;
 288   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 289     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 290   }
 291   return sum;
 292 }
 293 
 294 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 295   size_t count = 0;
 296   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 297     debug_only(
 298       ssize_t total_list_count = 0;
 299       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 300          fc = fc->next()) {
 301         total_list_count++;
 302       }
 303       assert(total_list_count ==  _indexedFreeList[i].count(),
 304         "Count in list is incorrect");
 305     )
 306     count += _indexedFreeList[i].count();
 307   }
 308   return count;
 309 }
 310 
 311 size_t CompactibleFreeListSpace::totalCount() {
 312   size_t num = totalCountInIndexedFreeLists();
 313   num +=  dictionary()->total_count();
 314   if (_smallLinearAllocBlock._word_size != 0) {
 315     num++;
 316   }
 317   return num;
 318 }
 319 #endif
 320 
 321 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 322   FreeChunk* fc = (FreeChunk*) p;
 323   return fc->is_free();
 324 }
 325 
 326 size_t CompactibleFreeListSpace::used() const {
 327   return capacity() - free();
 328 }
 329 
 330 size_t CompactibleFreeListSpace::free() const {
 331   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 332   // if you do this while the structures are in flux you
 333   // may get an approximate answer only; for instance
 334   // because there is concurrent allocation either
 335   // directly by mutators or for promotion during a GC.
 336   // It's "MT-safe", however, in the sense that you are guaranteed
 337   // not to crash and burn, for instance, because of walking
 338   // pointers that could disappear as you were walking them.
 339   // The approximation is because the various components
  340   // that are read below are not read atomically (and,
  341   // further, the computation of totalSizeInIndexedFreeLists()
  342   // is itself non-atomic). The normal use of
 343   // this is during a resize operation at the end of GC
 344   // and at that time you are guaranteed to get the
 345   // correct actual value. However, for instance, this is
 346   // also read completely asynchronously by the "perf-sampler"
 347   // that supports jvmstat, and you are apt to see the values
 348   // flicker in such cases.
 349   assert(_dictionary != NULL, "No _dictionary?");
 350   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 351           totalSizeInIndexedFreeLists() +
 352           _smallLinearAllocBlock._word_size) * HeapWordSize;
 353 }
 354 
 355 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 356   assert(_dictionary != NULL, "No _dictionary?");
 357   assert_locked();
 358   size_t res = _dictionary->max_chunk_size();
 359   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 360                        (size_t) SmallForLinearAlloc - 1));
 361   // XXX the following could potentially be pretty slow;
 362   // should one, pessimistically for the rare cases when res
 363   // calculated above is less than IndexSetSize,
 364   // just return res calculated above? My reasoning was that
 365   // those cases will be so rare that the extra time spent doesn't
 366   // really matter....
 367   // Note: do not change the loop test i >= res + IndexSetStride
 368   // to i > res below, because i is unsigned and res may be zero.
 369   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 370        i -= IndexSetStride) {
 371     if (_indexedFreeList[i].head() != NULL) {
 372       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 373       return i;
 374     }
 375   }
 376   return res;
 377 }
 378 
 379 void LinearAllocBlock::print_on(outputStream* st) const {
 380   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 381             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 382             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 383 }
 384 
 385 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 386   st->print_cr("COMPACTIBLE FREELIST SPACE");
 387   st->print_cr(" Space:");
 388   Space::print_on(st);
 389 
 390   st->print_cr("promoInfo:");
 391   _promoInfo.print_on(st);
 392 
 393   st->print_cr("_smallLinearAllocBlock");
 394   _smallLinearAllocBlock.print_on(st);
 395 
 396   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 397 
 398   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 399 }
 400 
 401 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 402 const {
 403   reportIndexedFreeListStatistics(st);
 404   st->print_cr("Layout of Indexed Freelists");
 405   st->print_cr("---------------------------");
 406   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 407   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 408     _indexedFreeList[i].print_on(st);
 409     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 410       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 411                    p2i(fc), p2i((HeapWord*)fc + i),
 412                    fc->cantCoalesce() ? "\t CC" : "");
 413     }
 414   }
 415 }
 416 
 417 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 418 const {
 419   _promoInfo.print_on(st);
 420 }
 421 
 422 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 423 const {
 424   _dictionary->report_statistics(st);
 425   st->print_cr("Layout of Freelists in Tree");
 426   st->print_cr("---------------------------");
 427   _dictionary->print_free_lists(st);
 428 }
 429 
 430 class BlkPrintingClosure: public BlkClosure {
 431   const CMSCollector*             _collector;
 432   const CompactibleFreeListSpace* _sp;
 433   const CMSBitMap*                _live_bit_map;
 434   const bool                      _post_remark;
 435   outputStream*                   _st;
 436 public:
 437   BlkPrintingClosure(const CMSCollector* collector,
 438                      const CompactibleFreeListSpace* sp,
 439                      const CMSBitMap* live_bit_map,
 440                      outputStream* st):
 441     _collector(collector),
 442     _sp(sp),
 443     _live_bit_map(live_bit_map),
 444     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 445     _st(st) { }
 446   size_t do_blk(HeapWord* addr);
 447 };
 448 
 449 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 450   size_t sz = _sp->block_size_no_stall(addr, _collector);
 451   assert(sz != 0, "Should always be able to compute a size");
 452   if (_sp->block_is_obj(addr)) {
 453     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 454     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 455       p2i(addr),
 456       dead ? "dead" : "live",
 457       sz,
 458       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 459     if (CMSPrintObjectsInDump && !dead) {
 460       oop(addr)->print_on(_st);
 461       _st->print_cr("--------------------------------------");
 462     }
 463   } else { // free block
 464     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 465       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 466     if (CMSPrintChunksInDump) {
 467       ((FreeChunk*)addr)->print_on(_st);
 468       _st->print_cr("--------------------------------------");
 469     }
 470   }
 471   return sz;
 472 }
 473 
 474 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 475   st->print_cr("=========================");
 476   st->print_cr("Block layout in CMS Heap:");
 477   st->print_cr("=========================");
 478   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 479   blk_iterate(&bpcl);
 480 
 481   st->print_cr("=======================================");
 482   st->print_cr("Order & Layout of Promotion Info Blocks");
 483   st->print_cr("=======================================");
 484   print_promo_info_blocks(st);
 485 
 486   st->print_cr("===========================");
 487   st->print_cr("Order of Indexed Free Lists");
  488   st->print_cr("===========================");
 489   print_indexed_free_lists(st);
 490 
 491   st->print_cr("=================================");
 492   st->print_cr("Order of Free Lists in Dictionary");
 493   st->print_cr("=================================");
 494   print_dictionary_free_lists(st);
 495 }
 496 
 497 
 498 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 499   assert_lock_strong(&_freelistLock);
 500   LogHandle(gc, freelist, stats) log;
 501   if (!log.is_debug()) {
 502     return;
 503   }
 504   log.debug("%s", title);
 505   _dictionary->report_statistics(log.debug_stream());
 506   if (log.is_trace()) {
 507     ResourceMark rm;
 508     reportIndexedFreeListStatistics(log.trace_stream());
 509     size_t total_size = totalSizeInIndexedFreeLists() +
 510                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 511     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 512   }
 513 }
 514 
 515 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 516   assert_lock_strong(&_freelistLock);
 517   st->print_cr("Statistics for IndexedFreeLists:");
 518   st->print_cr("--------------------------------");
 519   size_t total_size = totalSizeInIndexedFreeLists();
 520   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 521   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 522   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 523   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 524   if (free_blocks != 0) {
 525     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 526   }
 527 }
 528 
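      // Count the free chunks on the indexed free lists. In debug builds each
      // list's cached count is cross-checked against a walk of the list.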
 529 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 530   size_t res = 0;
 531   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 532     debug_only(
 533       ssize_t recount = 0;
 534       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 535          fc = fc->next()) {
 536         recount += 1;
 537       }
 538       assert(recount == _indexedFreeList[i].count(),
 539         "Incorrect count in list");
 540     )
 541     res += _indexedFreeList[i].count();
 542   }
 543   return res;
 544 }
 545 
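      // Return the largest chunk size (in words) for which an indexed free list
      // is non-empty, or 0 if all indexed lists are empty.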
 546 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 547   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 548     if (_indexedFreeList[i].head() != NULL) {
 549       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 550       return (size_t)i;
 551     }
 552   }
 553   return 0;
 554 }
 555 
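      // Adjust the space to a new end. When the space grows, the newly covered
      // words [prevEnd, value) are added to the free lists (possibly coalescing
      // with the last free block) and the block offset table is updated.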
 556 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 557   HeapWord* prevEnd = end();
 558   assert(prevEnd != value, "unnecessary set_end call");
 559   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 560         "New end is below unallocated block");
 561   _end = value;
 562   if (prevEnd != NULL) {
 563     // Resize the underlying block offset table.
 564     _bt.resize(pointer_delta(value, bottom()));
 565     if (value <= prevEnd) {
 566       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 567              "New end is below unallocated block");
 568     } else {
 569       // Now, take this new chunk and add it to the free blocks.
 570       // Note that the BOT has not yet been updated for this block.
 571       size_t newFcSize = pointer_delta(value, prevEnd);
 572       // Add the block to the free lists, if possible coalescing it
 573       // with the last free block, and update the BOT and census data.
 574       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 575     }
 576   }
 577 }
 578 
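      // A dirty-card-to-oop closure specialized for CompactibleFreeListSpace.
      // The walk_mem_region_with_cl() overrides below dispatch to a parallel or
      // non-parallel walk of the region depending on the _parallel flag.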
 579 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 580   CompactibleFreeListSpace* _cfls;
 581   CMSCollector* _collector;
 582   bool _parallel;
 583 protected:
 584   // Override.
 585 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 586   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 587                                        HeapWord* bottom, HeapWord* top, \
 588                                        ClosureType* cl);                \
 589       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 590                                        HeapWord* bottom, HeapWord* top, \
 591                                        ClosureType* cl);                \
 592     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 593                                        HeapWord* bottom, HeapWord* top, \
 594                                        ClosureType* cl)
 595   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 596   walk_mem_region_with_cl_DECL(FilteringClosure);
 597 
 598 public:
 599   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 600                       CMSCollector* collector,
 601                       ExtendedOopClosure* cl,
 602                       CardTableModRefBS::PrecisionStyle precision,
 603                       HeapWord* boundary,
 604                       bool parallel) :
 605     Filtering_DCTOC(sp, cl, precision, boundary),
 606     _cfls(sp), _collector(collector), _parallel(parallel) {}
 607 };
 608 
 609 // We de-virtualize the block-related calls below, since we know that our
 610 // space is a CompactibleFreeListSpace.
 611 
 612 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 613 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 614                                                  HeapWord* bottom,              \
 615                                                  HeapWord* top,                 \
 616                                                  ClosureType* cl) {             \
 617    if (_parallel) {                                                             \
 618      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 619    } else {                                                                     \
 620      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 621    }                                                                            \
 622 }                                                                               \
 623 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 624                                                       HeapWord* bottom,         \
 625                                                       HeapWord* top,            \
 626                                                       ClosureType* cl) {        \
 627   /* Skip parts that are before "mr", in case "block_start" sent us             \
 628      back too far. */                                                           \
 629   HeapWord* mr_start = mr.start();                                              \
 630   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 631   HeapWord* next = bottom + bot_size;                                           \
 632   while (next < mr_start) {                                                     \
 633     bottom = next;                                                              \
 634     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 635     next = bottom + bot_size;                                                   \
 636   }                                                                             \
 637                                                                                 \
 638   while (bottom < top) {                                                        \
 639     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 640         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 641                     oop(bottom)) &&                                             \
 642         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 643       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 644       bottom += _cfls->adjustObjectSize(word_sz);                               \
 645     } else {                                                                    \
 646       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 647     }                                                                           \
 648   }                                                                             \
 649 }                                                                               \
 650 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 651                                                         HeapWord* bottom,       \
 652                                                         HeapWord* top,          \
 653                                                         ClosureType* cl) {      \
 654   /* Skip parts that are before "mr", in case "block_start" sent us             \
 655      back too far. */                                                           \
 656   HeapWord* mr_start = mr.start();                                              \
 657   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 658   HeapWord* next = bottom + bot_size;                                           \
 659   while (next < mr_start) {                                                     \
 660     bottom = next;                                                              \
 661     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 662     next = bottom + bot_size;                                                   \
 663   }                                                                             \
 664                                                                                 \
 665   while (bottom < top) {                                                        \
 666     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 667         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 668                     oop(bottom)) &&                                             \
 669         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 670       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 671       bottom += _cfls->adjustObjectSize(word_sz);                               \
 672     } else {                                                                    \
 673       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 674     }                                                                           \
 675   }                                                                             \
 676 }
 677 
 678 // (There are only two of these, rather than N, because the split is due
 679 // only to the introduction of the FilteringClosure, a local part of the
 680 // impl of this abstraction.)
 681 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 682 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 683 
 684 DirtyCardToOopClosure*
 685 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 686                                       CardTableModRefBS::PrecisionStyle precision,
 687                                       HeapWord* boundary,
 688                                       bool parallel) {
 689   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
 690 }
 691 
 692 
 693 // Note on locking for the space iteration functions:
 694 // since the collector's iteration activities are concurrent with
 695 // allocation activities by mutators, absent a suitable mutual exclusion
  696 // mechanism the iterators may go awry. For instance, a block being iterated
  697 // may suddenly be allocated, or divided up and part of it allocated, and
  698 // so on.
 699 
 700 // Apply the given closure to each block in the space.
 701 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 702   assert_lock_strong(freelistLock());
 703   HeapWord *cur, *limit;
 704   for (cur = bottom(), limit = end(); cur < limit;
 705        cur += cl->do_blk_careful(cur));
 706 }
 707 
 708 // Apply the given closure to each block in the space.
 709 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 710   assert_lock_strong(freelistLock());
 711   HeapWord *cur, *limit;
 712   for (cur = bottom(), limit = end(); cur < limit;
 713        cur += cl->do_blk(cur));
 714 }
 715 
 716 // Apply the given closure to each oop in the space.
 717 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 718   assert_lock_strong(freelistLock());
 719   HeapWord *cur, *limit;
 720   size_t curSize;
 721   for (cur = bottom(), limit = end(); cur < limit;
 722        cur += curSize) {
 723     curSize = block_size(cur);
 724     if (block_is_obj(cur)) {
 725       oop(cur)->oop_iterate(cl);
 726     }
 727   }
 728 }
 729 
 730 // NOTE: In the following methods, in order to safely be able to
 731 // apply the closure to an object, we need to be sure that the
 732 // object has been initialized. We are guaranteed that an object
 733 // is initialized if we are holding the Heap_lock with the
 734 // world stopped.
 735 void CompactibleFreeListSpace::verify_objects_initialized() const {
 736   if (is_init_completed()) {
 737     assert_locked_or_safepoint(Heap_lock);
 738     if (Universe::is_fully_initialized()) {
 739       guarantee(SafepointSynchronize::is_at_safepoint(),
 740                 "Required for objects to be initialized");
 741     }
 742   } // else make a concession at vm start-up
 743 }
 744 
 745 // Apply the given closure to each object in the space
 746 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 747   assert_lock_strong(freelistLock());
 748   NOT_PRODUCT(verify_objects_initialized());
 749   HeapWord *cur, *limit;
 750   size_t curSize;
 751   for (cur = bottom(), limit = end(); cur < limit;
 752        cur += curSize) {
 753     curSize = block_size(cur);
 754     if (block_is_obj(cur)) {
 755       blk->do_object(oop(cur));
 756     }
 757   }
 758 }
 759 
  760 // Apply the given closure to each live object in the space.
  761 // The use of CompactibleFreeListSpace
  762 // by ConcurrentMarkSweepGeneration for concurrent GCs means the space
  763 // may contain objects with references to objects that are no longer
  764 // valid.  For example, an object may reference another object
  765 // that has already been swept up (collected).  This method uses
  766 // obj_is_alive() to determine whether it is safe to apply the closure to
  767 // an object.  See obj_is_alive() for details on how liveness of an
  768 // object is decided.
 769 
 770 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 771   assert_lock_strong(freelistLock());
 772   NOT_PRODUCT(verify_objects_initialized());
 773   HeapWord *cur, *limit;
 774   size_t curSize;
 775   for (cur = bottom(), limit = end(); cur < limit;
 776        cur += curSize) {
 777     curSize = block_size(cur);
 778     if (block_is_obj(cur) && obj_is_alive(cur)) {
 779       blk->do_object(oop(cur));
 780     }
 781   }
 782 }
 783 
 784 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 785                                                   UpwardsObjectClosure* cl) {
 786   assert_locked(freelistLock());
 787   NOT_PRODUCT(verify_objects_initialized());
 788   assert(!mr.is_empty(), "Should be non-empty");
 789   // We use MemRegion(bottom(), end()) rather than used_region() below
 790   // because the two are not necessarily equal for some kinds of
 791   // spaces, in particular, certain kinds of free list spaces.
 792   // We could use the more complicated but more precise:
 793   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 794   // but the slight imprecision seems acceptable in the assertion check.
 795   assert(MemRegion(bottom(), end()).contains(mr),
 796          "Should be within used space");
 797   HeapWord* prev = cl->previous();   // max address from last time
 798   if (prev >= mr.end()) { // nothing to do
 799     return;
 800   }
 801   // This assert will not work when we go from cms space to perm
 802   // space, and use same closure. Easy fix deferred for later. XXX YSR
 803   // assert(prev == NULL || contains(prev), "Should be within space");
 804 
 805   bool last_was_obj_array = false;
 806   HeapWord *blk_start_addr, *region_start_addr;
 807   if (prev > mr.start()) {
 808     region_start_addr = prev;
 809     blk_start_addr    = prev;
 810     // The previous invocation may have pushed "prev" beyond the
  811     // last allocated block, yet there may still be blocks
 812     // in this region due to a particular coalescing policy.
 813     // Relax the assertion so that the case where the unallocated
 814     // block is maintained and "prev" is beyond the unallocated
 815     // block does not cause the assertion to fire.
 816     assert((BlockOffsetArrayUseUnallocatedBlock &&
 817             (!is_in(prev))) ||
 818            (blk_start_addr == block_start(region_start_addr)), "invariant");
 819   } else {
 820     region_start_addr = mr.start();
 821     blk_start_addr    = block_start(region_start_addr);
 822   }
 823   HeapWord* region_end_addr = mr.end();
 824   MemRegion derived_mr(region_start_addr, region_end_addr);
 825   while (blk_start_addr < region_end_addr) {
 826     const size_t size = block_size(blk_start_addr);
 827     if (block_is_obj(blk_start_addr)) {
 828       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 829     } else {
 830       last_was_obj_array = false;
 831     }
 832     blk_start_addr += size;
 833   }
 834   if (!last_was_obj_array) {
 835     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 836            "Should be within (closed) used space");
 837     assert(blk_start_addr > prev, "Invariant");
 838     cl->set_previous(blk_start_addr); // min address for next time
 839   }
 840 }
 841 
 842 // Callers of this iterator beware: The closure application should
 843 // be robust in the face of uninitialized objects and should (always)
 844 // return a correct size so that the next addr + size below gives us a
 845 // valid block boundary. [See for instance,
 846 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 847 // in ConcurrentMarkSweepGeneration.cpp.]
 848 HeapWord*
 849 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 850   ObjectClosureCareful* cl) {
 851   assert_lock_strong(freelistLock());
 852   // Can't use used_region() below because it may not necessarily
 853   // be the same as [bottom(),end()); although we could
 854   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 855   // that appears too cumbersome, so we just do the simpler check
 856   // in the assertion below.
 857   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 858          "mr should be non-empty and within used space");
 859   HeapWord *addr, *end;
 860   size_t size;
 861   for (addr = block_start_careful(mr.start()), end  = mr.end();
 862        addr < end; addr += size) {
 863     FreeChunk* fc = (FreeChunk*)addr;
 864     if (fc->is_free()) {
 865       // Since we hold the free list lock, which protects direct
 866       // allocation in this generation by mutators, a free object
 867       // will remain free throughout this iteration code.
 868       size = fc->size();
 869     } else {
 870       // Note that the object need not necessarily be initialized,
 871       // because (for instance) the free list lock does NOT protect
 872       // object initialization. The closure application below must
 873       // therefore be correct in the face of uninitialized objects.
 874       size = cl->do_object_careful_m(oop(addr), mr);
 875       if (size == 0) {
 876         // An unparsable object found. Signal early termination.
 877         return addr;
 878       }
 879     }
 880   }
 881   return NULL;
 882 }
 883 
 884 
 885 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 886   NOT_PRODUCT(verify_objects_initialized());
 887   return _bt.block_start(p);
 888 }
 889 
 890 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 891   return _bt.block_start_careful(p);
 892 }
 893 
 894 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 895   NOT_PRODUCT(verify_objects_initialized());
 896   // This must be volatile, or else there is a danger that the compiler
 897   // will compile the code below into a sometimes-infinite loop, by keeping
 898   // the value read the first time in a register.
 899   while (true) {
 900     // We must do this until we get a consistent view of the object.
 901     if (FreeChunk::indicatesFreeChunk(p)) {
 902       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 903       size_t res = fc->size();
 904 
 905       // Bugfix for systems with weak memory model (PPC64/IA64). The
 906       // block's free bit was set and we have read the size of the
 907       // block. Acquire and check the free bit again. If the block is
 908       // still free, the read size is correct.
 909       OrderAccess::acquire();
 910 
 911       // If the object is still a free chunk, return the size, else it
 912       // has been allocated so try again.
 913       if (FreeChunk::indicatesFreeChunk(p)) {
 914         assert(res != 0, "Block size should not be 0");
 915         return res;
 916       }
 917     } else {
 918       // must read from what 'p' points to in each loop.
 919       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 920       if (k != NULL) {
 921         assert(k->is_klass(), "Should really be klass oop.");
 922         oop o = (oop)p;
 923         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 924 
 925         // Bugfix for systems with weak memory model (PPC64/IA64).
 926         // The object o may be an array. Acquire to make sure that the array
 927         // size (third word) is consistent.
 928         OrderAccess::acquire();
 929 
 930         size_t res = o->size_given_klass(k);
 931         res = adjustObjectSize(res);
 932         assert(res != 0, "Block size should not be 0");
 933         return res;
 934       }
 935     }
 936   }
 937 }
 938 
 939 // TODO: Now that is_parsable is gone, we should combine these two functions.
 940 // A variant of the above that uses the Printezis bits for
 941 // unparsable but allocated objects. This avoids any possible
 942 // stalls waiting for mutators to initialize objects, and is
 943 // thus potentially faster than the variant above. However,
 944 // this variant may return a zero size for a block that is
 945 // under mutation and for which a consistent size cannot be
 946 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 947 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 948                                                      const CMSCollector* c)
 949 const {
 950   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 951   // This must be volatile, or else there is a danger that the compiler
 952   // will compile the code below into a sometimes-infinite loop, by keeping
 953   // the value read the first time in a register.
 954   DEBUG_ONLY(uint loops = 0;)
 955   while (true) {
 956     // We must do this until we get a consistent view of the object.
 957     if (FreeChunk::indicatesFreeChunk(p)) {
 958       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 959       size_t res = fc->size();
 960 
 961       // Bugfix for systems with weak memory model (PPC64/IA64). The
 962       // free bit of the block was set and we have read the size of
 963       // the block. Acquire and check the free bit again. If the
 964       // block is still free, the read size is correct.
 965       OrderAccess::acquire();
 966 
 967       if (FreeChunk::indicatesFreeChunk(p)) {
 968         assert(res != 0, "Block size should not be 0");
 969         assert(loops == 0, "Should be 0");
 970         return res;
 971       }
 972     } else {
 973       // must read from what 'p' points to in each loop.
 974       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 975       // We trust the size of any object that has a non-NULL
 976       // klass and (for those in the perm gen) is parsable
 977       // -- irrespective of its conc_safe-ty.
 978       if (k != NULL) {
 979         assert(k->is_klass(), "Should really be klass oop.");
 980         oop o = (oop)p;
 981         assert(o->is_oop(), "Should be an oop");
 982 
 983         // Bugfix for systems with weak memory model (PPC64/IA64).
 984         // The object o may be an array. Acquire to make sure that the array
 985         // size (third word) is consistent.
 986         OrderAccess::acquire();
 987 
 988         size_t res = o->size_given_klass(k);
 989         res = adjustObjectSize(res);
 990         assert(res != 0, "Block size should not be 0");
 991         return res;
 992       } else {
 993         // May return 0 if P-bits not present.
 994         return c->block_size_if_printezis_bits(p);
 995       }
 996     }
 997     assert(loops == 0, "Can loop at most once");
 998     DEBUG_ONLY(loops++;)
 999   }
1000 }
1001 
1002 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1003   NOT_PRODUCT(verify_objects_initialized());
1004   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1005   FreeChunk* fc = (FreeChunk*)p;
1006   if (fc->is_free()) {
1007     return fc->size();
1008   } else {
1009     // Ignore mark word because this may be a recently promoted
1010     // object whose mark word is used to chain together grey
1011     // objects (the last one would have a null value).
1012     assert(oop(p)->is_oop(true), "Should be an oop");
1013     return adjustObjectSize(oop(p)->size());
1014   }
1015 }
1016 
1017 // This implementation assumes that the property of "being an object" is
1018 // stable.  But being a free chunk may not be (because of parallel
 1019 // promotion).
1020 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1021   FreeChunk* fc = (FreeChunk*)p;
1022   assert(is_in_reserved(p), "Should be in space");
1023   if (FreeChunk::indicatesFreeChunk(p)) return false;
1024   Klass* k = oop(p)->klass_or_null();
1025   if (k != NULL) {
1026     // Ignore mark word because it may have been used to
1027     // chain together promoted objects (the last one
1028     // would have a null value).
1029     assert(oop(p)->is_oop(true), "Should be an oop");
1030     return true;
1031   } else {
1032     return false;  // Was not an object at the start of collection.
1033   }
1034 }
1035 
1036 // Check if the object is alive. This fact is checked either by consulting
1037 // the main marking bitmap in the sweeping phase or, if it's a permanent
1038 // generation and we're not in the sweeping phase, by checking the
1039 // perm_gen_verify_bit_map where we store the "deadness" information if
1040 // we did not sweep the perm gen in the most recent previous GC cycle.
1041 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1042   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1043          "Else races are possible");
1044   assert(block_is_obj(p), "The address should point to an object");
1045 
1046   // If we're sweeping, we use object liveness information from the main bit map
1047   // for both perm gen and old gen.
1048   // We don't need to lock the bitmap (live_map or dead_map below), because
1049   // EITHER we are in the middle of the sweeping phase, and the
1050   // main marking bit map (live_map below) is locked,
1051   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1052   // is stable, because it's mutated only in the sweeping phase.
1053   // NOTE: This method is also used by jmap where, if class unloading is
1054   // off, the results can return "false" for legitimate perm objects,
1055   // when we are not in the midst of a sweeping phase, which can result
1056   // in jmap not reporting certain perm gen objects. This will be moot
1057   // if/when the perm gen goes away in the future.
1058   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1059     CMSBitMap* live_map = _collector->markBitMap();
1060     return live_map->par_isMarked((HeapWord*) p);
1061   }
1062   return true;
1063 }
1064 
1065 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1066   FreeChunk* fc = (FreeChunk*)p;
1067   assert(is_in_reserved(p), "Should be in space");
1068   assert(_bt.block_start(p) == p, "Should be a block boundary");
1069   if (!fc->is_free()) {
1070     // Ignore mark word because it may have been used to
1071     // chain together promoted objects (the last one
1072     // would have a null value).
1073     assert(oop(p)->is_oop(true), "Should be an oop");
1074     return true;
1075   }
1076   return false;
1077 }
1078 
1079 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1080 // approximate answer if you don't hold the freelistlock when you call this.
1081 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1082   size_t size = 0;
1083   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1084     debug_only(
1085       // We may be calling here without the lock in which case we
1086       // won't do this modest sanity check.
1087       if (freelistLock()->owned_by_self()) {
1088         size_t total_list_size = 0;
1089         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1090           fc = fc->next()) {
1091           total_list_size += i;
1092         }
1093         assert(total_list_size == i * _indexedFreeList[i].count(),
1094                "Count in list is incorrect");
1095       }
1096     )
1097     size += i * _indexedFreeList[i].count();
1098   }
1099   return size;
1100 }
1101 
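      // Thread-safe allocation entry point: takes the free list lock (without
      // a safepoint check) and delegates to allocate().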
1102 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1103   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1104   return allocate(size);
1105 }
1106 
1107 HeapWord*
1108 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1109   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1110 }
1111 
1112 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1113   assert_lock_strong(freelistLock());
1114   HeapWord* res = NULL;
1115   assert(size == adjustObjectSize(size),
1116          "use adjustObjectSize() before calling into allocate()");
1117 
1118   res = allocate_adaptive_freelists(size);
1119 
1120   if (res != NULL) {
1121     // check that res does lie in this space!
1122     assert(is_in_reserved(res), "Not in this space!");
1123     assert(is_aligned((void*)res), "alignment check");
1124 
1125     FreeChunk* fc = (FreeChunk*)res;
1126     fc->markNotFree();
1127     assert(!fc->is_free(), "shouldn't be marked free");
1128     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1129     // Verify that the block offset table shows this to
1130     // be a single block, but not one which is unallocated.
1131     _bt.verify_single_block(res, size);
1132     _bt.verify_not_unallocated(res, size);
1133     // mangle a just allocated object with a distinct pattern.
1134     debug_only(fc->mangleAllocated(size));
1135   }
1136 
1137   return res;
1138 }
1139 
1140 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1141   assert_lock_strong(freelistLock());
1142   HeapWord* res = NULL;
1143   assert(size == adjustObjectSize(size),
1144          "use adjustObjectSize() before calling into allocate()");
1145 
1146   // Strategy
1147   //   if small
 1148   //     exact size from small object indexed list
1149   //     small or large linear allocation block (linAB) as appropriate
1150   //     take from lists of greater sized chunks
1151   //   else
1152   //     dictionary
1153   //     small or large linear allocation block if it has the space
1154   // Try allocating exact size from indexTable first
1155   if (size < IndexSetSize) {
1156     res = (HeapWord*) getChunkFromIndexedFreeList(size);
 1157     if (res != NULL) {
1158       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1159         "Not removed from free list");
1160       // no block offset table adjustment is necessary on blocks in
1161       // the indexed lists.
1162 
1163     // Try allocating from the small LinAB
1164     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1165         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1166         // if successful, the above also adjusts block offset table
1167         // Note that this call will refill the LinAB to
 1168         // satisfy the request.  This is different from
1169         // evm.
1170         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1171     } else {
1172       // Raid the exact free lists larger than size, even if they are not
1173       // overpopulated.
1174       res = (HeapWord*) getChunkFromGreater(size);
1175     }
1176   } else {
1177     // Big objects get allocated directly from the dictionary.
1178     res = (HeapWord*) getChunkFromDictionaryExact(size);
1179     if (res == NULL) {
1180       // Try hard not to fail since an allocation failure will likely
1181       // trigger a synchronous GC.  Try to get the space from the
1182       // allocation blocks.
1183       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1184     }
1185   }
1186 
1187   return res;
1188 }
1189 
1190 // A worst-case estimate of the space required (in HeapWords) to expand the heap
 1191 // when promoting an object of obj_size words.
1192 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1193   // Depending on the object size, expansion may require refilling either a
1194   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1195   // is added because the dictionary may over-allocate to avoid fragmentation.
1196   size_t space = obj_size;
1197   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1198   return space;
1199 }
1200 
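      // Satisfy a request for numWords by taking a chunk from a larger indexed
      // free list, or failing that from the dictionary, splitting it and
      // returning the remainder to the free lists.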
1201 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1202   FreeChunk* ret;
1203 
1204   assert(numWords >= MinChunkSize, "Size is less than minimum");
1205   assert(linearAllocationWouldFail() || bestFitFirst(),
1206     "Should not be here");
1207 
1208   size_t i;
1209   size_t currSize = numWords + MinChunkSize;
1210   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1211   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1212     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1213     if (fl->head()) {
1214       ret = getFromListGreater(fl, numWords);
1215       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1216       return ret;
1217     }
1218   }
1219 
1220   currSize = MAX2((size_t)SmallForDictionary,
1221                   (size_t)(numWords + MinChunkSize));
1222 
1223   /* Try to get a chunk that satisfies request, while avoiding
1224      fragmentation that can't be handled. */
1225   {
1226     ret =  dictionary()->get_chunk(currSize);
1227     if (ret != NULL) {
1228       assert(ret->size() - numWords >= MinChunkSize,
1229              "Chunk is too small");
1230       _bt.allocated((HeapWord*)ret, ret->size());
1231       /* Carve returned chunk. */
1232       (void) splitChunkAndReturnRemainder(ret, numWords);
1233       /* Label this as no longer a free chunk. */
1234       assert(ret->is_free(), "This chunk should be free");
1235       ret->link_prev(NULL);
1236     }
1237     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1238     return ret;
1239   }
1240   ShouldNotReachHere();
1241 }
1242 
1243 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1244   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1245   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1246 }
1247 
1248 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1249   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1250          (_smallLinearAllocBlock._word_size == fc->size()),
1251          "Linear allocation block shows incorrect size");
1252   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1253           (_smallLinearAllocBlock._word_size == fc->size()));
1254 }
1255 
1256 // Check if the purported free chunk is present either as a linear
1257 // allocation block, the size-indexed table of (smaller) free blocks,
1258 // or the larger free blocks kept in the binary tree dictionary.
1259 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1260   if (verify_chunk_is_linear_alloc_block(fc)) {
1261     return true;
1262   } else if (fc->size() < IndexSetSize) {
1263     return verifyChunkInIndexedFreeLists(fc);
1264   } else {
1265     return dictionary()->verify_chunk_in_free_list(fc);
1266   }
1267 }
1268 
1269 #ifndef PRODUCT
1270 void CompactibleFreeListSpace::assert_locked() const {
1271   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1272 }
1273 
1274 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1275   CMSLockVerifier::assert_locked(lock);
1276 }
1277 #endif
1278 
1279 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1280   // In the parallel case, the main thread holds the free list lock
1281   // on behalf of the parallel threads.
1282   FreeChunk* fc;
1283   {
1284     // If GC is parallel, this might be called by several threads.
1285     // This should be rare enough that the locking overhead won't affect
1286     // the sequential code.
1287     MutexLockerEx x(parDictionaryAllocLock(),
1288                     Mutex::_no_safepoint_check_flag);
1289     fc = getChunkFromDictionary(size);
1290   }
1291   if (fc != NULL) {
1292     fc->dontCoalesce();
1293     assert(fc->is_free(), "Should be free, but not coalescable");
1294     // Verify that the block offset table shows this to
1295     // be a single block, but not one which is unallocated.
1296     _bt.verify_single_block((HeapWord*)fc, fc->size());
1297     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1298   }
1299   return fc;
1300 }
1301 
1302 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1303   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1304   assert_locked();
1305 
1306   // If we are tracking promotions, then first ensure space for
1307   // promotion (including spooling space for saving the header if necessary),
1308   // then allocate and copy, and finally track the promoted info if needed.
1309   // When tracking (see PromotionInfo::track()), the mark word may
1310   // be displaced and in this case restoration of the mark word
1311   // occurs in the (oop_since_save_marks_)iterate phase.
1312   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1313     return NULL;
1314   }
1315   // Call the allocate(size_t, bool) form directly to avoid the
1316   // additional call through the allocate(size_t) form.  Having
1317   // the compiler inline the call is problematic because allocate(size_t)
1318   // is a virtual method.
1319   HeapWord* res = allocate(adjustObjectSize(obj_size));
1320   if (res != NULL) {
1321     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1322     // if we should be tracking promotions, do so.
1323     if (_promoInfo.tracking()) {
1324         _promoInfo.track((PromotedObject*)res);
1325     }
1326   }
1327   return oop(res);
1328 }
1329 
1330 HeapWord*
1331 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1332   assert_locked();
1333   assert(size >= MinChunkSize, "minimum chunk size");
1334   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1335     "maximum from smallLinearAllocBlock");
1336   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1337 }
1338 
1339 HeapWord*
1340 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1341                                                        size_t size) {
1342   assert_locked();
1343   assert(size >= MinChunkSize, "too small");
1344   HeapWord* res = NULL;
1345   // Try to do linear allocation from blk, making sure that it is non-empty.
1346   if (blk->_word_size == 0) {
1347     // We have probably been unable to refill this block, either in the
1348     // prologue or after it was exhausted by the last linear allocation.
1349     // Bail out until next time.
1350     assert(blk->_ptr == NULL, "consistency check");
1351     return NULL;
1352   }
1353   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1354   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1355   if (res != NULL) return res;
1356 
1357   // about to exhaust this linear allocation block
1358   if (blk->_word_size == size) { // exactly satisfied
1359     res = blk->_ptr;
1360     _bt.allocated(res, blk->_word_size);
1361   } else if (size + MinChunkSize <= blk->_refillSize) {
1362     size_t sz = blk->_word_size;
1363     // Update _unallocated_block if the size is such that the chunk would be
1364     // returned to the indexed free list.  All other chunks in the indexed
1365     // free lists are allocated from the dictionary, so _unallocated_block
1366     // has already been adjusted for them.  Do it here so that the cost is
1367     // paid once for all chunks added back to the indexed free lists.
1368     if (sz < SmallForDictionary) {
1369       _bt.allocated(blk->_ptr, sz);
1370     }
1371     // Return the chunk that isn't big enough, and then refill below.
1372     addChunkToFreeLists(blk->_ptr, sz);
1373     split_birth(sz);
1374     // Don't keep statistics on adding back chunk from a LinAB.
1375   } else {
1376     // A refilled block would not satisfy the request.
1377     return NULL;
1378   }
1379 
1380   blk->_ptr = NULL; blk->_word_size = 0;
1381   refillLinearAllocBlock(blk);
1382   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1383          "block was replenished");
1384   if (res != NULL) {
1385     split_birth(size);
1386     repairLinearAllocBlock(blk);
1387   } else if (blk->_ptr != NULL) {
1388     res = blk->_ptr;
1389     size_t blk_size = blk->_word_size;
1390     blk->_word_size -= size;
1391     blk->_ptr  += size;
1392     split_birth(size);
1393     repairLinearAllocBlock(blk);
1394     // Update BOT last so that other (parallel) GC threads see a consistent
1395     // view of the BOT and free blocks.
1396     // Above must occur before BOT is updated below.
1397     OrderAccess::storestore();
1398     _bt.split_block(res, blk_size, size);  // adjust block offset table
1399   }
1400   return res;
1401 }
1402 
1403 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1404                                         LinearAllocBlock* blk,
1405                                         size_t size) {
1406   assert_locked();
1407   assert(size >= MinChunkSize, "too small");
1408 
1409   HeapWord* res = NULL;
1410   // This is the common case.  Keep it simple.
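  // The linAB must retain at least MinChunkSize words after the carve-off so
  // that what is left behind is still a valid free block.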
1411   if (blk->_word_size >= size + MinChunkSize) {
1412     assert(blk->_ptr != NULL, "consistency check");
1413     res = blk->_ptr;
1414     // Note that the BOT is up-to-date for the linAB before allocation.  It
1415     // indicates the start of the linAB.  The split_block() updates the
1416     // BOT for the linAB after the allocation (indicates the start of the
1417     // next chunk to be allocated).
1418     size_t blk_size = blk->_word_size;
1419     blk->_word_size -= size;
1420     blk->_ptr  += size;
1421     split_birth(size);
1422     repairLinearAllocBlock(blk);
1423     // Update BOT last so that other (parallel) GC threads see a consistent
1424     // view of the BOT and free blocks.
1425     // Above must occur before BOT is updated below.
1426     OrderAccess::storestore();
1427     _bt.split_block(res, blk_size, size);  // adjust block offset table
1428     _bt.allocated(res, size);
1429   }
1430   return res;
1431 }
1432 
1433 FreeChunk*
1434 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1435   assert_locked();
1436   assert(size < SmallForDictionary, "just checking");
1437   FreeChunk* res;
1438   res = _indexedFreeList[size].get_chunk_at_head();
1439   if (res == NULL) {
1440     res = getChunkFromIndexedFreeListHelper(size);
1441   }
1442   _bt.verify_not_unallocated((HeapWord*) res, size);
1443   assert(res == NULL || res->size() == size, "Incorrect block size");
1444   return res;
1445 }
1446 
1447 FreeChunk*
1448 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1449   bool replenish) {
1450   assert_locked();
1451   FreeChunk* fc = NULL;
1452   if (size < SmallForDictionary) {
1453     assert(_indexedFreeList[size].head() == NULL ||
1454       _indexedFreeList[size].surplus() <= 0,
1455       "List for this size should be empty or under populated");
1456     // Try best fit in exact lists before replenishing the list
1457     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1458       // Replenish list.
1459       //
1460       // Things tried that failed.
1461       //   Tried allocating out of the two LinAB's first before
1462       // replenishing lists.
1463       //   Tried small linAB of size 256 (size in indexed list)
1464       // and replenishing indexed lists from the small linAB.
1465       //
1466       FreeChunk* newFc = NULL;
1467       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
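      // For example, with CMSIndexedFreeListReplenish == 4, a 10-word request
      // first looks for a single 40-word chunk that the code below can carve
      // into four 10-word blocks: one is returned and the rest prime
      // _indexedFreeList[10] for subsequent requests.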
1468       if (replenish_size < SmallForDictionary) {
1469         // Do not replenish from an underpopulated size.
1470         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1471             _indexedFreeList[replenish_size].head() != NULL) {
1472           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1473         } else if (bestFitFirst()) {
1474           newFc = bestFitSmall(replenish_size);
1475         }
1476       }
1477       if (newFc == NULL && replenish_size > size) {
1478         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1479         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1480       }
1481       // Note: The split-death stats update for the block obtained above
1482       // is recorded below, precisely when we know we are actually going
1483       // to split it into more than one piece.
1484       if (newFc != NULL) {
1485         if  (replenish || CMSReplenishIntermediate) {
1486           // Replenish this list and return one block to caller.
1487           size_t i;
1488           FreeChunk *curFc, *nextFc;
1489           size_t num_blk = newFc->size() / size;
1490           assert(num_blk >= 1, "Smaller than requested?");
1491           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1492           if (num_blk > 1) {
1493             // we are sure we will be splitting the block just obtained
1494             // into multiple pieces; record the split-death of the original
1495             splitDeath(replenish_size);
1496           }
1497           // carve up and link blocks 0, ..., num_blk - 2
1498           // The last chunk is not added to the lists but is returned as the
1499           // free chunk.
1500           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1501                i = 0;
1502                i < (num_blk - 1);
1503                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1504                i++) {
1505             curFc->set_size(size);
1506             // Don't record this as a return in order to try and
1507             // determine the "returns" from a GC.
1508             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1509             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1510             _bt.mark_block((HeapWord*)curFc, size);
1511             split_birth(size);
1512             // Don't record the initial population of the indexed list
1513             // as a split birth.
1514           }
1515 
1516           // check that the arithmetic was OK above
1517           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1518             "inconsistency in carving newFc");
1519           curFc->set_size(size);
1520           _bt.mark_block((HeapWord*)curFc, size);
1521           split_birth(size);
1522           fc = curFc;
1523         } else {
1524           // Return entire block to caller
1525           fc = newFc;
1526         }
1527       }
1528     }
1529   } else {
1530     // Get a free chunk from the free chunk dictionary to be returned to
1531     // replenish the indexed free list.
1532     fc = getChunkFromDictionaryExact(size);
1533   }
1534   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1535   return fc;
1536 }
1537 
1538 FreeChunk*
1539 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1540   assert_locked();
1541   FreeChunk* fc = _dictionary->get_chunk(size,
1542                                          FreeBlockDictionary<FreeChunk>::atLeast);
1543   if (fc == NULL) {
1544     return NULL;
1545   }
1546   _bt.allocated((HeapWord*)fc, fc->size());
1547   if (fc->size() >= size + MinChunkSize) {
1548     fc = splitChunkAndReturnRemainder(fc, size);
1549   }
1550   assert(fc->size() >= size, "chunk too small");
1551   assert(fc->size() < size + MinChunkSize, "chunk too big");
1552   _bt.verify_single_block((HeapWord*)fc, fc->size());
1553   return fc;
1554 }
1555 
1556 FreeChunk*
1557 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1558   assert_locked();
1559   FreeChunk* fc = _dictionary->get_chunk(size,
1560                                          FreeBlockDictionary<FreeChunk>::atLeast);
1561   if (fc == NULL) {
1562     return fc;
1563   }
1564   _bt.allocated((HeapWord*)fc, fc->size());
1565   if (fc->size() == size) {
1566     _bt.verify_single_block((HeapWord*)fc, size);
1567     return fc;
1568   }
1569   assert(fc->size() > size, "get_chunk() guarantee");
1570   if (fc->size() < size + MinChunkSize) {
1571     // Too small to split (remainder < MinChunkSize): return it and get a bigger one.
1572     returnChunkToDictionary(fc);
1573     fc = _dictionary->get_chunk(size + MinChunkSize,
1574                                 FreeBlockDictionary<FreeChunk>::atLeast);
1575     if (fc == NULL) {
1576       return NULL;
1577     }
1578     _bt.allocated((HeapWord*)fc, fc->size());
1579   }
1580   assert(fc->size() >= size + MinChunkSize, "tautology");
1581   fc = splitChunkAndReturnRemainder(fc, size);
1582   assert(fc->size() == size, "chunk is wrong size");
1583   _bt.verify_single_block((HeapWord*)fc, size);
1584   return fc;
1585 }
1586 
1587 void
1588 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1589   assert_locked();
1590 
1591   size_t size = chunk->size();
1592   _bt.verify_single_block((HeapWord*)chunk, size);
1593   // adjust _unallocated_block downward, as necessary
1594   _bt.freed((HeapWord*)chunk, size);
1595   _dictionary->return_chunk(chunk);
1596 #ifndef PRODUCT
1597   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1598     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1599     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1600     tl->verify_stats();
1601   }
1602 #endif // PRODUCT
1603 }
1604 
1605 void
1606 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1607   assert_locked();
1608   size_t size = fc->size();
1609   _bt.verify_single_block((HeapWord*) fc, size);
1610   _bt.verify_not_unallocated((HeapWord*) fc, size);
1611   _indexedFreeList[size].return_chunk_at_tail(fc);
1612 #ifndef PRODUCT
1613   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1614      _indexedFreeList[size].verify_stats();
1615   }
1616 #endif // PRODUCT
1617 }
1618 
1619 // Add chunk to end of last block -- if it's the largest
1620 // block -- and update BOT and census data. We would
1621 // of course have preferred to coalesce it with the
1622 // last block, but it's currently less expensive to find the
1623 // largest block than it is to find the last.
1624 void
1625 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1626   HeapWord* chunk, size_t     size) {
1627   // check that the chunk does lie in this space!
1628   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1629   // One of the parallel gc task threads may be here
1630   // whilst others are allocating.
1631   Mutex* lock = &_parDictionaryAllocLock;
1632   FreeChunk* ec;
1633   {
1634     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1635     ec = dictionary()->find_largest_dict();  // get largest block
1636     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1637       // It's a coterminal block - we can coalesce.
1638       size_t old_size = ec->size();
1639       coalDeath(old_size);
1640       removeChunkFromDictionary(ec);
1641       size += old_size;
1642     } else {
1643       ec = (FreeChunk*)chunk;
1644     }
1645   }
1646   ec->set_size(size);
1647   debug_only(ec->mangleFreed(size));
1648   if (size < SmallForDictionary) {
1649     lock = _indexedFreeListParLocks[size];
1650   }
1651   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1652   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1653   // Record the birth under the lock, since the recording involves
1654   // manipulation of the list on which the chunk lives; if the chunk
1655   // were allocated and were the last on the list, the list could
1656   // go away.
1657   coalBirth(size);
1658 }
1659 
1660 void
1661 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1662                                               size_t     size) {
1663   // check that the chunk does lie in this space!
1664   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1665   assert_locked();
1666   _bt.verify_single_block(chunk, size);
1667 
1668   FreeChunk* fc = (FreeChunk*) chunk;
1669   fc->set_size(size);
1670   debug_only(fc->mangleFreed(size));
1671   if (size < SmallForDictionary) {
1672     returnChunkToFreeList(fc);
1673   } else {
1674     returnChunkToDictionary(fc);
1675   }
1676 }
1677 
1678 void
1679 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1680   size_t size, bool coalesced) {
1681   assert_locked();
1682   assert(chunk != NULL, "null chunk");
1683   if (coalesced) {
1684     // repair BOT
1685     _bt.single_block(chunk, size);
1686   }
1687   addChunkToFreeLists(chunk, size);
1688 }
1689 
1690 // We _must_ find the purported chunk on our free lists;
1691 // we assert if we don't.
1692 void
1693 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1694   size_t size = fc->size();
1695   assert_locked();
1696   debug_only(verifyFreeLists());
1697   if (size < SmallForDictionary) {
1698     removeChunkFromIndexedFreeList(fc);
1699   } else {
1700     removeChunkFromDictionary(fc);
1701   }
1702   _bt.verify_single_block((HeapWord*)fc, size);
1703   debug_only(verifyFreeLists());
1704 }
1705 
1706 void
1707 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1708   size_t size = fc->size();
1709   assert_locked();
1710   assert(fc != NULL, "null chunk");
1711   _bt.verify_single_block((HeapWord*)fc, size);
1712   _dictionary->remove_chunk(fc);
1713   // adjust _unallocated_block upward, as necessary
1714   _bt.allocated((HeapWord*)fc, size);
1715 }
1716 
1717 void
1718 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1719   assert_locked();
1720   size_t size = fc->size();
1721   _bt.verify_single_block((HeapWord*)fc, size);
1722   NOT_PRODUCT(
1723     if (FLSVerifyIndexTable) {
1724       verifyIndexedFreeList(size);
1725     }
1726   )
1727   _indexedFreeList[size].remove_chunk(fc);
1728   NOT_PRODUCT(
1729     if (FLSVerifyIndexTable) {
1730       verifyIndexedFreeList(size);
1731     }
1732   )
1733 }
1734 
1735 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1736   /* A hint is the next larger size that has a surplus.
1737      Start search at a size large enough to guarantee that
1738      the excess is >= MinChunkSize. */
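  /* The hints form a chain toward larger sizes: setFLHints() records, for each
     indexed list, the nearest larger size that had a surplus at the last
     census, so the walk below can skip sizes known to have nothing to give. */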
1739   size_t start = align_object_size(numWords + MinChunkSize);
1740   if (start < IndexSetSize) {
1741     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1742     size_t    hint = _indexedFreeList[start].hint();
1743     while (hint < IndexSetSize) {
1744       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1745       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1746       if (fl->surplus() > 0 && fl->head() != NULL) {
1747         // Found a list with surplus, reset original hint
1748         // and split out a free chunk which is returned.
1749         _indexedFreeList[start].set_hint(hint);
1750         FreeChunk* res = getFromListGreater(fl, numWords);
1751         assert(res == NULL || res->is_free(),
1752           "Should be returning a free chunk");
1753         return res;
1754       }
1755       hint = fl->hint(); /* keep looking */
1756     }
1757     /* None found. */
1758     it[start].set_hint(IndexSetSize);
1759   }
1760   return NULL;
1761 }
1762 
1763 /* Requires fl->size >= numWords + MinChunkSize */
1764 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1765   size_t numWords) {
1766   FreeChunk *curr = fl->head();
1767   assert(curr != NULL, "List is empty");
1768   size_t oldNumWords = curr->size();
1769   assert(numWords >= MinChunkSize, "Word size is too small");
1770   assert(oldNumWords >= numWords + MinChunkSize,
1771         "Size of chunks in the list is too small");
1772 
1773   fl->remove_chunk(curr);
1774   // recorded indirectly by splitChunkAndReturnRemainder -
1775   // smallSplit(oldNumWords, numWords);
1776   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1777   // Does anything have to be done for the remainder in terms of
1778   // fixing the card table?
1779   assert(new_chunk == NULL || new_chunk->is_free(),
1780     "Should be returning a free chunk");
1781   return new_chunk;
1782 }
1783 
1784 FreeChunk*
1785 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1786   size_t new_size) {
1787   assert_locked();
1788   size_t size = chunk->size();
1789   assert(size > new_size, "Split from a smaller block?");
1790   assert(is_aligned(chunk), "alignment problem");
1791   assert(size == adjustObjectSize(size), "alignment problem");
1792   size_t rem_sz = size - new_size;
1793   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1794   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
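  // The remainder (rem_sz words) becomes a new free chunk starting new_size
  // words into the original chunk.  For example, splitting a 100-word chunk
  // for a 60-word request leaves a 40-word free chunk at chunk + 60, which is
  // returned to the indexed lists or the dictionary below.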
1795   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1796   assert(is_aligned(ffc), "alignment problem");
1797   ffc->set_size(rem_sz);
1798   ffc->link_next(NULL);
1799   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1800   // Above must occur before BOT is updated below.
1801   // adjust block offset table
1802   OrderAccess::storestore();
1803   assert(chunk->is_free() && ffc->is_free(), "Error");
1804   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1805   if (rem_sz < SmallForDictionary) {
1806     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1807     bool is_par = Thread::current()->is_GC_task_thread();
1808     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1809     returnChunkToFreeList(ffc);
1810     split(size, rem_sz);
1811     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1812   } else {
1813     returnChunkToDictionary(ffc);
1814     split(size, rem_sz);
1815   }
1816   chunk->set_size(new_size);
1817   return chunk;
1818 }
1819 
1820 void
1821 CompactibleFreeListSpace::sweep_completed() {
1822   // Now that space is probably plentiful, refill linear
1823   // allocation blocks as needed.
1824   refillLinearAllocBlocksIfNeeded();
1825 }
1826 
1827 void
1828 CompactibleFreeListSpace::gc_prologue() {
1829   assert_locked();
1830   reportFreeListStatistics("Before GC:");
1831   refillLinearAllocBlocksIfNeeded();
1832 }
1833 
1834 void
1835 CompactibleFreeListSpace::gc_epilogue() {
1836   assert_locked();
1837   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1838   _promoInfo.stopTrackingPromotions();
1839   repairLinearAllocationBlocks();
1840   reportFreeListStatistics("After GC:");
1841 }
1842 
1843 // Iteration support, mostly delegated from a CMS generation
1844 
1845 void CompactibleFreeListSpace::save_marks() {
1846   assert(Thread::current()->is_VM_thread(),
1847          "Global variable should only be set when single-threaded");
1848   // Mark the "end" of the used space at the time of this call;
1849   // note, however, that promoted objects from this point
1850   // on are tracked in the _promoInfo below.
1851   set_saved_mark_word(unallocated_block());
1852 #ifdef ASSERT
1853   // Check the sanity of save_marks() etc.
1854   MemRegion ur    = used_region();
1855   MemRegion urasm = used_region_at_save_marks();
1856   assert(ur.contains(urasm),
1857          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1858          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1859          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1860 #endif
1861   // inform allocator that promotions should be tracked.
1862   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1863   _promoInfo.startTrackingPromotions();
1864 }
1865 
1866 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1867   assert(_promoInfo.tracking(), "No preceding save_marks?");
1868   return _promoInfo.noPromotions();
1869 }
1870 
1871 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1872                                                                             \
1873 void CompactibleFreeListSpace::                                             \
1874 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1875   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1876   /*                                                                        \
1877    * This also restores any displaced headers and removes the elements from \
1878    * the iteration set as they are processed, so that we have a clean slate \
1879    * at the end of the iteration. Note, thus, that if new objects are       \
1880    * promoted as a result of the iteration they are iterated over as well.  \
1881    */                                                                       \
1882   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1883 }
1884 
1885 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1886 
1887 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1888   return _smallLinearAllocBlock._word_size == 0;
1889 }
1890 
1891 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1892   // Fix up linear allocation blocks to look like free blocks
1893   repairLinearAllocBlock(&_smallLinearAllocBlock);
1894 }
1895 
1896 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1897   assert_locked();
1898   if (blk->_ptr != NULL) {
1899     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1900            "Minimum block size requirement");
1901     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1902     fc->set_size(blk->_word_size);
1903     fc->link_prev(NULL);   // mark as free
1904     fc->dontCoalesce();
1905     assert(fc->is_free(), "just marked it free");
1906     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1907   }
1908 }
1909 
1910 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1911   assert_locked();
1912   if (_smallLinearAllocBlock._ptr == NULL) {
1913     assert(_smallLinearAllocBlock._word_size == 0,
1914       "Size of linAB should be zero if the ptr is NULL");
1915     // Reset the linAB refill and allocation size limit.
1916     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1917   }
1918   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1919 }
1920 
1921 void
1922 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1923   assert_locked();
1924   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1925          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1926          "blk invariant");
1927   if (blk->_ptr == NULL) {
1928     refillLinearAllocBlock(blk);
1929   }
1930   if (PrintMiscellaneous && Verbose) {
1931     if (blk->_word_size == 0) {
1932       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
1933     }
1934   }
1935 }
1936 
1937 void
1938 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1939   assert_locked();
1940   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1941          "linear allocation block should be empty");
1942   FreeChunk* fc;
1943   if (blk->_refillSize < SmallForDictionary &&
1944       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1945     // A linAB's strategy might be to use small sizes to reduce
1946     // fragmentation but still get the benefits of allocation from a
1947     // linAB.
1948   } else {
1949     fc = getChunkFromDictionary(blk->_refillSize);
1950   }
1951   if (fc != NULL) {
1952     blk->_ptr  = (HeapWord*)fc;
1953     blk->_word_size = fc->size();
1954     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1955   }
1956 }
1957 
1958 // Support for compaction
1959 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1960   scan_and_forward(this, cp);
1961   // prepare_for_compaction() uses the space between live objects
1962   // so that later phases can skip dead space quickly.  As a result,
1963   // verification of the free lists doesn't work after this point.
1964 }
1965 
1966 void CompactibleFreeListSpace::adjust_pointers() {
1967   // In other versions of adjust_pointers(), a bail out
1968   // based on the amount of live data in the generation
1969   // (i.e., if 0, bail out) may be used.
1970   // Cannot test used() == 0 here because the free lists have already
1971   // been mangled by the compaction.
1972 
1973   scan_and_adjust_pointers(this);
1974   // See note about verification in prepare_for_compaction().
1975 }
1976 
1977 void CompactibleFreeListSpace::compact() {
1978   scan_and_compact(this);
1979 }
1980 
1981 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1982 // where fbs is free block sizes
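// For a single free block the metric is 1 - s*s/(s*s) = 0 (no fragmentation);
// for n equally sized free blocks it is 1 - 1/n, approaching 1 as the free
// space splinters into many small pieces.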
1983 double CompactibleFreeListSpace::flsFrag() const {
1984   size_t itabFree = totalSizeInIndexedFreeLists();
1985   double frag = 0.0;
1986   size_t i;
1987 
1988   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1989     double sz  = i;
1990     frag      += _indexedFreeList[i].count() * (sz * sz);
1991   }
1992 
1993   double totFree = itabFree +
1994                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1995   if (totFree > 0) {
1996     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1997             (totFree * totFree));
1998     frag = (double)1.0  - frag;
1999   } else {
2000     assert(frag == 0.0, "Follows from totFree == 0");
2001   }
2002   return frag;
2003 }
2004 
2005 void CompactibleFreeListSpace::beginSweepFLCensus(
2006   float inter_sweep_current,
2007   float inter_sweep_estimate,
2008   float intra_sweep_estimate) {
2009   assert_locked();
2010   size_t i;
2011   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2012     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2013     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2014     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2015     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2016     fl->set_before_sweep(fl->count());
2017     fl->set_bfr_surp(fl->surplus());
2018   }
2019   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2020                                     inter_sweep_current,
2021                                     inter_sweep_estimate,
2022                                     intra_sweep_estimate);
2023 }
2024 
2025 void CompactibleFreeListSpace::setFLSurplus() {
2026   assert_locked();
2027   size_t i;
2028   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2029     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2030     fl->set_surplus(fl->count() -
2031                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2032   }
2033 }
2034 
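// Walk the indexed lists from largest to smallest, recording for each size the
// nearest larger size that currently has a surplus; bestFitSmall() follows
// these hints to avoid scanning lists that have nothing to spare.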
2035 void CompactibleFreeListSpace::setFLHints() {
2036   assert_locked();
2037   size_t i;
2038   size_t h = IndexSetSize;
2039   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2040     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2041     fl->set_hint(h);
2042     if (fl->surplus() > 0) {
2043       h = i;
2044     }
2045   }
2046 }
2047 
2048 void CompactibleFreeListSpace::clearFLCensus() {
2049   assert_locked();
2050   size_t i;
2051   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2052     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2053     fl->set_prev_sweep(fl->count());
2054     fl->set_coal_births(0);
2055     fl->set_coal_deaths(0);
2056     fl->set_split_births(0);
2057     fl->set_split_deaths(0);
2058   }
2059 }
2060 
2061 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2062   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2063   setFLSurplus();
2064   setFLHints();
2065   printFLCensus(sweep_count);
2066   clearFLCensus();
2067   assert_locked();
2068   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2069 }
2070 
2071 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2072   if (size < SmallForDictionary) {
2073     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2074     return (fl->coal_desired() < 0) ||
2075            ((int)fl->count() > fl->coal_desired());
2076   } else {
2077     return dictionary()->coal_dict_over_populated(size);
2078   }
2079 }
2080 
2081 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2082   assert(size < SmallForDictionary, "Size too large for indexed list");
2083   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2084   fl->increment_coal_births();
2085   fl->increment_surplus();
2086 }
2087 
2088 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2089   assert(size < SmallForDictionary, "Size too large for indexed list");
2090   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2091   fl->increment_coal_deaths();
2092   fl->decrement_surplus();
2093 }
2094 
2095 void CompactibleFreeListSpace::coalBirth(size_t size) {
2096   if (size  < SmallForDictionary) {
2097     smallCoalBirth(size);
2098   } else {
2099     dictionary()->dict_census_update(size,
2100                                    false /* split */,
2101                                    true /* birth */);
2102   }
2103 }
2104 
2105 void CompactibleFreeListSpace::coalDeath(size_t size) {
2106   if (size < SmallForDictionary) {
2107     smallCoalDeath(size);
2108   } else {
2109     dictionary()->dict_census_update(size,
2110                                    false /* split */,
2111                                    false /* birth */);
2112   }
2113 }
2114 
2115 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2116   assert(size < SmallForDictionary, "Size too large for indexed list");
2117   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2118   fl->increment_split_births();
2119   fl->increment_surplus();
2120 }
2121 
2122 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2123   assert(size < SmallForDictionary, "Size too large for indexed list");
2124   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2125   fl->increment_split_deaths();
2126   fl->decrement_surplus();
2127 }
2128 
2129 void CompactibleFreeListSpace::split_birth(size_t size) {
2130   if (size  < SmallForDictionary) {
2131     smallSplitBirth(size);
2132   } else {
2133     dictionary()->dict_census_update(size,
2134                                    true /* split */,
2135                                    true /* birth */);
2136   }
2137 }
2138 
2139 void CompactibleFreeListSpace::splitDeath(size_t size) {
2140   if (size  < SmallForDictionary) {
2141     smallSplitDeath(size);
2142   } else {
2143     dictionary()->dict_census_update(size,
2144                                    true /* split */,
2145                                    false /* birth */);
2146   }
2147 }
2148 
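// Census accounting for a split: the original size records one split death and
// each of the two resulting sizes records a split birth.  These counters feed
// the per-sweep free list census (see beginSweepFLCensus/endSweepFLCensus).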
2149 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2150   size_t to2 = from - to1;
2151   splitDeath(from);
2152   split_birth(to1);
2153   split_birth(to2);
2154 }
2155 
2156 void CompactibleFreeListSpace::print() const {
2157   print_on(tty);
2158 }
2159 
2160 void CompactibleFreeListSpace::prepare_for_verify() {
2161   assert_locked();
2162   repairLinearAllocationBlocks();
2163   // Verify that the SpoolBlocks look like free blocks of
2164   // appropriate sizes... To be done ...
2165 }
2166 
2167 class VerifyAllBlksClosure: public BlkClosure {
2168  private:
2169   const CompactibleFreeListSpace* _sp;
2170   const MemRegion                 _span;
2171   HeapWord*                       _last_addr;
2172   size_t                          _last_size;
2173   bool                            _last_was_obj;
2174   bool                            _last_was_live;
2175 
2176  public:
2177   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2178     MemRegion span) :  _sp(sp), _span(span),
2179                        _last_addr(NULL), _last_size(0),
2180                        _last_was_obj(false), _last_was_live(false) { }
2181 
2182   virtual size_t do_blk(HeapWord* addr) {
2183     size_t res;
2184     bool   was_obj  = false;
2185     bool   was_live = false;
2186     if (_sp->block_is_obj(addr)) {
2187       was_obj = true;
2188       oop p = oop(addr);
2189       guarantee(p->is_oop(), "Should be an oop");
2190       res = _sp->adjustObjectSize(p->size());
2191       if (_sp->obj_is_alive(addr)) {
2192         was_live = true;
2193         p->verify();
2194       }
2195     } else {
2196       FreeChunk* fc = (FreeChunk*)addr;
2197       res = fc->size();
2198       if (FLSVerifyLists && !fc->cantCoalesce()) {
2199         guarantee(_sp->verify_chunk_in_free_list(fc),
2200                   "Chunk should be on a free list");
2201       }
2202     }
2203     if (res == 0) {
2204       LogHandle(gc, verify) log;
2205       log.info("Livelock: no rank reduction!");
2206       log.info(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2207                " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2208         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2209         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2210       ResourceMark rm;
2211       _sp->print_on(log.info_stream());
2212       guarantee(false, "Verification failed.");
2213     }
2214     _last_addr = addr;
2215     _last_size = res;
2216     _last_was_obj  = was_obj;
2217     _last_was_live = was_live;
2218     return res;
2219   }
2220 };
2221 
2222 class VerifyAllOopsClosure: public OopClosure {
2223  private:
2224   const CMSCollector*             _collector;
2225   const CompactibleFreeListSpace* _sp;
2226   const MemRegion                 _span;
2227   const bool                      _past_remark;
2228   const CMSBitMap*                _bit_map;
2229 
2230  protected:
2231   void do_oop(void* p, oop obj) {
2232     if (_span.contains(obj)) { // the interior oop points into CMS heap
2233       if (!_span.contains(p)) { // reference from outside CMS heap
2234         // Should be a valid object; the first disjunct below allows
2235         // us to sidestep an assertion in block_is_obj() that insists
2236         // that p be in _sp. Note that several generations (and spaces)
2237         // are spanned by _span (CMS heap) above.
2238         guarantee(!_sp->is_in_reserved(obj) ||
2239                   _sp->block_is_obj((HeapWord*)obj),
2240                   "Should be an object");
2241         guarantee(obj->is_oop(), "Should be an oop");
2242         obj->verify();
2243         if (_past_remark) {
2244           // Remark has been completed, the object should be marked
2245           _bit_map->isMarked((HeapWord*)obj);
2246         }
2247       } else { // reference within CMS heap
2248         if (_past_remark) {
2249           // Remark has been completed -- so the referent should have
2250           // been marked, if referring object is.
2251           if (_bit_map->isMarked(_collector->block_start(p))) {
2252             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2253           }
2254         }
2255       }
2256     } else if (_sp->is_in_reserved(p)) {
2257       // the reference is from FLS, and points out of FLS
2258       guarantee(obj->is_oop(), "Should be an oop");
2259       obj->verify();
2260     }
2261   }
2262 
2263   template <class T> void do_oop_work(T* p) {
2264     T heap_oop = oopDesc::load_heap_oop(p);
2265     if (!oopDesc::is_null(heap_oop)) {
2266       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2267       do_oop(p, obj);
2268     }
2269   }
2270 
2271  public:
2272   VerifyAllOopsClosure(const CMSCollector* collector,
2273     const CompactibleFreeListSpace* sp, MemRegion span,
2274     bool past_remark, CMSBitMap* bit_map) :
2275     _collector(collector), _sp(sp), _span(span),
2276     _past_remark(past_remark), _bit_map(bit_map) { }
2277 
2278   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2279   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2280 };
2281 
2282 void CompactibleFreeListSpace::verify() const {
2283   assert_lock_strong(&_freelistLock);
2284   verify_objects_initialized();
2285   MemRegion span = _collector->_span;
2286   bool past_remark = (_collector->abstract_state() ==
2287                       CMSCollector::Sweeping);
2288 
2289   ResourceMark rm;
2290   HandleMark  hm;
2291 
2292   // Check integrity of CFL data structures
2293   _promoInfo.verify();
2294   _dictionary->verify();
2295   if (FLSVerifyIndexTable) {
2296     verifyIndexedFreeLists();
2297   }
2298   // Check integrity of all objects and free blocks in space
2299   {
2300     VerifyAllBlksClosure cl(this, span);
2301     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2302   }
2303   // Check that all references in the heap to FLS
2304   // are to valid objects in FLS or that references in
2305   // FLS are to valid objects elsewhere in the heap
2306   if (FLSVerifyAllHeapReferences)
2307   {
2308     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2309       _collector->markBitMap());
2310 
2311     // Iterate over all oops in the heap. Uses the _no_header version
2312     // since we are not interested in following the klass pointers.
2313     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2314   }
2315 
2316   if (VerifyObjectStartArray) {
2317     // Verify the block offset table
2318     _bt.verify();
2319   }
2320 }
2321 
2322 #ifndef PRODUCT
2323 void CompactibleFreeListSpace::verifyFreeLists() const {
2324   if (FLSVerifyLists) {
2325     _dictionary->verify();
2326     verifyIndexedFreeLists();
2327   } else {
2328     if (FLSVerifyDictionary) {
2329       _dictionary->verify();
2330     }
2331     if (FLSVerifyIndexTable) {
2332       verifyIndexedFreeLists();
2333     }
2334   }
2335 }
2336 #endif
2337 
2338 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2339   size_t i = 0;
2340   for (; i < IndexSetStart; i++) {
2341     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2342   }
2343   for (; i < IndexSetSize; i++) {
2344     verifyIndexedFreeList(i);
2345   }
2346 }
2347 
2348 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2349   FreeChunk* fc   =  _indexedFreeList[size].head();
2350   FreeChunk* tail =  _indexedFreeList[size].tail();
2351   size_t    num = _indexedFreeList[size].count();
2352   size_t      n = 0;
2353   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2354             "Slot should have been empty");
2355   for (; fc != NULL; fc = fc->next(), n++) {
2356     guarantee(fc->size() == size, "Size inconsistency");
2357     guarantee(fc->is_free(), "!free?");
2358     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2359     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2360   }
2361   guarantee(n == num, "Incorrect count");
2362 }
2363 
2364 #ifndef PRODUCT
2365 void CompactibleFreeListSpace::check_free_list_consistency() const {
2366   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2367     "Some sizes can't be allocated without recourse to"
2368     " linear allocation buffers");
2369   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2370     "else MIN_TREE_CHUNK_SIZE is wrong");
2371   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2372   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2373 }
2374 #endif
2375 
2376 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2377   assert_lock_strong(&_freelistLock);
2378   LogHandle(gc, freelist, census) log;
2379   if (!log.is_debug()) {
2380     return;
2381   }
2382   AdaptiveFreeList<FreeChunk> total;
2383   log.debug("end sweep# " SIZE_FORMAT, sweep_count);
2384   ResourceMark rm;
2385   outputStream* out = log.debug_stream();
2386   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2387   size_t total_free = 0;
2388   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2389     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2390     total_free += fl->count() * fl->size();
2391     if (i % (40*IndexSetStride) == 0) {
2392       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2393     }
2394     fl->print_on(out);
2395     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2396     total.set_surplus(    total.surplus()     + fl->surplus()    );
2397     total.set_desired(    total.desired()     + fl->desired()    );
2398     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2399     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2400     total.set_count(      total.count()       + fl->count()      );
2401     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2402     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2403     total.set_split_births(total.split_births() + fl->split_births());
2404     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2405   }
2406   total.print_on(out, "TOTAL");
2407   log.debug("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2408   log.debug("growth: %8.5f  deficit: %8.5f",
2409             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2410                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2411             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2412   _dictionary->print_dict_census(out);
2413 }
2414 
2415 ///////////////////////////////////////////////////////////////////////////
2416 // CFLS_LAB
2417 ///////////////////////////////////////////////////////////////////////////
2418 
2419 #define VECTOR_257(x)                                                                                  \
2420   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2421   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2422      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2423      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2424      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2425      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2426      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2427      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2428      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2429      x }
2430 
2431 // Initialize with default setting for CMS, _not_
2432 // generic OldPLABSize, whose static default is different; if overridden at the
2433 // command-line, this will get reinitialized via a call to
2434 // modify_initialization() below.
2435 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2436   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CFLS_LAB::_default_dynamic_old_plab_size));
2437 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2438 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2439 
2440 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2441   _cfls(cfls)
2442 {
2443   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2444   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2445        i < CompactibleFreeListSpace::IndexSetSize;
2446        i += CompactibleFreeListSpace::IndexSetStride) {
2447     _indexedFreeList[i].set_size(i);
2448     _num_blocks[i] = 0;
2449   }
2450 }
2451 
2452 static bool _CFLS_LAB_modified = false;
2453 
2454 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2455   assert(!_CFLS_LAB_modified, "Call only once");
2456   _CFLS_LAB_modified = true;
2457   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2458        i < CompactibleFreeListSpace::IndexSetSize;
2459        i += CompactibleFreeListSpace::IndexSetStride) {
2460     _blocks_to_claim[i].modify(n, wt, true /* force */);
2461   }
2462 }
2463 
2464 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2465   FreeChunk* res;
2466   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2467   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2468     // This locking manages sync with other large object allocations.
2469     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2470                     Mutex::_no_safepoint_check_flag);
2471     res = _cfls->getChunkFromDictionaryExact(word_sz);
2472     if (res == NULL) return NULL;
2473   } else {
2474     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2475     if (fl->count() == 0) {
2476       // Attempt to refill this local free list.
2477       get_from_global_pool(word_sz, fl);
2478       // If it didn't work, give up.
2479       if (fl->count() == 0) return NULL;
2480     }
2481     res = fl->get_chunk_at_head();
2482     assert(res != NULL, "Why was count non-zero?");
2483   }
2484   res->markNotFree();
2485   assert(!res->is_free(), "shouldn't be marked free");
2486   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2487   // mangle a just allocated object with a distinct pattern.
2488   debug_only(res->mangleAllocated(word_sz));
2489   return (HeapWord*)res;
2490 }
2491 
2492 // Get a chunk of blocks of the right size and update related
2493 // book-keeping stats
2494 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2495   // Get the #blocks we want to claim
2496   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2497   assert(n_blks > 0, "Error");
2498   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2499   // In some cases, when the application has a phase change,
2500   // there may be a sudden and sharp shift in the object survival
2501   // profile, and updating the counts at the end of a scavenge
2502   // may not be quick enough, giving rise to large scavenge pauses
2503   // during these phase changes. It is beneficial to detect such
2504   // changes on-the-fly during a scavenge and avoid such a phase-change
2505   // pothole. The following code is a heuristic attempt to do that.
2506   // It is protected by a product flag until we have gained
2507   // enough experience with this heuristic and fine-tuned its behavior.
2508   // WARNING: This might increase fragmentation if we overreact to
2509   // small spikes, so some kind of historical smoothing based on
2510   // previous experience with the greater reactivity might be useful.
2511   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2512   // default.
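  // The boost below: if this thread has already claimed more blocks of this
  // size than CMSOldPLABToleranceFactor * CMSOldPLABNumRefills * n_blks, the
  // claim grows by CMSOldPLABReactivityFactor * n_blks for each such multiple,
  // capped at CMSOldPLABMax.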
2513   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2514     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2515     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2516     n_blks = MIN2(n_blks, CMSOldPLABMax);
2517   }
2518   assert(n_blks > 0, "Error");
2519   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2520   // Update stats table entry for this block size
2521   _num_blocks[word_sz] += fl->count();
2522 }
2523 
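// Fold the per-size block usage gathered in retire() into the weighted
// averages (_blocks_to_claim) that seed the next round's claim sizes: when
// ResizeOldPLAB is set, the sample is blocks used per worker per refill,
// clamped to [CMSOldPLABMin, CMSOldPLABMax]; the global counters are then
// reset for the next round.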
2524 void CFLS_LAB::compute_desired_plab_size() {
2525   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2526        i < CompactibleFreeListSpace::IndexSetSize;
2527        i += CompactibleFreeListSpace::IndexSetStride) {
2528     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2529            "Counter inconsistency");
2530     if (_global_num_workers[i] > 0) {
2531       // Need to smooth wrt historical average
2532       if (ResizeOldPLAB) {
2533         _blocks_to_claim[i].sample(
2534           MAX2(CMSOldPLABMin,
2535           MIN2(CMSOldPLABMax,
2536                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2537       }
2538       // Reset counters for next round
2539       _global_num_workers[i] = 0;
2540       _global_num_blocks[i] = 0;
2541       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2542     }
2543   }
2544 }
2545 
2546 // If this is changed in the future to allow parallel
2547 // access, one would need to take the FL locks and,
2548 // depending on how it is used, stagger access from
2549 // parallel threads to reduce contention.
2550 void CFLS_LAB::retire(int tid) {
2551   // We run this single threaded with the world stopped;
2552   // so no need for locks and such.
2553   NOT_PRODUCT(Thread* t = Thread::current();)
2554   assert(Thread::current()->is_VM_thread(), "Error");
2555   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2556        i < CompactibleFreeListSpace::IndexSetSize;
2557        i += CompactibleFreeListSpace::IndexSetStride) {
2558     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2559            "Can't retire more than what we obtained");
2560     if (_num_blocks[i] > 0) {
2561       size_t num_retire =  _indexedFreeList[i].count();
2562       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2563       {
2564         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2565         //                Mutex::_no_safepoint_check_flag);
2566 
2567         // Update global stats for num_blocks used
2568         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2569         _global_num_workers[i]++;
2570         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2571         if (num_retire > 0) {
2572           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2573           // Reset this list.
2574           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2575           _indexedFreeList[i].set_size(i);
2576         }
2577       }
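           // Illustrative example: if this worker obtained 20 blocks of size
           // i during the scavenge and 3 remain unused, then num_retire == 3,
           // the 17 consumed blocks are added to _global_num_blocks[i], and
           // the 3 leftovers have just been prepended back onto the shared
           // indexed free list above.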
2578       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2579                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2580       // Reset stats for next round
2581       _num_blocks[i]         = 0;
2582     }
2583   }
2584 }
2585 
2586 // Used by par_get_chunk_of_blocks() for the chunks from the
2587 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2588 // of "word_sz" and, if found, splits it into chunks of size "word_sz"
2589 // and adds them to the free list "fl".  "n" is the maximum number of
2590 // chunks to be added to "fl".
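     // Illustrative example (hypothetical sizes): for word_sz == 8 and
     // n == 100, k == 1 first tries the size-8 indexed list directly; if
     // CMSSplitIndexedFreeListBlocks is set, k == 2 then tries the size-16
     // list (each chunk taken there is split into two 8-word blocks), and so
     // on, until k * word_sz reaches IndexSetSize.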
2591 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2592 
2593   // We'll try all multiples of word_sz in the indexed set, starting with
2594   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples.
2595   // If none is found, the caller falls back to splitting a dictionary chunk.
2596   {
2597     bool found;
2598     int  k;
2599     size_t cur_sz;
2600     for (k = 1, cur_sz = k * word_sz, found = false;
2601          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2602          (CMSSplitIndexedFreeListBlocks || k <= 1);
2603          k++, cur_sz = k * word_sz) {
2604       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2605       fl_for_cur_sz.set_size(cur_sz);
2606       {
2607         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2608                         Mutex::_no_safepoint_check_flag);
2609         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2610         if (gfl->count() != 0) {
2611           // nn is the number of chunks of size cur_sz that
2612           // we'd need to split k-ways each, in order to create
2613           // "n" chunks of size word_sz each.
2614           const size_t nn = MAX2(n/k, (size_t)1);
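               // e.g. n == 16 and k == 3 give nn == 5: splitting 5 chunks of
               // size cur_sz three ways yields 15 word_sz-sized blocks, just
               // under the requested maximum.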
2615           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2616           found = true;
2617           if (k > 1) {
2618             // Update split death stats for the cur_sz-size blocks list:
2619             // we increment the split death count by the number of blocks
2620             // we just took from the cur_sz-size blocks list and which
2621             // we will be splitting below.
2622             ssize_t deaths = gfl->split_deaths() +
2623                              fl_for_cur_sz.count();
2624             gfl->set_split_deaths(deaths);
2625           }
2626         }
2627       }
2628       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2629       if (found) {
2630         if (k == 1) {
2631           fl->prepend(&fl_for_cur_sz);
2632         } else {
2633           // Divide each block on fl_for_cur_sz up k ways.
2634           FreeChunk* fc;
2635           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2636             // Must do this in reverse order, so that anybody attempting to
2637             // access the main chunk sees it as a single free block until we
2638             // change it.
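                 // For example, with k == 3 the pieces are carved out at
                 // offsets 2*word_sz, word_sz and finally 0, so the original
                 // chunk header is the last thing to change size.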
2639             size_t fc_size = fc->size();
2640             assert(fc->is_free(), "Error");
2641             for (int i = k-1; i >= 0; i--) {
2642               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2643               assert((i != 0) ||
2644                         ((fc == ffc) && ffc->is_free() &&
2645                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2646                         "Counting error");
2647               ffc->set_size(word_sz);
2648               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2649               ffc->link_next(NULL);
2650               // Above must occur before BOT is updated below.
2651               OrderAccess::storestore();
2652               // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2653               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2654               fc_size -= word_sz;
2655               assert(fc_size == i*word_sz, "Error");
2656               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2657               _bt.verify_single_block((HeapWord*)fc, fc_size);
2658               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2659               // Push this on "fl".
2660               fl->return_chunk_at_head(ffc);
2661             }
2662             // Sanity check: the list must remain NULL-terminated.
2663             assert(fl->tail()->next() == NULL, "List invariant.");
2664           }
2665         }
2666         // Update birth stats for this block size.
2667         size_t num = fl->count();
2668         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2669                         Mutex::_no_safepoint_check_flag);
2670         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2671         _indexedFreeList[word_sz].set_split_births(births);
2672         return true;
2673       }
2674     }
2675     return found;
2676   }
2677 }
2678 
2679 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2680 
2681   FreeChunk* fc = NULL;
2682   FreeChunk* rem_fc = NULL;
2683   size_t rem;
2684   {
2685     MutexLockerEx x(parDictionaryAllocLock(),
2686                     Mutex::_no_safepoint_check_flag);
2687     while (n > 0) {
2688       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2689                                   FreeBlockDictionary<FreeChunk>::atLeast);
2690       if (fc != NULL) {
2691         break;
2692       } else {
2693         n--;
2694       }
2695     }
2696     if (fc == NULL) return NULL;
2697     // Otherwise, split up that block.
2698     assert((ssize_t)n >= 1, "Control point invariant");
2699     assert(fc->is_free(), "Error: should be a free block");
2700     _bt.verify_single_block((HeapWord*)fc, fc->size());
2701     const size_t nn = fc->size() / word_sz;
2702     n = MIN2(nn, n);
2703     assert((ssize_t)n >= 1, "Control point invariant");
2704     rem = fc->size() - n * word_sz;
2705     // If the remainder is too small to form a free chunk, allocate one fewer.
2706     if (rem > 0 && rem < MinChunkSize) {
2707       n--; rem += word_sz;
2708     }
2709     // Note that at this point we may have n == 0.
2710     assert((ssize_t)n >= 0, "Control point invariant");
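         // Illustrative example (hypothetical sizes): word_sz == 10, n == 8
         // and fc->size() == 83 give rem == 3; if that is below MinChunkSize
         // we drop to n == 7 and rem == 13, leaving a remainder big enough to
         // live on a free list (the indexed lists, or the dictionary when
         // rem >= IndexSetSize).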
2711 
2712     // If n is 0, the chunk fc that was found is not large
2713     // enough to leave a viable remainder.  We are unable to
2714     // allocate even one block.  Return fc to the dictionary
2715     // and return NULL; the caller's free list is left empty.
2716     if (n == 0) {
2717       returnChunkToDictionary(fc);
2718       return NULL;
2719     }
2720 
2721     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2722     dictionary()->dict_census_update(fc->size(),
2723                                      true /*split*/,
2724                                      false /*birth*/);
2725 
2726     // First return the remainder, if any.
2727     // Note that we hold the lock until we decide if we're going to give
2728     // back the remainder to the dictionary, since a concurrent allocation
2729     // may otherwise see the heap as empty.  (We're willing to take that
2730     // hit if the block is a small block.)
2731     if (rem > 0) {
2732       size_t prefix_size = n * word_sz;
2733       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2734       rem_fc->set_size(rem);
2735       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2736       rem_fc->link_next(NULL);
2737       // Above must occur before BOT is updated below.
2738       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2739       OrderAccess::storestore();
2740       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2741       assert(fc->is_free(), "Error");
2742       fc->set_size(prefix_size);
2743       if (rem >= IndexSetSize) {
2744         returnChunkToDictionary(rem_fc);
2745         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2746         rem_fc = NULL;
2747       }
2748       // Otherwise, return it to the small list below.
2749     }
2750   }
2751   if (rem_fc != NULL) {
2752     MutexLockerEx x(_indexedFreeListParLocks[rem],
2753                     Mutex::_no_safepoint_check_flag);
2754     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2755     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2756     smallSplitBirth(rem);
2757   }
2758   assert(n * word_sz == fc->size(),
2759          "Chunk size " SIZE_FORMAT " does not split exactly into "
2760          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2761          fc->size(), n, word_sz);
2762   return fc;
2763 }
2764 
2765 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2766 
2767   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2768 
2769   if (fc == NULL) {
2770     return;
2771   }
2772 
2773   size_t n = fc->size() / word_sz;
2774 
2775   assert((ssize_t)n > 0, "Consistency");
2776   // Now do the splitting up.
2777   // Must do this in reverse order, so that anybody attempting to
2778   // access the main chunk sees it as a single free block until we
2779   // change it.
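       // Illustrative example: with n == 4, the loop below carves blocks at
       // offsets 3*word_sz, 2*word_sz and word_sz and pushes them on "fl";
       // the chunk at offset 0 keeps its full size until it is resized and
       // pushed last, after the loop.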
2780   size_t fc_size = n * word_sz;
2781   // All but the first chunk in this loop
2782   for (ssize_t i = n-1; i > 0; i--) {
2783     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2784     ffc->set_size(word_sz);
2785     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2786     ffc->link_next(NULL);
2787     // Above must occur before BOT is updated below.
2788     OrderAccess::storestore();
2789     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2790     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2791     fc_size -= word_sz;
2792     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2793     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2794     _bt.verify_single_block((HeapWord*)fc, fc_size);
2795     // Push this on "fl".
2796     fl->return_chunk_at_head(ffc);
2797   }
2798   // First chunk
2799   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2800   // The blocks above should show their new sizes before the first block below
2801   fc->set_size(word_sz);
2802   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2803   fc->link_next(NULL);
2804   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2805   _bt.verify_single_block((HeapWord*)fc, fc->size());
2806   fl->return_chunk_at_head(fc);
2807 
2808   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2809   {
2810     // Update the stats for this block size.
2811     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2812                     Mutex::_no_safepoint_check_flag);
2813     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2814     _indexedFreeList[word_sz].set_split_births(births);
2815     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2816     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2817   }
2818 
2819   // Sanity check: the list must remain NULL-terminated.
2820   assert(fl->tail()->next() == NULL, "List invariant.");
2821 }
2822 
2823 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2824   assert(fl->count() == 0, "Precondition.");
2825   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2826          "Precondition");
2827 
2828   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2829     // Got it
2830     return;
2831   }
2832 
2833   // Otherwise, we'll split a block from the dictionary.
2834   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2835 }
2836 
2837 // Set up the space's par_seq_tasks structure for work claiming
2838 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2839 // XXX Need to suitably abstract and generalize this and the next
2840 // method into one.
2841 void
2842 CompactibleFreeListSpace::
2843 initialize_sequential_subtasks_for_rescan(int n_threads) {
2844   // The "size" of each task is fixed according to rescan_task_size.
2845   assert(n_threads > 0, "Unexpected n_threads argument");
2846   const size_t task_size = rescan_task_size();
2847   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2848   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2849   assert(n_tasks == 0 ||
2850          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2851           (used_region().start() + n_tasks*task_size >= used_region().end())),
2852          "n_tasks calculation incorrect");
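       // Illustrative example (hypothetical sizes): a used region of 10000
       // words with task_size == 4096 gives n_tasks == (10000 + 4095) / 4096
       // == 3; the first two tasks span 4096 words each and the last covers
       // the remaining 1808 words.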
2853   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2854   assert(!pst->valid(), "Clobbering existing data?");
2855   // Sets the condition for completion of the subtask (how many threads
2856   // need to finish in order to be done).
2857   pst->set_n_threads(n_threads);
2858   pst->set_n_tasks((int)n_tasks);
2859 }
2860 
2861 // Set up the space's par_seq_tasks structure for work claiming
2862 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2863 void
2864 CompactibleFreeListSpace::
2865 initialize_sequential_subtasks_for_marking(int n_threads,
2866                                            HeapWord* low) {
2867   // The "size" of each task is fixed according to marking_task_size.
2868   assert(n_threads > 0, "Unexpected n_threads argument");
2869   const size_t task_size = marking_task_size();
2870   assert(task_size > CardTableModRefBS::card_size_in_words &&
2871          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2872          "Otherwise arithmetic below would be incorrect");
2873   MemRegion span = _old_gen->reserved();
2874   if (low != NULL) {
2875     if (span.contains(low)) {
2876       // Align low down to a card boundary so that
2877       // we can use block_offset_careful() on span boundaries.
2878       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2879                                  CardTableModRefBS::card_size);
2880       // Clip span prefix at aligned_low
2881       span = span.intersection(MemRegion(aligned_low, span.end()));
2882     } else if (low > span.end()) {
2883       span = MemRegion(low, low);  // Null region
2884     } // else use entire span
2885   }
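       // Illustrative example: with the usual 512-byte card size, a "low" of
       // 0x...1234 is aligned down to 0x...1200, so the clipped span starts
       // on a card boundary and block_offset_careful() can be used at its
       // start.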
2886   assert(span.is_empty() ||
2887          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2888         "span should start at a card boundary");
2889   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2890   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2891   assert(n_tasks == 0 ||
2892          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2893           (span.start() + n_tasks*task_size >= span.end())),
2894          "n_tasks calculation incorrect");
2895   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2896   assert(!pst->valid(), "Clobbering existing data?");
2897   // Sets the condition for completion of the subtask (how many threads
2898   // need to finish in order to be done).
2899   pst->set_n_threads(n_threads);
2900   pst->set_n_tasks((int)n_tasks);
2901 }