1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsHeap.hpp"
  27 #include "gc/cms/cmsLockVerifier.hpp"
  28 #include "gc/cms/compactibleFreeListSpace.hpp"
  29 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  30 #include "gc/cms/concurrentMarkSweepThread.hpp"
  31 #include "gc/shared/blockOffsetTable.inline.hpp"
  32 #include "gc/shared/collectedHeap.inline.hpp"
  33 #include "gc/shared/space.inline.hpp"
  34 #include "gc/shared/spaceDecorator.hpp"
  35 #include "logging/log.hpp"
  36 #include "logging/logStream.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/globals.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/orderAccess.inline.hpp"
  46 #include "runtime/vmThread.hpp"
  47 #include "utilities/align.hpp"
  48 #include "utilities/copy.hpp"
  49 
  50 /////////////////////////////////////////////////////////////////////////
  51 //// CompactibleFreeListSpace
  52 /////////////////////////////////////////////////////////////////////////
  53 
  54 // Highest-ranked free list lock rank.
  55 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  56 
  57 // Defaults are 0 so things will break badly if incorrectly initialized.
  58 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  59 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  60 size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;
  61 
  62 size_t MinChunkSize = 0;
  63 
  64 void CompactibleFreeListSpace::set_cms_values() {
  65   // Set CMS global values
  66   assert(MinChunkSize == 0, "already set");
  67 
  68   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  69   // for chunks to contain a FreeChunk.
  70   _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  71   MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;
  72 
  73   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  74   IndexSetStart  = MinChunkSize;
  75   IndexSetStride = MinObjAlignment;
  76 }
  77 
  78 // Constructor
  79 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  80   _bt(bs, mr),
  81   // Free list locks are in the range of values taken by _lockRank,
  82   // which currently is [_leaf+2, _leaf+3].
  83   // Note: this requires that CFLspace c'tors are called serially in the
  84   // order in which the locks are acquired in the program text. This is
  85   // true today.
  86   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  87                 Monitor::_safepoint_check_sometimes),
  88   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  89                           "CompactibleFreeListSpace._dict_par_lock", true,
  90                           Monitor::_safepoint_check_never),
  91   _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
  92                     CMSRescanMultiple),
  93   _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
  94                     CMSConcMarkMultiple),
  95   _collector(NULL),
  96   _preconsumptionDirtyCardClosure(NULL)
  97 {
  98   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  99          "FreeChunk is larger than expected");
 100   _bt.set_space(this);
 101   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 102 
 103   _dictionary = new AFLBinaryTreeDictionary(mr);
 104 
 105   assert(_dictionary != NULL, "CMS dictionary initialization");
 106   // The indexed free lists are initially all empty and are lazily
 107   // filled in on demand. Initialize the array elements to NULL.
 108   initializeIndexedFreeListArray();
 109 
 110   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 111                              SmallForLinearAlloc);
 112 
 113   // CMSIndexedFreeListReplenish should be at least 1
 114   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 115   _promoInfo.setSpace(this);
 116   if (UseCMSBestFit) {
 117     _fitStrategy = FreeBlockBestFitFirst;
 118   } else {
 119     _fitStrategy = FreeBlockStrategyNone;
 120   }
 121   check_free_list_consistency();
 122 
 123   // Initialize locks for parallel case.
 124   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 125     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 126                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 127     DEBUG_ONLY(
 128       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 129     )
 130   }
 131   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 132 }
 133 
 134 // Like CompactibleSpace::forward() but always calls cross_threshold() to
 135 // update the block offset table.  Removed initialize_threshold call because
 136 // CFLS does not use a block offset array for contiguous spaces.
 137 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 138                                     CompactPoint* cp, HeapWord* compact_top) {
 139   // q is alive
 140   // First check if we should switch compaction space
 141   assert(this == cp->space, "'this' should be current compaction space.");
 142   size_t compaction_max_size = pointer_delta(end(), compact_top);
 143   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 144     "virtual adjustObjectSize_v() method is not correct");
 145   size_t adjusted_size = adjustObjectSize(size);
 146   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 147          "no small fragments allowed");
 148   assert(minimum_free_block_size() == MinChunkSize,
 149          "for de-virtualized reference below");
 150   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 151   if (adjusted_size + MinChunkSize > compaction_max_size &&
 152       adjusted_size != compaction_max_size) {
 153     do {
 154       // switch to next compaction space
 155       cp->space->set_compaction_top(compact_top);
 156       cp->space = cp->space->next_compaction_space();
 157       if (cp->space == NULL) {
 158         cp->gen = CMSHeap::heap()->young_gen();
 159         assert(cp->gen != NULL, "compaction must succeed");
 160         cp->space = cp->gen->first_compaction_space();
 161         assert(cp->space != NULL, "generation must have a first compaction space");
 162       }
 163       compact_top = cp->space->bottom();
 164       cp->space->set_compaction_top(compact_top);
 165       // The correct adjusted_size may not be the same as that for this method
 166       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 167       // Use the virtual method here; the non-virtual method was used above
 168       // to save the virtual dispatch.
 169       adjusted_size = cp->space->adjust_object_size_v(size);
 170       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 171       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 172     } while (adjusted_size > compaction_max_size);
 173   }
 174 
 175   // store the forwarding pointer into the mark word
 176   if ((HeapWord*)q != compact_top) {
 177     q->forward_to(oop(compact_top));
 178     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 179   } else {
 180     // if the object isn't moving we can just set the mark to the default
 181     // mark and handle it specially later on.
 182     q->init_mark();
 183     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 184   }
 185 
 186   compact_top += adjusted_size;
 187 
 188   // we need to update the offset table so that the beginnings of objects can be
 189   // found during scavenge.  Note that we are updating the offset table based on
 190   // where the object will be once the compaction phase finishes.
 191 
 192   // Always call cross_threshold().  A contiguous space would call it only
 193   // when the compaction top exceeds the current threshold, but that does
 194   // not hold for a non-contiguous space.
 195   cp->threshold =
 196     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 197   return compact_top;
 198 }
 199 
 200 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 201 // and use of single_block instead of alloc_block.  The name here is not really
 202 // appropriate - maybe a more general name could be invented for both the
 203 // contiguous and noncontiguous spaces.
 204 
 205 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 206   _bt.single_block(start, the_end);
 207   return end();
 208 }
 209 
 210 // Initialize the indexed free lists to their empty state.
 211 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 212   for (size_t i = 0; i < IndexSetSize; i++) {
 213     // Note that on platforms where objects are double word aligned,
 214     // the odd array elements are not used.  It is convenient, however,
 215     // to map directly from the object size to the array element.
 216     _indexedFreeList[i].reset(IndexSetSize);
 217     _indexedFreeList[i].set_size(i);
 218     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 219     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 220     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 221     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 222   }
 223 }
 224 
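     // Return the size, in HeapWords, of the object starting at addr,
     // adjusted to the space's minimum chunk size and alignment.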
 225 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 226   return adjustObjectSize(oop(addr)->size());
 227 }
 228 
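     // Empty out each indexed free list; used when the free space is
     // rebuilt, e.g. by reset() after a compaction.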
 229 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 230   for (size_t i = 1; i < IndexSetSize; i++) {
 231     assert(_indexedFreeList[i].size() == (size_t) i,
 232       "Indexed free list sizes are incorrect");
 233     _indexedFreeList[i].reset(IndexSetSize);
 234     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 235     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 236     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 237     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 238   }
 239 }
 240 
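     // Rebuild the free-space data structures so that the space's free
     // space is exactly the region mr: the indexed free lists and the
     // dictionary are cleared, and mr (if non-empty) is installed as a
     // single free chunk.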
 241 void CompactibleFreeListSpace::reset(MemRegion mr) {
 242   resetIndexedFreeListArray();
 243   dictionary()->reset();
 244   if (BlockOffsetArrayUseUnallocatedBlock) {
 245     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 246     // Everything's allocated until proven otherwise.
 247     _bt.set_unallocated_block(end());
 248   }
 249   if (!mr.is_empty()) {
 250     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 251     _bt.single_block(mr.start(), mr.word_size());
 252     FreeChunk* fc = (FreeChunk*) mr.start();
 253     fc->set_size(mr.word_size());
 254     if (mr.word_size() >= IndexSetSize ) {
 255       returnChunkToDictionary(fc);
 256     } else {
 257       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 258       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 259     }
 260     coalBirth(mr.word_size());
 261   }
 262   _promoInfo.reset();
 263   _smallLinearAllocBlock._ptr = NULL;
 264   _smallLinearAllocBlock._word_size = 0;
 265 }
 266 
 267 void CompactibleFreeListSpace::reset_after_compaction() {
 268   // Reset the space to the new reality - one free chunk.
 269   MemRegion mr(compaction_top(), end());
 270   reset(mr);
 271   // Now refill the linear allocation block(s) if possible.
 272   refillLinearAllocBlocksIfNeeded();
 273 }
 274 
 275 // Walks the entire dictionary, returning a coterminal
 276 // chunk, if it exists. Use with caution since it involves
 277 // a potentially complete walk of a potentially large tree.
 278 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 279 
 280   assert_lock_strong(&_freelistLock);
 281 
 282   return dictionary()->find_chunk_ends_at(end());
 283 }
 284 
 285 
 286 #ifndef PRODUCT
 287 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 288   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 289     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 290   }
 291 }
 292 
 293 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 294   size_t sum = 0;
 295   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 296     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 297   }
 298   return sum;
 299 }
 300 
 301 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 302   size_t count = 0;
 303   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 304     debug_only(
 305       ssize_t total_list_count = 0;
 306       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 307          fc = fc->next()) {
 308         total_list_count++;
 309       }
 310       assert(total_list_count ==  _indexedFreeList[i].count(),
 311         "Count in list is incorrect");
 312     )
 313     count += _indexedFreeList[i].count();
 314   }
 315   return count;
 316 }
 317 
 318 size_t CompactibleFreeListSpace::totalCount() {
 319   size_t num = totalCountInIndexedFreeLists();
 320   num +=  dictionary()->total_count();
 321   if (_smallLinearAllocBlock._word_size != 0) {
 322     num++;
 323   }
 324   return num;
 325 }
 326 #endif
 327 
 328 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 329   FreeChunk* fc = (FreeChunk*) p;
 330   return fc->is_free();
 331 }
 332 
 333 size_t CompactibleFreeListSpace::used() const {
 334   return capacity() - free();
 335 }
 336 
 337 size_t CompactibleFreeListSpace::free() const {
 338   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 339   // if you do this while the structures are in flux you
 340   // may get an approximate answer only; for instance
 341   // because there is concurrent allocation either
 342   // directly by mutators or for promotion during a GC.
 343   // It's "MT-safe", however, in the sense that you are guaranteed
 344   // not to crash and burn, for instance, because of walking
 345   // pointers that could disappear as you were walking them.
 346   // The approximation is because the various components
 347   // that are read below are not read atomically (and
 348   // further the computation of totalSizeInIndexedFreeLists()
 349   // is itself a non-atomic computation). The normal use of
 350   // this is during a resize operation at the end of GC
 351   // and at that time you are guaranteed to get the
 352   // correct actual value. However, for instance, this is
 353   // also read completely asynchronously by the "perf-sampler"
 354   // that supports jvmstat, and you are apt to see the values
 355   // flicker in such cases.
 356   assert(_dictionary != NULL, "No _dictionary?");
 357   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 358           totalSizeInIndexedFreeLists() +
 359           _smallLinearAllocBlock._word_size) * HeapWordSize;
 360 }
 361 
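     // The size, in words, of the largest chunk currently available for
     // allocation, considering the dictionary, the small linear allocation
     // block, and the indexed free lists.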
 362 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 363   assert(_dictionary != NULL, "No _dictionary?");
 364   assert_locked();
 365   size_t res = _dictionary->max_chunk_size();
 366   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 367                        (size_t) SmallForLinearAlloc - 1));
 368   // XXX the following could potentially be pretty slow;
 369   // should one, pessimistically for the rare cases when res
 370   // calculated above is less than IndexSetSize,
 371   // just return res calculated above? My reasoning was that
 372   // those cases will be so rare that the extra time spent doesn't
 373   // really matter....
 374   // Note: do not change the loop test i >= res + IndexSetStride
 375   // to i > res below, because i is unsigned and res may be zero.
 376   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 377        i -= IndexSetStride) {
 378     if (_indexedFreeList[i].head() != NULL) {
 379       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 380       return i;
 381     }
 382   }
 383   return res;
 384 }
 385 
 386 void LinearAllocBlock::print_on(outputStream* st) const {
 387   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 388             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 389             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 390 }
 391 
 392 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 393   st->print_cr("COMPACTIBLE FREELIST SPACE");
 394   st->print_cr(" Space:");
 395   Space::print_on(st);
 396 
 397   st->print_cr("promoInfo:");
 398   _promoInfo.print_on(st);
 399 
 400   st->print_cr("_smallLinearAllocBlock");
 401   _smallLinearAllocBlock.print_on(st);
 402 
 403   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 404 
 405   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 406 }
 407 
 408 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 409 const {
 410   reportIndexedFreeListStatistics(st);
 411   st->print_cr("Layout of Indexed Freelists");
 412   st->print_cr("---------------------------");
 413   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 414   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 415     _indexedFreeList[i].print_on(st);
 416     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 417       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 418                    p2i(fc), p2i((HeapWord*)fc + i),
 419                    fc->cantCoalesce() ? "\t CC" : "");
 420     }
 421   }
 422 }
 423 
 424 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 425 const {
 426   _promoInfo.print_on(st);
 427 }
 428 
 429 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 430 const {
 431   _dictionary->report_statistics(st);
 432   st->print_cr("Layout of Freelists in Tree");
 433   st->print_cr("---------------------------");
 434   _dictionary->print_free_lists(st);
 435 }
 436 
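     // A closure that prints one line per block visited: a live or dead
     // object, or a free chunk. Used by dump_at_safepoint_with_locks() below.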
 437 class BlkPrintingClosure: public BlkClosure {
 438   const CMSCollector*             _collector;
 439   const CompactibleFreeListSpace* _sp;
 440   const CMSBitMap*                _live_bit_map;
 441   const bool                      _post_remark;
 442   outputStream*                   _st;
 443 public:
 444   BlkPrintingClosure(const CMSCollector* collector,
 445                      const CompactibleFreeListSpace* sp,
 446                      const CMSBitMap* live_bit_map,
 447                      outputStream* st):
 448     _collector(collector),
 449     _sp(sp),
 450     _live_bit_map(live_bit_map),
 451     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 452     _st(st) { }
 453   size_t do_blk(HeapWord* addr);
 454 };
 455 
 456 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 457   size_t sz = _sp->block_size_no_stall(addr, _collector);
 458   assert(sz != 0, "Should always be able to compute a size");
 459   if (_sp->block_is_obj(addr)) {
 460     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 461     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 462       p2i(addr),
 463       dead ? "dead" : "live",
 464       sz,
 465       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 466     if (CMSPrintObjectsInDump && !dead) {
 467       oop(addr)->print_on(_st);
 468       _st->print_cr("--------------------------------------");
 469     }
 470   } else { // free block
 471     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 472       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 473     if (CMSPrintChunksInDump) {
 474       ((FreeChunk*)addr)->print_on(_st);
 475       _st->print_cr("--------------------------------------");
 476     }
 477   }
 478   return sz;
 479 }
 480 
 481 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 482   st->print_cr("=========================");
 483   st->print_cr("Block layout in CMS Heap:");
 484   st->print_cr("=========================");
 485   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 486   blk_iterate(&bpcl);
 487 
 488   st->print_cr("=======================================");
 489   st->print_cr("Order & Layout of Promotion Info Blocks");
 490   st->print_cr("=======================================");
 491   print_promo_info_blocks(st);
 492 
 493   st->print_cr("===========================");
 494   st->print_cr("Order of Indexed Free Lists");
 495   st->print_cr("===========================");
 496   print_indexed_free_lists(st);
 497 
 498   st->print_cr("=================================");
 499   st->print_cr("Order of Free Lists in Dictionary");
 500   st->print_cr("=================================");
 501   print_dictionary_free_lists(st);
 502 }
 503 
 504 
 505 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 506   assert_lock_strong(&_freelistLock);
 507   Log(gc, freelist, stats) log;
 508   if (!log.is_debug()) {
 509     return;
 510   }
 511   log.debug("%s", title);
 512 
 513   LogStream out(log.debug());
 514   _dictionary->report_statistics(&out);
 515 
 516   if (log.is_trace()) {
 517     LogStream trace_out(log.trace());
 518     reportIndexedFreeListStatistics(&trace_out);
 519     size_t total_size = totalSizeInIndexedFreeLists() +
 520                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 521     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 522   }
 523 }
 524 
 525 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 526   assert_lock_strong(&_freelistLock);
 527   st->print_cr("Statistics for IndexedFreeLists:");
 528   st->print_cr("--------------------------------");
 529   size_t total_size = totalSizeInIndexedFreeLists();
 530   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 531   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 532   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 533   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 534   if (free_blocks != 0) {
 535     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 536   }
 537 }
 538 
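     // Number of free blocks held in the indexed free lists; debug builds
     // recount each list to verify the cached counts.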
 539 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 540   size_t res = 0;
 541   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 542     debug_only(
 543       ssize_t recount = 0;
 544       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 545          fc = fc->next()) {
 546         recount += 1;
 547       }
 548       assert(recount == _indexedFreeList[i].count(),
 549         "Incorrect count in list");
 550     )
 551     res += _indexedFreeList[i].count();
 552   }
 553   return res;
 554 }
 555 
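     // Size of the largest non-empty indexed free list, i.e. the largest
     // chunk obtainable from the indexed lists; 0 if they are all empty.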
 556 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 557   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 558     if (_indexedFreeList[i].head() != NULL) {
 559       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 560       return (size_t)i;
 561     }
 562   }
 563   return 0;
 564 }
 565 
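     // Change the end of the space. The block offset table is resized, and
     // on growth the newly covered words are added to the free lists,
     // coalescing with a trailing free block if possible.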
 566 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 567   HeapWord* prevEnd = end();
 568   assert(prevEnd != value, "unnecessary set_end call");
 569   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 570         "New end is below unallocated block");
 571   _end = value;
 572   if (prevEnd != NULL) {
 573     // Resize the underlying block offset table.
 574     _bt.resize(pointer_delta(value, bottom()));
 575     if (value <= prevEnd) {
 576       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 577              "New end is below unallocated block");
 578     } else {
 579       // Now, take this new chunk and add it to the free blocks.
 580       // Note that the BOT has not yet been updated for this block.
 581       size_t newFcSize = pointer_delta(value, prevEnd);
 582       // Add the block to the free lists, if possible coalescing it
 583       // with the last free block, and update the BOT and census data.
 584       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 585     }
 586   }
 587 }
 588 
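     // A dirty-card-to-oop closure specialized for CompactibleFreeListSpace:
     // walk_mem_region_with_cl() dispatches to a parallel or serial variant
     // depending on whether the scan is performed by multiple GC threads.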
 589 class FreeListSpaceDCTOC : public FilteringDCTOC {
 590   CompactibleFreeListSpace* _cfls;
 591   CMSCollector* _collector;
 592   bool _parallel;
 593 protected:
 594   // Override.
 595 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 596   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 597                                        HeapWord* bottom, HeapWord* top, \
 598                                        ClosureType* cl);                \
 599       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 600                                        HeapWord* bottom, HeapWord* top, \
 601                                        ClosureType* cl);                \
 602     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 603                                        HeapWord* bottom, HeapWord* top, \
 604                                        ClosureType* cl)
 605   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 606   walk_mem_region_with_cl_DECL(FilteringClosure);
 607 
 608 public:
 609   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 610                      CMSCollector* collector,
 611                      ExtendedOopClosure* cl,
 612                      CardTable::PrecisionStyle precision,
 613                      HeapWord* boundary,
 614                      bool parallel) :
 615     FilteringDCTOC(sp, cl, precision, boundary),
 616     _cfls(sp), _collector(collector), _parallel(parallel) {}
 617 };
 618 
 619 // We de-virtualize the block-related calls below, since we know that our
 620 // space is a CompactibleFreeListSpace.
 621 
 622 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 623 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 624                                                  HeapWord* bottom,              \
 625                                                  HeapWord* top,                 \
 626                                                  ClosureType* cl) {             \
 627    if (_parallel) {                                                             \
 628      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 629    } else {                                                                     \
 630      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 631    }                                                                            \
 632 }                                                                               \
 633 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 634                                                      HeapWord* bottom,          \
 635                                                      HeapWord* top,             \
 636                                                      ClosureType* cl) {         \
 637   /* Skip parts that are before "mr", in case "block_start" sent us             \
 638      back too far. */                                                           \
 639   HeapWord* mr_start = mr.start();                                              \
 640   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 641   HeapWord* next = bottom + bot_size;                                           \
 642   while (next < mr_start) {                                                     \
 643     bottom = next;                                                              \
 644     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 645     next = bottom + bot_size;                                                   \
 646   }                                                                             \
 647                                                                                 \
 648   while (bottom < top) {                                                        \
 649     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 650         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 651                     oop(bottom)) &&                                             \
 652         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 653       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 654       bottom += _cfls->adjustObjectSize(word_sz);                               \
 655     } else {                                                                    \
 656       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 657     }                                                                           \
 658   }                                                                             \
 659 }                                                                               \
 660 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 661                                                        HeapWord* bottom,        \
 662                                                        HeapWord* top,           \
 663                                                        ClosureType* cl) {       \
 664   /* Skip parts that are before "mr", in case "block_start" sent us             \
 665      back too far. */                                                           \
 666   HeapWord* mr_start = mr.start();                                              \
 667   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 668   HeapWord* next = bottom + bot_size;                                           \
 669   while (next < mr_start) {                                                     \
 670     bottom = next;                                                              \
 671     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 672     next = bottom + bot_size;                                                   \
 673   }                                                                             \
 674                                                                                 \
 675   while (bottom < top) {                                                        \
 676     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 677         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 678                     oop(bottom)) &&                                             \
 679         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 680       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 681       bottom += _cfls->adjustObjectSize(word_sz);                               \
 682     } else {                                                                    \
 683       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 684     }                                                                           \
 685   }                                                                             \
 686 }
 687 
 688 // (There are only two of these, rather than N, because the split is due
 689 // only to the introduction of the FilteringClosure, a local part of the
 690 // impl of this abstraction.)
 691 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 692 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 693 
 694 DirtyCardToOopClosure*
 695 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 696                                       CardTable::PrecisionStyle precision,
 697                                       HeapWord* boundary,
 698                                       bool parallel) {
 699   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 700 }
 701 
 702 
 703 // Note on locking for the space iteration functions:
 704 // since the collector's iteration activities are concurrent with
 705 // allocation activities by mutators, absent a suitable mutual exclusion
 706 // mechanism the iterators may go awry. For instance a block being iterated
 707 // may suddenly be allocated or divided up and part of it allocated and
 708 // so on.
 709 
 710 // Apply the given closure to each block in the space.
 711 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 712   assert_lock_strong(freelistLock());
 713   HeapWord *cur, *limit;
 714   for (cur = bottom(), limit = end(); cur < limit;
 715        cur += cl->do_blk_careful(cur));
 716 }
 717 
 718 // Apply the given closure to each block in the space.
 719 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 720   assert_lock_strong(freelistLock());
 721   HeapWord *cur, *limit;
 722   for (cur = bottom(), limit = end(); cur < limit;
 723        cur += cl->do_blk(cur));
 724 }
 725 
 726 // Apply the given closure to each oop in the space.
 727 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 728   assert_lock_strong(freelistLock());
 729   HeapWord *cur, *limit;
 730   size_t curSize;
 731   for (cur = bottom(), limit = end(); cur < limit;
 732        cur += curSize) {
 733     curSize = block_size(cur);
 734     if (block_is_obj(cur)) {
 735       oop(cur)->oop_iterate(cl);
 736     }
 737   }
 738 }
 739 
 740 // NOTE: In the following methods, in order to safely be able to
 741 // apply the closure to an object, we need to be sure that the
 742 // object has been initialized. We are guaranteed that an object
 743 // is initialized if we are holding the Heap_lock with the
 744 // world stopped.
 745 void CompactibleFreeListSpace::verify_objects_initialized() const {
 746   if (is_init_completed()) {
 747     assert_locked_or_safepoint(Heap_lock);
 748     if (Universe::is_fully_initialized()) {
 749       guarantee(SafepointSynchronize::is_at_safepoint(),
 750                 "Required for objects to be initialized");
 751     }
 752   } // else make a concession at vm start-up
 753 }
 754 
 755 // Apply the given closure to each object in the space
 756 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 757   assert_lock_strong(freelistLock());
 758   NOT_PRODUCT(verify_objects_initialized());
 759   HeapWord *cur, *limit;
 760   size_t curSize;
 761   for (cur = bottom(), limit = end(); cur < limit;
 762        cur += curSize) {
 763     curSize = block_size(cur);
 764     if (block_is_obj(cur)) {
 765       blk->do_object(oop(cur));
 766     }
 767   }
 768 }
 769 
 770 // Apply the given closure to each live object in the space.
 771 //   The use of CompactibleFreeListSpace
 772 // by ConcurrentMarkSweepGeneration for concurrent GCs means the space may
 773 // contain objects with references to objects that are no longer
 774 // valid.  For example, an object may reference another object
 775 // that has already been swept up (collected).  This method uses
 776 // obj_is_alive() to determine whether it is safe to apply the closure to
 777 // an object.  See obj_is_alive() for details on how the liveness of an
 778 // object is decided.
 779 
 780 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 781   assert_lock_strong(freelistLock());
 782   NOT_PRODUCT(verify_objects_initialized());
 783   HeapWord *cur, *limit;
 784   size_t curSize;
 785   for (cur = bottom(), limit = end(); cur < limit;
 786        cur += curSize) {
 787     curSize = block_size(cur);
 788     if (block_is_obj(cur) && obj_is_alive(cur)) {
 789       blk->do_object(oop(cur));
 790     }
 791   }
 792 }
 793 
 794 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 795                                                   UpwardsObjectClosure* cl) {
 796   assert_locked(freelistLock());
 797   NOT_PRODUCT(verify_objects_initialized());
 798   assert(!mr.is_empty(), "Should be non-empty");
 799   // We use MemRegion(bottom(), end()) rather than used_region() below
 800   // because the two are not necessarily equal for some kinds of
 801   // spaces, in particular, certain kinds of free list spaces.
 802   // We could use the more complicated but more precise:
 803   // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
 804   // but the slight imprecision seems acceptable in the assertion check.
 805   assert(MemRegion(bottom(), end()).contains(mr),
 806          "Should be within used space");
 807   HeapWord* prev = cl->previous();   // max address from last time
 808   if (prev >= mr.end()) { // nothing to do
 809     return;
 810   }
 811   // This assert will not work when we go from cms space to perm
 812   // space, and use same closure. Easy fix deferred for later. XXX YSR
 813   // assert(prev == NULL || contains(prev), "Should be within space");
 814 
 815   bool last_was_obj_array = false;
 816   HeapWord *blk_start_addr, *region_start_addr;
 817   if (prev > mr.start()) {
 818     region_start_addr = prev;
 819     blk_start_addr    = prev;
 820     // The previous invocation may have pushed "prev" beyond the
 821     // last allocated block, yet there may still be blocks
 822     // in this region due to a particular coalescing policy.
 823     // Relax the assertion so that the case where the unallocated
 824     // block is maintained and "prev" is beyond the unallocated
 825     // block does not cause the assertion to fire.
 826     assert((BlockOffsetArrayUseUnallocatedBlock &&
 827             (!is_in(prev))) ||
 828            (blk_start_addr == block_start(region_start_addr)), "invariant");
 829   } else {
 830     region_start_addr = mr.start();
 831     blk_start_addr    = block_start(region_start_addr);
 832   }
 833   HeapWord* region_end_addr = mr.end();
 834   MemRegion derived_mr(region_start_addr, region_end_addr);
 835   while (blk_start_addr < region_end_addr) {
 836     const size_t size = block_size(blk_start_addr);
 837     if (block_is_obj(blk_start_addr)) {
 838       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 839     } else {
 840       last_was_obj_array = false;
 841     }
 842     blk_start_addr += size;
 843   }
 844   if (!last_was_obj_array) {
 845     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 846            "Should be within (closed) used space");
 847     assert(blk_start_addr > prev, "Invariant");
 848     cl->set_previous(blk_start_addr); // min address for next time
 849   }
 850 }
 851 
 852 // Callers of this iterator beware: The closure application should
 853 // be robust in the face of uninitialized objects and should (always)
 854 // return a correct size so that the next addr + size below gives us a
 855 // valid block boundary. [See for instance,
 856 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 857 // in ConcurrentMarkSweepGeneration.cpp.]
 858 HeapWord*
 859 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 860   ObjectClosureCareful* cl) {
 861   assert_lock_strong(freelistLock());
 862   // Can't use used_region() below because it may not necessarily
 863   // be the same as [bottom(),end()); although we could
 864   // use [used_region().start(),align_up(used_region().end(),CardSize)),
 865   // that appears too cumbersome, so we just do the simpler check
 866   // in the assertion below.
 867   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 868          "mr should be non-empty and within used space");
 869   HeapWord *addr, *end;
 870   size_t size;
 871   for (addr = block_start_careful(mr.start()), end  = mr.end();
 872        addr < end; addr += size) {
 873     FreeChunk* fc = (FreeChunk*)addr;
 874     if (fc->is_free()) {
 875       // Since we hold the free list lock, which protects direct
 876       // allocation in this generation by mutators, a free object
 877       // will remain free throughout this iteration code.
 878       size = fc->size();
 879     } else {
 880       // Note that the object need not necessarily be initialized,
 881       // because (for instance) the free list lock does NOT protect
 882       // object initialization. The closure application below must
 883       // therefore be correct in the face of uninitialized objects.
 884       size = cl->do_object_careful_m(oop(addr), mr);
 885       if (size == 0) {
 886         // An unparsable object found. Signal early termination.
 887         return addr;
 888       }
 889     }
 890   }
 891   return NULL;
 892 }
 893 
 894 
 895 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 896   NOT_PRODUCT(verify_objects_initialized());
 897   return _bt.block_start(p);
 898 }
 899 
 900 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 901   return _bt.block_start_careful(p);
 902 }
 903 
 904 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 905   NOT_PRODUCT(verify_objects_initialized());
 906   // This must be volatile, or else there is a danger that the compiler
 907   // will compile the code below into a sometimes-infinite loop, by keeping
 908   // the value read the first time in a register.
 909   while (true) {
 910     // We must do this until we get a consistent view of the object.
 911     if (FreeChunk::indicatesFreeChunk(p)) {
 912       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 913       size_t res = fc->size();
 914 
 915       // Bugfix for systems with weak memory model (PPC64/IA64). The
 916       // block's free bit was set and we have read the size of the
 917       // block. Acquire and check the free bit again. If the block is
 918       // still free, the read size is correct.
 919       OrderAccess::acquire();
 920 
 921       // If the object is still a free chunk, return the size, else it
 922       // has been allocated so try again.
 923       if (FreeChunk::indicatesFreeChunk(p)) {
 924         assert(res != 0, "Block size should not be 0");
 925         return res;
 926       }
 927     } else {
 928       // Ensure klass read before size.
 929       Klass* k = oop(p)->klass_or_null_acquire();
 930       if (k != NULL) {
 931         assert(k->is_klass(), "Should really be klass oop.");
 932         oop o = (oop)p;
 933         assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
 934 
 935         size_t res = o->size_given_klass(k);
 936         res = adjustObjectSize(res);
 937         assert(res != 0, "Block size should not be 0");
 938         return res;
 939       }
 940     }
 941   }
 942 }
 943 
 944 // TODO: Now that is_parsable is gone, we should combine these two functions.
 945 // A variant of the above that uses the Printezis bits for
 946 // unparsable but allocated objects. This avoids any possible
 947 // stalls waiting for mutators to initialize objects, and is
 948 // thus potentially faster than the variant above. However,
 949 // this variant may return a zero size for a block that is
 950 // under mutation and for which a consistent size cannot be
 951 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 952 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 953                                                      const CMSCollector* c)
 954 const {
 955   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 956   // This must be volatile, or else there is a danger that the compiler
 957   // will compile the code below into a sometimes-infinite loop, by keeping
 958   // the value read the first time in a register.
 959   DEBUG_ONLY(uint loops = 0;)
 960   while (true) {
 961     // We must do this until we get a consistent view of the object.
 962     if (FreeChunk::indicatesFreeChunk(p)) {
 963       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 964       size_t res = fc->size();
 965 
 966       // Bugfix for systems with weak memory model (PPC64/IA64). The
 967       // free bit of the block was set and we have read the size of
 968       // the block. Acquire and check the free bit again. If the
 969       // block is still free, the read size is correct.
 970       OrderAccess::acquire();
 971 
 972       if (FreeChunk::indicatesFreeChunk(p)) {
 973         assert(res != 0, "Block size should not be 0");
 974         assert(loops == 0, "Should be 0");
 975         return res;
 976       }
 977     } else {
 978       // Ensure klass read before size.
 979       Klass* k = oop(p)->klass_or_null_acquire();
 980       if (k != NULL) {
 981         assert(k->is_klass(), "Should really be klass oop.");
 982         oop o = (oop)p;
 983         assert(oopDesc::is_oop(o), "Should be an oop");
 984 
 985         size_t res = o->size_given_klass(k);
 986         res = adjustObjectSize(res);
 987         assert(res != 0, "Block size should not be 0");
 988         return res;
 989       } else {
 990         // May return 0 if P-bits not present.
 991         return c->block_size_if_printezis_bits(p);
 992       }
 993     }
 994     assert(loops == 0, "Can loop at most once");
 995     DEBUG_ONLY(loops++;)
 996   }
 997 }
 998 
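     // A variant of block_size() for use when the block cannot change
     // concurrently (no parallel allocation), so the acquire/retry loop
     // above is unnecessary.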
 999 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1000   NOT_PRODUCT(verify_objects_initialized());
1001   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1002   FreeChunk* fc = (FreeChunk*)p;
1003   if (fc->is_free()) {
1004     return fc->size();
1005   } else {
1006     // Ignore mark word because this may be a recently promoted
1007     // object whose mark word is used to chain together grey
1008     // objects (the last one would have a null value).
1009     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1010     return adjustObjectSize(oop(p)->size());
1011   }
1012 }
1013 
1014 // This implementation assumes that the property of "being an object" is
1015 // stable.  But being a free chunk may not be (because of parallel
1016 // promotion).
1017 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1018   FreeChunk* fc = (FreeChunk*)p;
1019   assert(is_in_reserved(p), "Should be in space");
1020   if (FreeChunk::indicatesFreeChunk(p)) return false;
1021   Klass* k = oop(p)->klass_or_null_acquire();
1022   if (k != NULL) {
1023     // Ignore mark word because it may have been used to
1024     // chain together promoted objects (the last one
1025     // would have a null value).
1026     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1027     return true;
1028   } else {
1029     return false;  // Was not an object at the start of collection.
1030   }
1031 }
1032 
1033 // Check if the object is alive. This fact is checked either by consulting
1034 // the main marking bitmap in the sweeping phase or, if it's a permanent
1035 // generation and we're not in the sweeping phase, by checking the
1036 // perm_gen_verify_bit_map where we store the "deadness" information if
1037 // we did not sweep the perm gen in the most recent previous GC cycle.
1038 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1039   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1040          "Else races are possible");
1041   assert(block_is_obj(p), "The address should point to an object");
1042 
1043   // If we're sweeping, we use object liveness information from the main bit map
1044   // for both perm gen and old gen.
1045   // We don't need to lock the bitmap (live_map or dead_map below), because
1046   // EITHER we are in the middle of the sweeping phase, and the
1047   // main marking bit map (live_map below) is locked,
1048   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1049   // is stable, because it's mutated only in the sweeping phase.
1050   // NOTE: This method is also used by jmap where, if class unloading is
1051   // off, the results can return "false" for legitimate perm objects,
1052   // when we are not in the midst of a sweeping phase, which can result
1053   // in jmap not reporting certain perm gen objects. This will be moot
1054   // if/when the perm gen goes away in the future.
1055   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1056     CMSBitMap* live_map = _collector->markBitMap();
1057     return live_map->par_isMarked((HeapWord*) p);
1058   }
1059   return true;
1060 }
1061 
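     // As block_is_obj(), but for use when there is no parallel mutation:
     // the chunk's free bit is read directly rather than via the
     // acquire-based check above.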
1062 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1063   FreeChunk* fc = (FreeChunk*)p;
1064   assert(is_in_reserved(p), "Should be in space");
1065   assert(_bt.block_start(p) == p, "Should be a block boundary");
1066   if (!fc->is_free()) {
1067     // Ignore mark word because it may have been used to
1068     // chain together promoted objects (the last one
1069     // would have a null value).
1070     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1071     return true;
1072   }
1073   return false;
1074 }
1075 
1076 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1077 // approximate answer if you don't hold the free list lock when you call this.
1078 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1079   size_t size = 0;
1080   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1081     debug_only(
1082       // We may be calling here without the lock in which case we
1083       // won't do this modest sanity check.
1084       if (freelistLock()->owned_by_self()) {
1085         size_t total_list_size = 0;
1086         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1087           fc = fc->next()) {
1088           total_list_size += i;
1089         }
1090         assert(total_list_size == i * _indexedFreeList[i].count(),
1091                "Count in list is incorrect");
1092       }
1093     )
1094     size += i * _indexedFreeList[i].count();
1095   }
1096   return size;
1097 }
1098 
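     // MT-safe allocation entry point: takes the free list lock and then
     // delegates to allocate().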
1099 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1100   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1101   return allocate(size);
1102 }
1103 
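     // Carve a chunk out of what remains of the small linear allocation
     // block; used as a last resort when the dictionary cannot satisfy a
     // large request (see allocate_adaptive_freelists()).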
1104 HeapWord*
1105 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1106   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1107 }
1108 
1109 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1110   assert_lock_strong(freelistLock());
1111   HeapWord* res = NULL;
1112   assert(size == adjustObjectSize(size),
1113          "use adjustObjectSize() before calling into allocate()");
1114 
1115   res = allocate_adaptive_freelists(size);
1116 
1117   if (res != NULL) {
1118     // check that res does lie in this space!
1119     assert(is_in_reserved(res), "Not in this space!");
1120     assert(is_aligned((void*)res), "alignment check");
1121 
1122     FreeChunk* fc = (FreeChunk*)res;
1123     fc->markNotFree();
1124     assert(!fc->is_free(), "shouldn't be marked free");
1125     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1126     // Verify that the block offset table shows this to
1127     // be a single block, but not one which is unallocated.
1128     _bt.verify_single_block(res, size);
1129     _bt.verify_not_unallocated(res, size);
1130     // mangle a just allocated object with a distinct pattern.
1131     debug_only(fc->mangleAllocated(size));
1132   }
1133 
1134   return res;
1135 }
1136 
1137 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1138   assert_lock_strong(freelistLock());
1139   HeapWord* res = NULL;
1140   assert(size == adjustObjectSize(size),
1141          "use adjustObjectSize() before calling into allocate()");
1142 
1143   // Strategy
1144   //   if small
1145   //     exact size from the small-object indexed list
1146   //     small or large linear allocation block (linAB) as appropriate
1147   //     take from lists of greater sized chunks
1148   //   else
1149   //     dictionary
1150   //     small or large linear allocation block if it has the space
1151   // Try allocating exact size from indexTable first
1152   if (size < IndexSetSize) {
1153     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1154     if(res != NULL) {
1155       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1156         "Not removed from free list");
1157       // no block offset table adjustment is necessary on blocks in
1158       // the indexed lists.
1159 
1160     // Try allocating from the small LinAB
1161     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1162         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1163         // If successful, the above also adjusts the block offset table.
1164         // Note that this call will refill the LinAB to
1165         // satisfy the request.  This is different than
1166         // evm.
1167         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1168     } else {
1169       // Raid the exact free lists larger than size, even if they are not
1170       // overpopulated.
1171       res = (HeapWord*) getChunkFromGreater(size);
1172     }
1173   } else {
1174     // Big objects get allocated directly from the dictionary.
1175     res = (HeapWord*) getChunkFromDictionaryExact(size);
1176     if (res == NULL) {
1177       // Try hard not to fail since an allocation failure will likely
1178       // trigger a synchronous GC.  Try to get the space from the
1179       // allocation blocks.
1180       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1181     }
1182   }
1183 
1184   return res;
1185 }
1186 
1187 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1188 // when promoting obj.
1189 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1190   // Depending on the object size, expansion may require refilling either a
1191   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1192   // is added because the dictionary may over-allocate to avoid fragmentation.
1193   size_t space = obj_size;
1194   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1195   return space;
1196 }
1197 
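     // Satisfy a request for numWords by splitting a larger chunk: first try
     // the indexed free lists at sizes of at least numWords + MinChunkSize,
     // then fall back to the dictionary, returning the split-off remainder
     // to the free data structures.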
1198 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1199   FreeChunk* ret;
1200 
1201   assert(numWords >= MinChunkSize, "Size is less than minimum");
1202   assert(linearAllocationWouldFail() || bestFitFirst(),
1203     "Should not be here");
1204 
1205   size_t i;
1206   size_t currSize = numWords + MinChunkSize;
1207   assert(is_object_aligned(currSize), "currSize should be aligned");
1208   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1209     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1210     if (fl->head()) {
1211       ret = getFromListGreater(fl, numWords);
1212       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1213       return ret;
1214     }
1215   }
1216 
1217   currSize = MAX2((size_t)SmallForDictionary,
1218                   (size_t)(numWords + MinChunkSize));
1219 
1220   /* Try to get a chunk that satisfies request, while avoiding
1221      fragmentation that can't be handled. */
1222   {
1223     ret =  dictionary()->get_chunk(currSize);
1224     if (ret != NULL) {
1225       assert(ret->size() - numWords >= MinChunkSize,
1226              "Chunk is too small");
1227       _bt.allocated((HeapWord*)ret, ret->size());
1228       /* Carve returned chunk. */
1229       (void) splitChunkAndReturnRemainder(ret, numWords);
1230       /* Label this as no longer a free chunk. */
1231       assert(ret->is_free(), "This chunk should be free");
1232       ret->link_prev(NULL);
1233     }
1234     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1235     return ret;
1236   }
1237   ShouldNotReachHere();
1238 }
1239 
1240 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1241   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1242   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1243 }
1244 
1245 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1246   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1247          (_smallLinearAllocBlock._word_size == fc->size()),
1248          "Linear allocation block shows incorrect size");
1249   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1250           (_smallLinearAllocBlock._word_size == fc->size()));
1251 }
1252 
1253 // Check if the purported free chunk is present either as a linear
1254 // allocation block, the size-indexed table of (smaller) free blocks,
1255 // or the larger free blocks kept in the binary tree dictionary.
1256 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1257   if (verify_chunk_is_linear_alloc_block(fc)) {
1258     return true;
1259   } else if (fc->size() < IndexSetSize) {
1260     return verifyChunkInIndexedFreeLists(fc);
1261   } else {
1262     return dictionary()->verify_chunk_in_free_list(fc);
1263   }
1264 }
1265 
1266 #ifndef PRODUCT
1267 void CompactibleFreeListSpace::assert_locked() const {
1268   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1269 }
1270 
1271 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1272   CMSLockVerifier::assert_locked(lock);
1273 }
1274 #endif
1275 
1276 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1277   // In the parallel case, the main thread holds the free list lock
1278   // on behalf of the parallel threads.
1279   FreeChunk* fc;
1280   {
1281     // If GC is parallel, this might be called by several threads.
1282     // This should be rare enough that the locking overhead won't affect
1283     // the sequential code.
1284     MutexLockerEx x(parDictionaryAllocLock(),
1285                     Mutex::_no_safepoint_check_flag);
1286     fc = getChunkFromDictionary(size);
1287   }
1288   if (fc != NULL) {
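         // Mark the chunk non-coalescable so the sweeper does not coalesce it
         // away while it is being used as scratch space.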
1289     fc->dontCoalesce();
1290     assert(fc->is_free(), "Should be free, but not coalescable");
1291     // Verify that the block offset table shows this to
1292     // be a single block, but not one which is unallocated.
1293     _bt.verify_single_block((HeapWord*)fc, fc->size());
1294     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1295   }
1296   return fc;
1297 }
1298 
1299 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1300   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1301   assert_locked();
1302 
1303   // if we are tracking promotions, then first ensure space for
1304   // promotion (including spooling space for saving header if necessary).
1305   // then allocate and copy, then track promoted info if needed.
1306   // When tracking (see PromotionInfo::track()), the mark word may
1307   // be displaced and in this case restoration of the mark word
1308   // occurs in the (oop_since_save_marks_)iterate phase.
1309   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1310     return NULL;
1311   }
1312   // Call the allocate(size_t, bool) form directly to avoid the
1313   // additional call through the allocate(size_t) form.  Having
1314   // the compiler inline the call is problematic because allocate(size_t)
1315   // is a virtual method.
1316   HeapWord* res = allocate(adjustObjectSize(obj_size));
1317   if (res != NULL) {
1318     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1319     // if we should be tracking promotions, do so.
1320     if (_promoInfo.tracking()) {
1321         _promoInfo.track((PromotedObject*)res);
1322     }
1323   }
1324   return oop(res);
1325 }
1326 
1327 HeapWord*
1328 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1329   assert_locked();
1330   assert(size >= MinChunkSize, "minimum chunk size");
1331   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1332     "maximum from smallLinearAllocBlock");
1333   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1334 }
1335 
1336 HeapWord*
1337 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1338                                                        size_t size) {
1339   assert_locked();
1340   assert(size >= MinChunkSize, "too small");
1341   HeapWord* res = NULL;
1342   // Try to do linear allocation from blk, making sure that the block is non-empty first.
1343   if (blk->_word_size == 0) {
1344     // We have probably been unable to fill this either in the prologue or
1345     // when it was exhausted at the last linear allocation. Bail out until
1346     // next time.
1347     assert(blk->_ptr == NULL, "consistency check");
1348     return NULL;
1349   }
1350   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1351   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1352   if (res != NULL) return res;
1353 
1354   // about to exhaust this linear allocation block
1355   if (blk->_word_size == size) { // exactly satisfied
1356     res = blk->_ptr;
1357     _bt.allocated(res, blk->_word_size);
1358   } else if (size + MinChunkSize <= blk->_refillSize) {
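         // A refilled linAB (of _refillSize words) could satisfy this request
         // and still leave at least a MinChunkSize remainder, so recycle the
         // current (too small) remainder into the free lists and refill below.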
1359     size_t sz = blk->_word_size;
1360     // Update _unallocated_block if the size is such that chunk would be
1361     // returned to the indexed free list.  All other chunks in the indexed
1362     // free lists are allocated from the dictionary so that _unallocated_block
1363   // has already been adjusted for them.  Do it here so that the cost
1364   // is paid for all chunks added back to the indexed free lists.
1365     if (sz < SmallForDictionary) {
1366       _bt.allocated(blk->_ptr, sz);
1367     }
1368     // Return the chunk that isn't big enough, and then refill below.
1369     addChunkToFreeLists(blk->_ptr, sz);
1370     split_birth(sz);
1371     // Don't keep statistics on adding back chunk from a LinAB.
1372   } else {
1373     // A refilled block would not satisfy the request.
1374     return NULL;
1375   }
1376 
1377   blk->_ptr = NULL; blk->_word_size = 0;
1378   refillLinearAllocBlock(blk);
1379   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1380          "block was replenished");
1381   if (res != NULL) {
1382     split_birth(size);
1383     repairLinearAllocBlock(blk);
1384   } else if (blk->_ptr != NULL) {
1385     res = blk->_ptr;
1386     size_t blk_size = blk->_word_size;
1387     blk->_word_size -= size;
1388     blk->_ptr  += size;
1389     split_birth(size);
1390     repairLinearAllocBlock(blk);
1391     // Update BOT last so that other (parallel) GC threads see a consistent
1392     // view of the BOT and free blocks.
1393     // Above must occur before BOT is updated below.
1394     OrderAccess::storestore();
1395     _bt.split_block(res, blk_size, size);  // adjust block offset table
1396   }
1397   return res;
1398 }
1399 
1400 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1401                                         LinearAllocBlock* blk,
1402                                         size_t size) {
1403   assert_locked();
1404   assert(size >= MinChunkSize, "too small");
1405 
1406   HeapWord* res = NULL;
1407   // This is the common case.  Keep it simple.
1408   if (blk->_word_size >= size + MinChunkSize) {
1409     assert(blk->_ptr != NULL, "consistency check");
1410     res = blk->_ptr;
1411     // Note that the BOT is up-to-date for the linAB before allocation.  It
1412     // indicates the start of the linAB.  The split_block() updates the
1413     // BOT for the linAB after the allocation (indicates the start of the
1414     // next chunk to be allocated).
1415     size_t blk_size = blk->_word_size;
1416     blk->_word_size -= size;
1417     blk->_ptr  += size;
1418     split_birth(size);
1419     repairLinearAllocBlock(blk);
1420     // Update BOT last so that other (parallel) GC threads see a consistent
1421     // view of the BOT and free blocks.
1422     // Above must occur before BOT is updated below.
1423     OrderAccess::storestore();
1424     _bt.split_block(res, blk_size, size);  // adjust block offset table
1425     _bt.allocated(res, size);
1426   }
1427   return res;
1428 }
1429 
1430 FreeChunk*
1431 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1432   assert_locked();
1433   assert(size < SmallForDictionary, "just checking");
1434   FreeChunk* res;
1435   res = _indexedFreeList[size].get_chunk_at_head();
1436   if (res == NULL) {
1437     res = getChunkFromIndexedFreeListHelper(size);
1438   }
1439   _bt.verify_not_unallocated((HeapWord*) res, size);
1440   assert(res == NULL || res->size() == size, "Incorrect block size");
1441   return res;
1442 }
1443 
1444 FreeChunk*
1445 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1446   bool replenish) {
1447   assert_locked();
1448   FreeChunk* fc = NULL;
1449   if (size < SmallForDictionary) {
1450     assert(_indexedFreeList[size].head() == NULL ||
1451       _indexedFreeList[size].surplus() <= 0,
1452       "List for this size should be empty or underpopulated");
1453     // Try best fit in exact lists before replenishing the list
1454     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1455       // Replenish list.
1456       //
1457       // Things tried that failed.
1458       //   Tried allocating out of the two LinAB's first before
1459       // replenishing lists.
1460       //   Tried small linAB of size 256 (size in indexed list)
1461       // and replenishing indexed lists from the small linAB.
1462       //
1463       FreeChunk* newFc = NULL;
1464       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
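           // Replenish by obtaining a single chunk of CMSIndexedFreeListReplenish
           // times the requested size; it is either carved into size-word blocks
           // below or returned whole to the caller.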
1465       if (replenish_size < SmallForDictionary) {
1466         // Do not replenish from an underpopulated size.
1467         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1468             _indexedFreeList[replenish_size].head() != NULL) {
1469           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1470         } else if (bestFitFirst()) {
1471           newFc = bestFitSmall(replenish_size);
1472         }
1473       }
1474       if (newFc == NULL && replenish_size > size) {
1475         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1476         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1477       }
1478       // Note: The stats update re split-death of block obtained above
1479       // will be recorded below, precisely when we know we are going to
1480       // actually split it into more than one piece.
1481       if (newFc != NULL) {
1482         if (replenish || CMSReplenishIntermediate) {
1483           // Replenish this list and return one block to caller.
1484           size_t i;
1485           FreeChunk *curFc, *nextFc;
1486           size_t num_blk = newFc->size() / size;
1487           assert(num_blk >= 1, "Smaller than requested?");
1488           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1489           if (num_blk > 1) {
1490             // we are sure we will be splitting the block just obtained
1491             // into multiple pieces; record the split-death of the original
1492             splitDeath(replenish_size);
1493           }
1494           // carve up and link blocks 0, ..., num_blk - 2
1495           // The last chunk is not added to the lists but is returned as the
1496           // free chunk.
1497           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1498                i = 0;
1499                i < (num_blk - 1);
1500                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1501                i++) {
1502             curFc->set_size(size);
1503             // Don't record this as a return in order to try and
1504             // determine the "returns" from a GC.
1505             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1506             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1507             _bt.mark_block((HeapWord*)curFc, size);
1508             split_birth(size);
1509             // Don't record the initial population of the indexed list
1510             // as a split birth.
1511           }
1512 
1513           // check that the arithmetic was OK above
1514           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1515             "inconsistency in carving newFc");
1516           curFc->set_size(size);
1517           _bt.mark_block((HeapWord*)curFc, size);
1518           split_birth(size);
1519           fc = curFc;
1520         } else {
1521           // Return entire block to caller
1522           fc = newFc;
1523         }
1524       }
1525     }
1526   } else {
1527     // Get a free chunk from the free chunk dictionary to be returned to
1528     // replenish the indexed free list.
1529     fc = getChunkFromDictionaryExact(size);
1530   }
1531   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1532   return fc;
1533 }
1534 
1535 FreeChunk*
1536 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1537   assert_locked();
1538   FreeChunk* fc = _dictionary->get_chunk(size);
1539   if (fc == NULL) {
1540     return NULL;
1541   }
1542   _bt.allocated((HeapWord*)fc, fc->size());
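       // Split off the excess only if the remainder would itself be at least
       // MinChunkSize; otherwise the caller gets a chunk slightly larger than
       // requested (see the asserts below).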
1543   if (fc->size() >= size + MinChunkSize) {
1544     fc = splitChunkAndReturnRemainder(fc, size);
1545   }
1546   assert(fc->size() >= size, "chunk too small");
1547   assert(fc->size() < size + MinChunkSize, "chunk too big");
1548   _bt.verify_single_block((HeapWord*)fc, fc->size());
1549   return fc;
1550 }
1551 
1552 FreeChunk*
1553 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1554   assert_locked();
1555   FreeChunk* fc = _dictionary->get_chunk(size);
1556   if (fc == NULL) {
1557     return fc;
1558   }
1559   _bt.allocated((HeapWord*)fc, fc->size());
1560   if (fc->size() == size) {
1561     _bt.verify_single_block((HeapWord*)fc, size);
1562     return fc;
1563   }
1564   assert(fc->size() > size, "get_chunk() guarantee");
1565   if (fc->size() < size + MinChunkSize) {
1566     // Return the chunk to the dictionary and go get a bigger one.
1567     returnChunkToDictionary(fc);
1568     fc = _dictionary->get_chunk(size + MinChunkSize);
1569     if (fc == NULL) {
1570       return NULL;
1571     }
1572     _bt.allocated((HeapWord*)fc, fc->size());
1573   }
1574   assert(fc->size() >= size + MinChunkSize, "tautology");
1575   fc = splitChunkAndReturnRemainder(fc, size);
1576   assert(fc->size() == size, "chunk is wrong size");
1577   _bt.verify_single_block((HeapWord*)fc, size);
1578   return fc;
1579 }
1580 
1581 void
1582 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1583   assert_locked();
1584 
1585   size_t size = chunk->size();
1586   _bt.verify_single_block((HeapWord*)chunk, size);
1587   // adjust _unallocated_block downward, as necessary
1588   _bt.freed((HeapWord*)chunk, size);
1589   _dictionary->return_chunk(chunk);
1590 #ifndef PRODUCT
1591   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1592     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1593     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1594     tl->verify_stats();
1595   }
1596 #endif // PRODUCT
1597 }
1598 
1599 void
1600 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1601   assert_locked();
1602   size_t size = fc->size();
1603   _bt.verify_single_block((HeapWord*) fc, size);
1604   _bt.verify_not_unallocated((HeapWord*) fc, size);
1605   _indexedFreeList[size].return_chunk_at_tail(fc);
1606 #ifndef PRODUCT
1607   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1608      _indexedFreeList[size].verify_stats();
1609   }
1610 #endif // PRODUCT
1611 }
1612 
1613 // Add chunk to end of last block -- if it's the largest
1614 // block -- and update BOT and census data. We would
1615 // of course have preferred to coalesce it with the
1616 // last block, but it's currently less expensive to find the
1617 // largest block than it is to find the last.
1618 void
1619 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1620   HeapWord* chunk, size_t     size) {
1621   // check that the chunk does lie in this space!
1622   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1623   // One of the parallel gc task threads may be here
1624   // whilst others are allocating.
1625   Mutex* lock = &_parDictionaryAllocLock;
1626   FreeChunk* ec;
1627   {
1628     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1629     ec = dictionary()->find_largest_dict();  // get largest block
1630     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1631       // It's a coterminal block - we can coalesce.
1632       size_t old_size = ec->size();
1633       coalDeath(old_size);
1634       removeChunkFromDictionary(ec);
1635       size += old_size;
1636     } else {
1637       ec = (FreeChunk*)chunk;
1638     }
1639   }
1640   ec->set_size(size);
1641   debug_only(ec->mangleFreed(size));
1642   if (size < SmallForDictionary) {
1643     lock = _indexedFreeListParLocks[size];
1644   }
1645   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1646   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1647   // record the birth under the lock since the recording involves
1648   // manipulation of the list on which the chunk lives and
1649   // if the chunk is allocated and is the last on the list,
1650   // the list can go away.
1651   coalBirth(size);
1652 }
1653 
1654 void
1655 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1656                                               size_t     size) {
1657   // check that the chunk does lie in this space!
1658   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1659   assert_locked();
1660   _bt.verify_single_block(chunk, size);
1661 
1662   FreeChunk* fc = (FreeChunk*) chunk;
1663   fc->set_size(size);
1664   debug_only(fc->mangleFreed(size));
1665   if (size < SmallForDictionary) {
1666     returnChunkToFreeList(fc);
1667   } else {
1668     returnChunkToDictionary(fc);
1669   }
1670 }
1671 
1672 void
1673 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1674   size_t size, bool coalesced) {
1675   assert_locked();
1676   assert(chunk != NULL, "null chunk");
1677   if (coalesced) {
1678     // repair BOT
1679     _bt.single_block(chunk, size);
1680   }
1681   addChunkToFreeLists(chunk, size);
1682 }
1683 
1684 // We _must_ find the purported chunk on our free lists;
1685 // we assert if we don't.
1686 void
1687 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1688   size_t size = fc->size();
1689   assert_locked();
1690   debug_only(verifyFreeLists());
1691   if (size < SmallForDictionary) {
1692     removeChunkFromIndexedFreeList(fc);
1693   } else {
1694     removeChunkFromDictionary(fc);
1695   }
1696   _bt.verify_single_block((HeapWord*)fc, size);
1697   debug_only(verifyFreeLists());
1698 }
1699 
1700 void
1701 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1702   size_t size = fc->size();
1703   assert_locked();
1704   assert(fc != NULL, "null chunk");
1705   _bt.verify_single_block((HeapWord*)fc, size);
1706   _dictionary->remove_chunk(fc);
1707   // adjust _unallocated_block upward, as necessary
1708   _bt.allocated((HeapWord*)fc, size);
1709 }
1710 
1711 void
1712 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1713   assert_locked();
1714   size_t size = fc->size();
1715   _bt.verify_single_block((HeapWord*)fc, size);
1716   NOT_PRODUCT(
1717     if (FLSVerifyIndexTable) {
1718       verifyIndexedFreeList(size);
1719     }
1720   )
1721   _indexedFreeList[size].remove_chunk(fc);
1722   NOT_PRODUCT(
1723     if (FLSVerifyIndexTable) {
1724       verifyIndexedFreeList(size);
1725     }
1726   )
1727 }
1728 
1729 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1730   /* A hint is the next larger size that has a surplus.
1731      Start search at a size large enough to guarantee that
1732      the excess is >= MIN_CHUNK. */
1733   size_t start = align_object_size(numWords + MinChunkSize);
1734   if (start < IndexSetSize) {
1735     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1736     size_t    hint = _indexedFreeList[start].hint();
1737     while (hint < IndexSetSize) {
1738       assert(is_object_aligned(hint), "hint should be aligned");
1739       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1740       if (fl->surplus() > 0 && fl->head() != NULL) {
1741         // Found a list with surplus, reset original hint
1742         // and split out a free chunk which is returned.
1743         _indexedFreeList[start].set_hint(hint);
1744         FreeChunk* res = getFromListGreater(fl, numWords);
1745         assert(res == NULL || res->is_free(),
1746           "Should be returning a free chunk");
1747         return res;
1748       }
1749       hint = fl->hint(); /* keep looking */
1750     }
1751     /* None found. */
1752     it[start].set_hint(IndexSetSize);
1753   }
1754   return NULL;
1755 }
1756 
1757 /* Requires fl->size >= numWords + MinChunkSize */
1758 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1759   size_t numWords) {
1760   FreeChunk *curr = fl->head();
1761   assert(curr != NULL, "List is empty");
1762   size_t oldNumWords = curr->size();
1763   assert(numWords >= MinChunkSize, "Word size is too small");
1764   assert(oldNumWords >= numWords + MinChunkSize,
1765         "Size of chunks in the list is too small");
1766 
1767   fl->remove_chunk(curr);
1768   // recorded indirectly by splitChunkAndReturnRemainder -
1769   // smallSplit(oldNumWords, numWords);
1770   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1771   // Does anything have to be done for the remainder in terms of
1772   // fixing the card table?
1773   assert(new_chunk == NULL || new_chunk->is_free(),
1774     "Should be returning a free chunk");
1775   return new_chunk;
1776 }
1777 
1778 FreeChunk*
1779 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1780   size_t new_size) {
1781   assert_locked();
1782   size_t size = chunk->size();
1783   assert(size > new_size, "Split from a smaller block?");
1784   assert(is_aligned(chunk), "alignment problem");
1785   assert(size == adjustObjectSize(size), "alignment problem");
1786   size_t rem_sz = size - new_size;
1787   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1788   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1789   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1790   assert(is_aligned(ffc), "alignment problem");
1791   ffc->set_size(rem_sz);
1792   ffc->link_next(NULL);
1793   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1794   // Above must occur before BOT is updated below.
1795   // adjust block offset table
1796   OrderAccess::storestore();
1797   assert(chunk->is_free() && ffc->is_free(), "Error");
1798   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1799   if (rem_sz < SmallForDictionary) {
1800     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1801     bool is_par = Thread::current()->is_GC_task_thread();
1802     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1803     returnChunkToFreeList(ffc);
1804     split(size, rem_sz);
1805     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1806   } else {
1807     returnChunkToDictionary(ffc);
1808     split(size, rem_sz);
1809   }
1810   chunk->set_size(new_size);
1811   return chunk;
1812 }
1813 
1814 void
1815 CompactibleFreeListSpace::sweep_completed() {
1816   // Now that space is probably plentiful, refill linear
1817   // allocation blocks as needed.
1818   refillLinearAllocBlocksIfNeeded();
1819 }
1820 
1821 void
1822 CompactibleFreeListSpace::gc_prologue() {
1823   assert_locked();
1824   reportFreeListStatistics("Before GC:");
1825   refillLinearAllocBlocksIfNeeded();
1826 }
1827 
1828 void
1829 CompactibleFreeListSpace::gc_epilogue() {
1830   assert_locked();
1831   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1832   _promoInfo.stopTrackingPromotions();
1833   repairLinearAllocationBlocks();
1834   reportFreeListStatistics("After GC:");
1835 }
1836 
1837 // Iteration support, mostly delegated from a CMS generation
1838 
1839 void CompactibleFreeListSpace::save_marks() {
1840   assert(Thread::current()->is_VM_thread(),
1841          "Global variable should only be set when single-threaded");
1842   // Mark the "end" of the used space at the time of this call;
1843   // note, however, that promoted objects from this point
1844   // on are tracked in the _promoInfo below.
1845   set_saved_mark_word(unallocated_block());
1846 #ifdef ASSERT
1847   // Check the sanity of save_marks() etc.
1848   MemRegion ur    = used_region();
1849   MemRegion urasm = used_region_at_save_marks();
1850   assert(ur.contains(urasm),
1851          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1852          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1853          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1854 #endif
1855   // inform allocator that promotions should be tracked.
1856   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1857   _promoInfo.startTrackingPromotions();
1858 }
1859 
1860 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1861   assert(_promoInfo.tracking(), "No preceding save_marks?");
1862   return _promoInfo.noPromotions();
1863 }
1864 
1865 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1866                                                                             \
1867 void CompactibleFreeListSpace::                                             \
1868 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1869   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1870   /*                                                                        \
1871    * This also restores any displaced headers and removes the elements from \
1872    * the iteration set as they are processed, so that we have a clean slate \
1873    * at the end of the iteration. Note, thus, that if new objects are       \
1874    * promoted as a result of the iteration they are iterated over as well.  \
1875    */                                                                       \
1876   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1877 }
1878 
1879 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1880 
1881 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1882   return _smallLinearAllocBlock._word_size == 0;
1883 }
1884 
1885 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1886   // Fix up linear allocation blocks to look like free blocks
1887   repairLinearAllocBlock(&_smallLinearAllocBlock);
1888 }
1889 
1890 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1891   assert_locked();
1892   if (blk->_ptr != NULL) {
1893     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1894            "Minimum block size requirement");
1895     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1896     fc->set_size(blk->_word_size);
1897     fc->link_prev(NULL);   // mark as free
1898     fc->dontCoalesce();
1899     assert(fc->is_free(), "just marked it free");
1900     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1901   }
1902 }
1903 
1904 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1905   assert_locked();
1906   if (_smallLinearAllocBlock._ptr == NULL) {
1907     assert(_smallLinearAllocBlock._word_size == 0,
1908       "Size of linAB should be zero if the ptr is NULL");
1909     // Reset the linAB refill and allocation size limit.
1910     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1911   }
1912   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1913 }
1914 
1915 void
1916 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1917   assert_locked();
1918   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1919          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1920          "blk invariant");
1921   if (blk->_ptr == NULL) {
1922     refillLinearAllocBlock(blk);
1923   }
1924 }
1925 
1926 void
1927 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1928   assert_locked();
1929   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1930          "linear allocation block should be empty");
1931   FreeChunk* fc;
1932   if (blk->_refillSize < SmallForDictionary &&
1933       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1934     // A linAB's strategy might be to use small sizes to reduce
1935     // fragmentation but still get the benefits of allocation from a
1936     // linAB.
1937   } else {
1938     fc = getChunkFromDictionary(blk->_refillSize);
1939   }
1940   if (fc != NULL) {
1941     blk->_ptr  = (HeapWord*)fc;
1942     blk->_word_size = fc->size();
1943     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1944   }
1945 }
1946 
1947 // Support for compaction
1948 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1949   scan_and_forward(this, cp);
1950   // Prepare_for_compaction() uses the space between live objects
1951   // so that a later phase can skip dead space quickly.  As a result,
1952   // verification of the free lists doesn't work after this point.
1953 }
1954 
1955 void CompactibleFreeListSpace::adjust_pointers() {
1956   // In other versions of adjust_pointers(), a bail out
1957   // based on the amount of live data in the generation
1958   // (i.e., if 0, bail out) may be used.
1959   // Cannot test used() == 0 here because the free lists have already
1960   // been mangled by the compaction.
1961 
1962   scan_and_adjust_pointers(this);
1963   // See note about verification in prepare_for_compaction().
1964 }
1965 
1966 void CompactibleFreeListSpace::compact() {
1967   scan_and_compact(this);
1968 }
1969 
1970 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1971 // where fbs are the free block sizes
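     // For example, a single free block of 400 words gives 1 - 400^2/400^2 = 0,
     // while 100 free blocks of 4 words each (the same total free space) give
     // 1 - (100*4^2)/400^2 = 0.99.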
1972 double CompactibleFreeListSpace::flsFrag() const {
1973   size_t itabFree = totalSizeInIndexedFreeLists();
1974   double frag = 0.0;
1975   size_t i;
1976 
1977   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1978     double sz  = i;
1979     frag      += _indexedFreeList[i].count() * (sz * sz);
1980   }
1981 
1982   double totFree = itabFree +
1983                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1984   if (totFree > 0) {
1985     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1986             (totFree * totFree));
1987     frag = (double)1.0  - frag;
1988   } else {
1989     assert(frag == 0.0, "Follows from totFree == 0");
1990   }
1991   return frag;
1992 }
1993 
1994 void CompactibleFreeListSpace::beginSweepFLCensus(
1995   float inter_sweep_current,
1996   float inter_sweep_estimate,
1997   float intra_sweep_estimate) {
1998   assert_locked();
1999   size_t i;
2000   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2001     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2002     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2003     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2004     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2005     fl->set_before_sweep(fl->count());
2006     fl->set_bfr_surp(fl->surplus());
2007   }
2008   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2009                                     inter_sweep_current,
2010                                     inter_sweep_estimate,
2011                                     intra_sweep_estimate);
2012 }
2013 
2014 void CompactibleFreeListSpace::setFLSurplus() {
2015   assert_locked();
2016   size_t i;
2017   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2018     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2019     fl->set_surplus(fl->count() -
2020                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2021   }
2022 }
2023 
2024 void CompactibleFreeListSpace::setFLHints() {
2025   assert_locked();
2026   size_t i;
2027   size_t h = IndexSetSize;
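       // Walk the lists from largest size to smallest, recording for each size
       // the next larger size that currently has a surplus; IndexSetSize means
       // no such size exists.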
2028   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2029     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2030     fl->set_hint(h);
2031     if (fl->surplus() > 0) {
2032       h = i;
2033     }
2034   }
2035 }
2036 
2037 void CompactibleFreeListSpace::clearFLCensus() {
2038   assert_locked();
2039   size_t i;
2040   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2041     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2042     fl->set_prev_sweep(fl->count());
2043     fl->set_coal_births(0);
2044     fl->set_coal_deaths(0);
2045     fl->set_split_births(0);
2046     fl->set_split_deaths(0);
2047   }
2048 }
2049 
2050 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2051   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2052   setFLSurplus();
2053   setFLHints();
2054   printFLCensus(sweep_count);
2055   clearFLCensus();
2056   assert_locked();
2057   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2058 }
2059 
2060 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2061   if (size < SmallForDictionary) {
2062     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2063     return (fl->coal_desired() < 0) ||
2064            ((int)fl->count() > fl->coal_desired());
2065   } else {
2066     return dictionary()->coal_dict_over_populated(size);
2067   }
2068 }
2069 
2070 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2071   assert(size < SmallForDictionary, "Size too large for indexed list");
2072   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2073   fl->increment_coal_births();
2074   fl->increment_surplus();
2075 }
2076 
2077 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2078   assert(size < SmallForDictionary, "Size too large for indexed list");
2079   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2080   fl->increment_coal_deaths();
2081   fl->decrement_surplus();
2082 }
2083 
2084 void CompactibleFreeListSpace::coalBirth(size_t size) {
2085   if (size  < SmallForDictionary) {
2086     smallCoalBirth(size);
2087   } else {
2088     dictionary()->dict_census_update(size,
2089                                    false /* split */,
2090                                    true /* birth */);
2091   }
2092 }
2093 
2094 void CompactibleFreeListSpace::coalDeath(size_t size) {
2095   if (size < SmallForDictionary) {
2096     smallCoalDeath(size);
2097   } else {
2098     dictionary()->dict_census_update(size,
2099                                    false /* split */,
2100                                    false /* birth */);
2101   }
2102 }
2103 
2104 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2105   assert(size < SmallForDictionary, "Size too large for indexed list");
2106   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2107   fl->increment_split_births();
2108   fl->increment_surplus();
2109 }
2110 
2111 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2112   assert(size < SmallForDictionary, "Size too large for indexed list");
2113   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2114   fl->increment_split_deaths();
2115   fl->decrement_surplus();
2116 }
2117 
2118 void CompactibleFreeListSpace::split_birth(size_t size) {
2119   if (size  < SmallForDictionary) {
2120     smallSplitBirth(size);
2121   } else {
2122     dictionary()->dict_census_update(size,
2123                                    true /* split */,
2124                                    true /* birth */);
2125   }
2126 }
2127 
2128 void CompactibleFreeListSpace::splitDeath(size_t size) {
2129   if (size  < SmallForDictionary) {
2130     smallSplitDeath(size);
2131   } else {
2132     dictionary()->dict_census_update(size,
2133                                    true /* split */,
2134                                    false /* birth */);
2135   }
2136 }
2137 
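     // Census bookkeeping for splitting a free block of 'from' words into pieces
     // of 'to1' and 'from - to1' words: a split death for the original size and
     // a split birth for each resulting size.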
2138 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2139   size_t to2 = from - to1;
2140   splitDeath(from);
2141   split_birth(to1);
2142   split_birth(to2);
2143 }
2144 
2145 void CompactibleFreeListSpace::print() const {
2146   print_on(tty);
2147 }
2148 
2149 void CompactibleFreeListSpace::prepare_for_verify() {
2150   assert_locked();
2151   repairLinearAllocationBlocks();
2152   // Verify that the SpoolBlocks look like free blocks of
2153   // appropriate sizes... To be done ...
2154 }
2155 
2156 class VerifyAllBlksClosure: public BlkClosure {
2157  private:
2158   const CompactibleFreeListSpace* _sp;
2159   const MemRegion                 _span;
2160   HeapWord*                       _last_addr;
2161   size_t                          _last_size;
2162   bool                            _last_was_obj;
2163   bool                            _last_was_live;
2164 
2165  public:
2166   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2167     MemRegion span) :  _sp(sp), _span(span),
2168                        _last_addr(NULL), _last_size(0),
2169                        _last_was_obj(false), _last_was_live(false) { }
2170 
2171   virtual size_t do_blk(HeapWord* addr) {
2172     size_t res;
2173     bool   was_obj  = false;
2174     bool   was_live = false;
2175     if (_sp->block_is_obj(addr)) {
2176       was_obj = true;
2177       oop p = oop(addr);
2178       guarantee(oopDesc::is_oop(p), "Should be an oop");
2179       res = _sp->adjustObjectSize(p->size());
2180       if (_sp->obj_is_alive(addr)) {
2181         was_live = true;
2182         p->verify();
2183       }
2184     } else {
2185       FreeChunk* fc = (FreeChunk*)addr;
2186       res = fc->size();
2187       if (FLSVerifyLists && !fc->cantCoalesce()) {
2188         guarantee(_sp->verify_chunk_in_free_list(fc),
2189                   "Chunk should be on a free list");
2190       }
2191     }
2192     if (res == 0) {
2193       Log(gc, verify) log;
2194       log.error("Livelock: no rank reduction!");
2195       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2196                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2197         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2198         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2199       LogStream ls(log.error());
2200       _sp->print_on(&ls);
2201       guarantee(false, "Verification failed.");
2202     }
2203     _last_addr = addr;
2204     _last_size = res;
2205     _last_was_obj  = was_obj;
2206     _last_was_live = was_live;
2207     return res;
2208   }
2209 };
2210 
2211 class VerifyAllOopsClosure: public OopClosure {
2212  private:
2213   const CMSCollector*             _collector;
2214   const CompactibleFreeListSpace* _sp;
2215   const MemRegion                 _span;
2216   const bool                      _past_remark;
2217   const CMSBitMap*                _bit_map;
2218 
2219  protected:
2220   void do_oop(void* p, oop obj) {
2221     if (_span.contains(obj)) { // the interior oop points into CMS heap
2222       if (!_span.contains(p)) { // reference from outside CMS heap
2223         // Should be a valid object; the first disjunct below allows
2224         // us to sidestep an assertion in block_is_obj() that insists
2225         // that p be in _sp. Note that several generations (and spaces)
2226         // are spanned by _span (CMS heap) above.
2227         guarantee(!_sp->is_in_reserved(obj) ||
2228                   _sp->block_is_obj((HeapWord*)obj),
2229                   "Should be an object");
2230         guarantee(oopDesc::is_oop(obj), "Should be an oop");
2231         obj->verify();
2232         if (_past_remark) {
2233           // Remark has been completed, the object should be marked
2234           _bit_map->isMarked((HeapWord*)obj);
2235         }
2236       } else { // reference within CMS heap
2237         if (_past_remark) {
2238           // Remark has been completed -- so the referent should have
2239           // been marked, if referring object is.
2240           if (_bit_map->isMarked(_collector->block_start(p))) {
2241             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2242           }
2243         }
2244       }
2245     } else if (_sp->is_in_reserved(p)) {
2246       // the reference is from FLS, and points out of FLS
2247       guarantee(oopDesc::is_oop(obj), "Should be an oop");
2248       obj->verify();
2249     }
2250   }
2251 
2252   template <class T> void do_oop_work(T* p) {
2253     T heap_oop = oopDesc::load_heap_oop(p);
2254     if (!oopDesc::is_null(heap_oop)) {
2255       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2256       do_oop(p, obj);
2257     }
2258   }
2259 
2260  public:
2261   VerifyAllOopsClosure(const CMSCollector* collector,
2262     const CompactibleFreeListSpace* sp, MemRegion span,
2263     bool past_remark, CMSBitMap* bit_map) :
2264     _collector(collector), _sp(sp), _span(span),
2265     _past_remark(past_remark), _bit_map(bit_map) { }
2266 
2267   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2268   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2269 };
2270 
2271 void CompactibleFreeListSpace::verify() const {
2272   assert_lock_strong(&_freelistLock);
2273   verify_objects_initialized();
2274   MemRegion span = _collector->_span;
2275   bool past_remark = (_collector->abstract_state() ==
2276                       CMSCollector::Sweeping);
2277 
2278   ResourceMark rm;
2279   HandleMark  hm;
2280 
2281   // Check integrity of CFL data structures
2282   _promoInfo.verify();
2283   _dictionary->verify();
2284   if (FLSVerifyIndexTable) {
2285     verifyIndexedFreeLists();
2286   }
2287   // Check integrity of all objects and free blocks in space
2288   {
2289     VerifyAllBlksClosure cl(this, span);
2290     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2291   }
2292   // Check that all references in the heap to FLS
2293   // are to valid objects in FLS or that references in
2294   // FLS are to valid objects elsewhere in the heap
2295   if (FLSVerifyAllHeapReferences)
2296   {
2297     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2298       _collector->markBitMap());
2299 
2300     // Iterate over all oops in the heap. Uses the _no_header version
2301     // since we are not interested in following the klass pointers.
2302     CMSHeap::heap()->oop_iterate_no_header(&cl);
2303   }
2304 
2305   if (VerifyObjectStartArray) {
2306     // Verify the block offset table
2307     _bt.verify();
2308   }
2309 }
2310 
2311 #ifndef PRODUCT
2312 void CompactibleFreeListSpace::verifyFreeLists() const {
2313   if (FLSVerifyLists) {
2314     _dictionary->verify();
2315     verifyIndexedFreeLists();
2316   } else {
2317     if (FLSVerifyDictionary) {
2318       _dictionary->verify();
2319     }
2320     if (FLSVerifyIndexTable) {
2321       verifyIndexedFreeLists();
2322     }
2323   }
2324 }
2325 #endif
2326 
2327 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2328   size_t i = 0;
2329   for (; i < IndexSetStart; i++) {
2330     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2331   }
2332   for (; i < IndexSetSize; i++) {
2333     verifyIndexedFreeList(i);
2334   }
2335 }
2336 
2337 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2338   FreeChunk* fc   =  _indexedFreeList[size].head();
2339   FreeChunk* tail =  _indexedFreeList[size].tail();
2340   size_t    num = _indexedFreeList[size].count();
2341   size_t      n = 0;
2342   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2343             "Slot should have been empty");
2344   for (; fc != NULL; fc = fc->next(), n++) {
2345     guarantee(fc->size() == size, "Size inconsistency");
2346     guarantee(fc->is_free(), "!free?");
2347     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2348     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2349   }
2350   guarantee(n == num, "Incorrect count");
2351 }
2352 
2353 #ifndef PRODUCT
2354 void CompactibleFreeListSpace::check_free_list_consistency() const {
2355   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2356     "Some sizes can't be allocated without recourse to"
2357     " linear allocation buffers");
2358   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2359     "else MIN_TREE_CHUNK_SIZE is wrong");
2360   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2361   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2362 }
2363 #endif
2364 
2365 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2366   assert_lock_strong(&_freelistLock);
2367   LogTarget(Debug, gc, freelist, census) log;
2368   if (!log.is_enabled()) {
2369     return;
2370   }
2371   AdaptiveFreeList<FreeChunk> total;
2372   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2373   ResourceMark rm;
2374   LogStream ls(log);
2375   outputStream* out = &ls;
2376   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2377   size_t total_free = 0;
2378   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2379     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2380     total_free += fl->count() * fl->size();
2381     if (i % (40*IndexSetStride) == 0) {
2382       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2383     }
2384     fl->print_on(out);
2385     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2386     total.set_surplus(    total.surplus()     + fl->surplus()    );
2387     total.set_desired(    total.desired()     + fl->desired()    );
2388     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2389     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2390     total.set_count(      total.count()       + fl->count()      );
2391     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2392     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2393     total.set_split_births(total.split_births() + fl->split_births());
2394     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2395   }
2396   total.print_on(out, "TOTAL");
2397   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2398   log.print("growth: %8.5f  deficit: %8.5f",
2399             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2400                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2401             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2402   _dictionary->print_dict_census(out);
2403 }
2404 
2405 ///////////////////////////////////////////////////////////////////////////
2406 // CompactibleFreeListSpaceLAB
2407 ///////////////////////////////////////////////////////////////////////////
2408 
2409 #define VECTOR_257(x)                                                                                  \
2410   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2411   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2412      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2413      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2414      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2415      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2416      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2417      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2418      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2419      x }
2420 
2421 // Initialize with default setting for CMS, _not_
2422 // generic OldPLABSize, whose static default is different; if overridden at the
2423 // command-line, this will get reinitialized via a call to
2424 // modify_initialization() below.
2425 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2426   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2427 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2428 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2429 
2430 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2431   _cfls(cfls)
2432 {
2433   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2434   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2435        i < CompactibleFreeListSpace::IndexSetSize;
2436        i += CompactibleFreeListSpace::IndexSetStride) {
2437     _indexedFreeList[i].set_size(i);
2438     _num_blocks[i] = 0;
2439   }
2440 }
2441 
2442 static bool _CFLS_LAB_modified = false;
2443 
2444 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2445   assert(!_CFLS_LAB_modified, "Call only once");
2446   _CFLS_LAB_modified = true;
2447   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2448        i < CompactibleFreeListSpace::IndexSetSize;
2449        i += CompactibleFreeListSpace::IndexSetStride) {
2450     _blocks_to_claim[i].modify(n, wt, true /* force */);
2451   }
2452 }
2453 
2454 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2455   FreeChunk* res;
2456   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2457   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2458     // This locking manages sync with other large object allocations.
2459     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2460                     Mutex::_no_safepoint_check_flag);
2461     res = _cfls->getChunkFromDictionaryExact(word_sz);
2462     if (res == NULL) return NULL;
2463   } else {
2464     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2465     if (fl->count() == 0) {
2466       // Attempt to refill this local free list.
2467       get_from_global_pool(word_sz, fl);
2468       // If it didn't work, give up.
2469       if (fl->count() == 0) return NULL;
2470     }
2471     res = fl->get_chunk_at_head();
2472     assert(res != NULL, "Why was count non-zero?");
2473   }
2474   res->markNotFree();
2475   assert(!res->is_free(), "shouldn't be marked free");
2476   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2477   // mangle a just allocated object with a distinct pattern.
2478   debug_only(res->mangleAllocated(word_sz));
2479   return (HeapWord*)res;
2480 }
2481 
2482 // Get a chunk of blocks of the right size and update related
2483 // book-keeping stats
2484 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2485   // Get the #blocks we want to claim
2486   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2487   assert(n_blks > 0, "Error");
2488   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2489   // In some cases, when the application has a phase change,
2490   // there may be a sudden and sharp shift in the object survival
2491   // profile, and updating the counts at the end of a scavenge
2492   // may not be quick enough, giving rise to large scavenge pauses
2493   // during these phase changes. It is beneficial to detect such
2494   // changes on-the-fly during a scavenge and avoid such a phase-change
2495   // pothole. The following code is a heuristic attempt to do that.
2496   // It is protected by a product flag until we have gained
2497   // enough experience with this heuristic and fine-tuned its behavior.
2498   // WARNING: This might increase fragmentation if we overreact to
2499   // small spikes, so some kind of historical smoothing based on
2500   // previous experience with the greater reactivity might be useful.
2501   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2502   // default.
2503   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2504     //
2505     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2506     // which is why there is a cast to double.
2507     //
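         // 'multiple' is roughly how many times the tolerated demand for this size
         // (CMSOldPLABToleranceFactor * CMSOldPLABNumRefills * n_blks blocks) has
         // already been consumed during this scavenge; n_blks is scaled up
         // accordingly and capped at CMSOldPLABMax.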
2508     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2509     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2510     n_blks = MIN2(n_blks, CMSOldPLABMax);
2511   }
2512   assert(n_blks > 0, "Error");
2513   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2514   // Update stats table entry for this block size
2515   _num_blocks[word_sz] += fl->count();
2516 }
2517 
2518 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2519   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2520        i < CompactibleFreeListSpace::IndexSetSize;
2521        i += CompactibleFreeListSpace::IndexSetStride) {
2522     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2523            "Counter inconsistency");
2524     if (_global_num_workers[i] > 0) {
2525       // Need to smooth wrt historical average
2526       if (ResizeOldPLAB) {
2527         _blocks_to_claim[i].sample(
2528           MAX2(CMSOldPLABMin,
2529           MIN2(CMSOldPLABMax,
2530                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2531       }
2532       // Reset counters for next round
2533       _global_num_workers[i] = 0;
2534       _global_num_blocks[i] = 0;
2535       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2536     }
2537   }
2538 }
2539 
2540 // If this is changed in the future to allow parallel
2541 // access, one would need to take the FL locks and,
2542 // depending on how it is used, stagger access from
2543 // parallel threads to reduce contention.
2544 void CompactibleFreeListSpaceLAB::retire(int tid) {
2545   // We run this single threaded with the world stopped;
2546   // so no need for locks and such.
2547   NOT_PRODUCT(Thread* t = Thread::current();)
2548   assert(Thread::current()->is_VM_thread(), "Error");
2549   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2550        i < CompactibleFreeListSpace::IndexSetSize;
2551        i += CompactibleFreeListSpace::IndexSetStride) {
2552     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2553            "Can't retire more than what we obtained");
2554     if (_num_blocks[i] > 0) {
2555       size_t num_retire =  _indexedFreeList[i].count();
2556       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2557       {
2558         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2559         //                Mutex::_no_safepoint_check_flag);
2560 
2561         // Update globals stats for num_blocks used
2562         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2563         _global_num_workers[i]++;
2564         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2565         if (num_retire > 0) {
2566           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2567           // Reset this list.
2568           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2569           _indexedFreeList[i].set_size(i);
2570         }
2571       }
2572       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2573                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2574       // Reset stats for next round
2575       _num_blocks[i]         = 0;
2576     }
2577   }
2578 }
2579 
2580 // Used by par_get_chunk_of_blocks() for the chunks from the
2581 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2582 // of "word_sz" and, if found, splits it into chunks of "word_sz" words
2583 // each and adds them to the free list "fl".  "n" is the maximum number
2584 // of chunks to be added to "fl".  Returns true iff at least one chunk
     // was added to "fl".
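     // A worked example with illustrative sizes: a request for n = 16 chunks of
     // word_sz = 8 first tries the size-8 indexed list; if that list is empty
     // and CMSSplitIndexedFreeListBlocks is enabled, the k = 2 pass consults the
     // size-16 list, takes nn = MAX2(16/2, 1) = 8 chunks from it (assuming it
     // has that many) and splits each of them in two, yielding the 16
     // eight-word chunks requested.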
2585 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2586 
2587   // We'll try all multiples of word_sz in the indexed set, starting with
2588   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2589   // then try getting a big chunk and splitting it.
2590   {
2591     bool found;
2592     int  k;
2593     size_t cur_sz;
2594     for (k = 1, cur_sz = k * word_sz, found = false;
2595          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2596          (CMSSplitIndexedFreeListBlocks || k <= 1);
2597          k++, cur_sz = k * word_sz) {
2598       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2599       fl_for_cur_sz.set_size(cur_sz);
2600       {
2601         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2602                         Mutex::_no_safepoint_check_flag);
2603         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2604         if (gfl->count() != 0) {
2605           // nn is the number of chunks of size cur_sz that
2606           // we'd need to split k-ways each, in order to create
2607           // "n" chunks of size word_sz each.
2608           const size_t nn = MAX2(n/k, (size_t)1);
2609           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2610           found = true;
2611           if (k > 1) {
2612             // Update split death stats for the cur_sz-size blocks list:
2613             // we increment the split death count by the number of blocks
2614             // we just took from the cur_sz-size blocks list and which
2615             // we will be splitting below.
2616             ssize_t deaths = gfl->split_deaths() +
2617                              fl_for_cur_sz.count();
2618             gfl->set_split_deaths(deaths);
2619           }
2620         }
2621       }
2622       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2623       if (found) {
2624         if (k == 1) {
2625           fl->prepend(&fl_for_cur_sz);
2626         } else {
2627           // Divide each block on fl_for_cur_sz up k ways.
2628           FreeChunk* fc;
2629           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2630             // Must do this in reverse order, so that anybody attempting to
2631             // access the main chunk sees it as a single free block until we
2632             // change it.
2633             size_t fc_size = fc->size();
2634             assert(fc->is_free(), "Error");
2635             for (int i = k-1; i >= 0; i--) {
2636               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2637               assert((i != 0) ||
2638                         ((fc == ffc) && ffc->is_free() &&
2639                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2640                         "Counting error");
2641               ffc->set_size(word_sz);
2642               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2643               ffc->link_next(NULL);
2644               // Above must occur before BOT is updated below.
2645               OrderAccess::storestore();
2646               // splitting from the right; after the decrement below, fc_size == i * word_sz
2647               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2648               fc_size -= word_sz;
2649               assert(fc_size == i*word_sz, "Error");
2650               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2651               _bt.verify_single_block((HeapWord*)fc, fc_size);
2652               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2653               // Push this on "fl".
2654               fl->return_chunk_at_head(ffc);
2655             }
2656             // Sanity check: the list must remain well-formed after the splits above.
2657             assert(fl->tail()->next() == NULL, "List invariant.");
2658           }
2659         }
2660         // Update birth stats for this block size.
2661         size_t num = fl->count();
2662         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2663                         Mutex::_no_safepoint_check_flag);
2664         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2665         _indexedFreeList[word_sz].set_split_births(births);
2666         return true;
2667       }
2668     }
2669     return found;
2670   }
2671 }
2672 
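     // Gets a single chunk from the dictionary large enough to be carved into
     // (up to) "n" chunks of "word_sz" words, returning any remainder to the
     // appropriate free list.  A worked example with illustrative sizes: a
     // request for n = 10 chunks of word_sz = 20 that finds a 230-word
     // dictionary chunk keeps n = MIN2(230/20, 10) = 10, leaving
     // rem = 230 - 10*20 = 30 words; assuming 30 >= MinChunkSize, the 200-word
     // prefix is returned to the caller for splitting, and because 30 <
     // IndexSetSize the 30-word remainder goes back onto the size-30 indexed
     // free list (a larger remainder would go back to the dictionary).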
2673 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2674 
2675   FreeChunk* fc = NULL;
2676   FreeChunk* rem_fc = NULL;
2677   size_t rem;
2678   {
2679     MutexLockerEx x(parDictionaryAllocLock(),
2680                     Mutex::_no_safepoint_check_flag);
2681     while (n > 0) {
2682       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
2683       if (fc != NULL) {
2684         break;
2685       } else {
2686         n--;
2687       }
2688     }
2689     if (fc == NULL) return NULL;
2690     // Otherwise, split up that block.
2691     assert((ssize_t)n >= 1, "Control point invariant");
2692     assert(fc->is_free(), "Error: should be a free block");
2693     _bt.verify_single_block((HeapWord*)fc, fc->size());
2694     const size_t nn = fc->size() / word_sz;
2695     n = MIN2(nn, n);
2696     assert((ssize_t)n >= 1, "Control point invariant");
2697     rem = fc->size() - n * word_sz;
2698     // If there is a remainder, and it's too small, allocate one fewer.
2699     if (rem > 0 && rem < MinChunkSize) {
2700       n--; rem += word_sz;
2701     }
2702     // Note that at this point we may have n == 0.
2703     assert((ssize_t)n >= 0, "Control point invariant");
2704 
2705     // If n is 0, the chunk fc that was found is not large
2706     // enough to leave a viable remainder.  We are unable to
2707     // allocate even one block.  Return fc to the
2708     // dictionary and return, leaving "fl" empty.
2709     if (n == 0) {
2710       returnChunkToDictionary(fc);
2711       return NULL;
2712     }
2713 
2714     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2715     dictionary()->dict_census_update(fc->size(),
2716                                      true /*split*/,
2717                                      false /*birth*/);
2718 
2719     // First return the remainder, if any.
2720     // Note that we hold the lock until we decide if we're going to give
2721     // back the remainder to the dictionary, since a concurrent allocation
2722     // may otherwise see the heap as empty.  (We're willing to take that
2723     // hit if the block is a small block.)
2724     if (rem > 0) {
2725       size_t prefix_size = n * word_sz;
2726       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2727       rem_fc->set_size(rem);
2728       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2729       rem_fc->link_next(NULL);
2730       // Above must occur before BOT is updated below.
2731       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2732       OrderAccess::storestore();
2733       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2734       assert(fc->is_free(), "Error");
2735       fc->set_size(prefix_size);
2736       if (rem >= IndexSetSize) {
2737         returnChunkToDictionary(rem_fc);
2738         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2739         rem_fc = NULL;
2740       }
2741       // Otherwise, return it to the small list below.
2742     }
2743   }
2744   if (rem_fc != NULL) {
2745     MutexLockerEx x(_indexedFreeListParLocks[rem],
2746                     Mutex::_no_safepoint_check_flag);
2747     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2748     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2749     smallSplitBirth(rem);
2750   }
2751   assert(n * word_sz == fc->size(),
2752          "Chunk size " SIZE_FORMAT " is not exactly splittable into "
2753          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2754          fc->size(), n, word_sz);
2755   return fc;
2756 }
2757 
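     // Splits the chunk obtained from get_n_way_chunk_to_split() into blocks of
     // "word_sz" words and adds them to "fl".  A worked example with
     // illustrative sizes: given a 200-word chunk and word_sz = 20, the loop
     // below carves off 20-word blocks from the right, at offsets 180, 160,
     // ..., 20 words into the chunk, publishing each block's size and links
     // before the block offset table is updated; only then is the original
     // chunk itself shrunk to 20 words, so a concurrent observer sees either
     // the whole chunk or fully formed sub-blocks, never a half-split state.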
2758 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2759 
2760   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2761 
2762   if (fc == NULL) {
2763     return;
2764   }
2765 
2766   size_t n = fc->size() / word_sz;
2767 
2768   assert((ssize_t)n > 0, "Consistency");
2769   // Now do the splitting up.
2770   // Must do this in reverse order, so that anybody attempting to
2771   // access the main chunk sees it as a single free block until we
2772   // change it.
2773   size_t fc_size = n * word_sz;
2774   // All but first chunk in this loop
2775   for (ssize_t i = n-1; i > 0; i--) {
2776     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2777     ffc->set_size(word_sz);
2778     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2779     ffc->link_next(NULL);
2780     // Above must occur before BOT is updated below.
2781     OrderAccess::storestore();
2782     // splitting from the right; after the decrement below, fc_size == i * word_sz
2783     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2784     fc_size -= word_sz;
2785     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2786     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2787     _bt.verify_single_block((HeapWord*)fc, fc_size);
2788     // Push this on "fl".
2789     fl->return_chunk_at_head(ffc);
2790   }
2791   // First chunk
2792   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2793   // The blocks above should show their new sizes before the first block below
2794   fc->set_size(word_sz);
2795   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2796   fc->link_next(NULL);
2797   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2798   _bt.verify_single_block((HeapWord*)fc, fc->size());
2799   fl->return_chunk_at_head(fc);
2800 
2801   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2802   {
2803     // Update the stats for this block size.
2804     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2805                     Mutex::_no_safepoint_check_flag);
2806     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2807     _indexedFreeList[word_sz].set_split_births(births);
2808     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2809     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2810   }
2811 
2812   // Sanity check: the list we just built must be well-formed.
2813   assert(fl->tail()->next() == NULL, "List invariant.");
2814 }
2815 
2816 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2817   assert(fl->count() == 0, "Precondition.");
2818   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2819          "Precondition");
2820 
2821   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2822     // Got it
2823     return;
2824   }
2825 
2826   // Otherwise, we'll split a block from the dictionary.
2827   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2828 }
2829 
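     // A worked example with illustrative numbers, assuming a 64-bit VM and the
     // usual 512-byte cards: a 1 GB reserved old generation is 2^27 words,
     // card_size_in_words is 64 and BitsPerWord is 64, so the ergonomic maximum
     // computed below is 2^27 / (64 * 64) = 32768.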
2830 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
2831   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
2832   return ergo_max;
2833 }
2834 
2835 // Set up the space's par_seq_tasks structure for work claiming
2836 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2837 // XXX Need to suitably abstract and generalize this and the next
2838 // method into one.
2839 void
2840 CompactibleFreeListSpace::
2841 initialize_sequential_subtasks_for_rescan(int n_threads) {
2842   // The "size" of each task is fixed according to rescan_task_size.
2843   assert(n_threads > 0, "Unexpected n_threads argument");
2844   const size_t task_size = rescan_task_size();
2845   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2846   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2847   assert(n_tasks == 0 ||
2848          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2849           (used_region().start() + n_tasks*task_size >= used_region().end())),
2850          "n_tasks calculation incorrect");
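       // A worked example with illustrative numbers: a used region of 1,000,000
       // words and a task_size of 4096 words give
       //   n_tasks = (1,000,000 + 4095) / 4096 = 245,
       // i.e. the ceiling of the quotient, so the final task covers the
       // (possibly partial) tail of the region.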
2851   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2852   assert(!pst->valid(), "Clobbering existing data?");
2853   // Sets the condition for completion of the subtask (how many threads
2854   // need to finish in order to be done).
2855   pst->set_n_threads(n_threads);
2856   pst->set_n_tasks((int)n_tasks);
2857 }
2858 
2859 // Set up the space's par_seq_tasks structure for work claiming
2860 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2861 void
2862 CompactibleFreeListSpace::
2863 initialize_sequential_subtasks_for_marking(int n_threads,
2864                                            HeapWord* low) {
2865   // The "size" of each task is fixed according to marking_task_size.
2866   assert(n_threads > 0, "Unexpected n_threads argument");
2867   const size_t task_size = marking_task_size();
2868   assert(task_size > CardTable::card_size_in_words &&
2869          (task_size %  CardTable::card_size_in_words == 0),
2870          "Otherwise arithmetic below would be incorrect");
2871   MemRegion span = _old_gen->reserved();
2872   if (low != NULL) {
2873     if (span.contains(low)) {
2874       // Align low down to a card boundary so that
2875       // we can use block_offset_careful() on span boundaries.
2876       HeapWord* aligned_low = align_down(low, CardTable::card_size);
2877       // Clip span prefix at aligned_low
2878       span = span.intersection(MemRegion(aligned_low, span.end()));
2879     } else if (low > span.end()) {
2880       span = MemRegion(low, low);  // Null region
2881     } // else use entire span
2882   }
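       // A worked example with illustrative addresses, assuming the usual
       // 512-byte cards: if the reserved span is [0x10000000, 0x20000000) and
       // low is 0x140001a8, then aligned_low = align_down(low, 512) =
       // 0x14000000 and the span used for partitioning becomes
       // [0x14000000, 0x20000000); a "low" beyond the end of the span yields an
       // empty region and hence zero tasks.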
2883   assert(span.is_empty() ||
2884          ((uintptr_t)span.start() %  CardTable::card_size == 0),
2885         "span should start at a card boundary");
2886   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2887   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2888   assert(n_tasks == 0 ||
2889          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2890           (span.start() + n_tasks*task_size >= span.end())),
2891          "n_tasks calculation incorrect");
2892   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2893   assert(!pst->valid(), "Clobbering existing data?");
2894   // Sets the condition for completion of the subtask (how many threads
2895   // need to finish in order to be done).
2896   pst->set_n_threads(n_threads);
2897   pst->set_n_tasks((int)n_tasks);
2898 }