1 /*
   2  * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/space.inline.hpp"
  34 #include "gc/shared/spaceDecorator.hpp"
  35 #include "logging/log.hpp"
  36 #include "logging/logStream.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.inline.hpp"
  40 #include "oops/oop.inline.hpp"
  41 #include "runtime/globals.hpp"
  42 #include "runtime/handles.inline.hpp"
  43 #include "runtime/init.hpp"
  44 #include "runtime/java.hpp"
  45 #include "runtime/orderAccess.inline.hpp"
  46 #include "runtime/vmThread.hpp"
  47 #include "utilities/align.hpp"
  48 #include "utilities/copy.hpp"
  49 
  50 /////////////////////////////////////////////////////////////////////////
  51 //// CompactibleFreeListSpace
  52 /////////////////////////////////////////////////////////////////////////
  53 
  54 // Highest rank assigned to a free list lock.
  55 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  56 
  57 // Defaults are 0 so things will break badly if incorrectly initialized.
  58 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  59 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  60 
  61 size_t MinChunkSize = 0;
  62 
  63 void CompactibleFreeListSpace::set_cms_values() {
  64   // Set CMS global values
  65   assert(MinChunkSize == 0, "already set");
  66 
  67   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  68   // for chunks to contain a FreeChunk.
  69   size_t min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  70   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
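       // Illustrative sketch (not normative): on a 64-bit build with
       // MinObjAlignmentInBytes == 8 and a three-word FreeChunk (24 bytes),
       // align_up(24, 8) == 24, so MinChunkSize == 24 / BytesPerWord == 3 words.
       // The exact values depend on the platform and build configuration.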
  71 
  72   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  73   IndexSetStart  = MinChunkSize;
  74   IndexSetStride = MinObjAlignment;
  75 }
  76 
  77 // Constructor
  78 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  79   _bt(bs, mr),
  80   // Free list locks are in the range of values taken by _lockRank.
  81   // This range currently is [_leaf+2, _leaf+3].
  82   // Note: this requires that CFLspace c'tors
  83   // are called serially in the order in which the locks
  84   // are acquired in the program text. This is true today.
  85   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  86                 Monitor::_safepoint_check_sometimes),
  87   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  88                           "CompactibleFreeListSpace._dict_par_lock", true,
  89                           Monitor::_safepoint_check_never),
  90   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  91                     CMSRescanMultiple),
  92   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  93                     CMSConcMarkMultiple),
  94   _collector(NULL),
  95   _preconsumptionDirtyCardClosure(NULL)
  96 {
  97   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  98          "FreeChunk is larger than expected");
  99   _bt.set_space(this);
 100   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 101 
 102   _dictionary = new AFLBinaryTreeDictionary(mr);
 103 
 104   assert(_dictionary != NULL, "CMS dictionary initialization");
 105   // The indexed free lists are initially all empty and are lazily
 106   // filled in on demand. Initialize the array elements to NULL.
 107   initializeIndexedFreeListArray();
 108 
 109   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 110                              SmallForLinearAlloc);
 111 
 112   // CMSIndexedFreeListReplenish should be at least 1
 113   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 114   _promoInfo.setSpace(this);
 115   if (UseCMSBestFit) {
 116     _fitStrategy = FreeBlockBestFitFirst;
 117   } else {
 118     _fitStrategy = FreeBlockStrategyNone;
 119   }
 120   check_free_list_consistency();
 121 
 122   // Initialize locks for parallel case.
 123   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 124     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 125                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 126     DEBUG_ONLY(
 127       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 128     )
 129   }
 130   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 131 }
 132 
 133 // Like CompactibleSpace forward() but always calls cross_threshold() to
 134 // update the block offset table.  Removed initialize_threshold call because
 135 // CFLS does not use a block offset array for contiguous spaces.
 136 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 137                                     CompactPoint* cp, HeapWord* compact_top) {
 138   // q is alive
 139   // First check if we should switch compaction space
 140   assert(this == cp->space, "'this' should be current compaction space.");
 141   size_t compaction_max_size = pointer_delta(end(), compact_top);
 142   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 143     "virtual adjustObjectSize_v() method is not correct");
 144   size_t adjusted_size = adjustObjectSize(size);
 145   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 146          "no small fragments allowed");
 147   assert(minimum_free_block_size() == MinChunkSize,
 148          "for de-virtualized reference below");
 149   // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 150   if (adjusted_size + MinChunkSize > compaction_max_size &&
 151       adjusted_size != compaction_max_size) {
 152     do {
 153       // switch to next compaction space
 154       cp->space->set_compaction_top(compact_top);
 155       cp->space = cp->space->next_compaction_space();
 156       if (cp->space == NULL) {
 157         cp->gen = GenCollectedHeap::heap()->young_gen();
 158         assert(cp->gen != NULL, "compaction must succeed");
 159         cp->space = cp->gen->first_compaction_space();
 160         assert(cp->space != NULL, "generation must have a first compaction space");
 161       }
 162       compact_top = cp->space->bottom();
 163       cp->space->set_compaction_top(compact_top);
 164       // The correct adjusted_size may not be the same as that for this method
 165       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 166       // Use the virtual method here; it was not used above in order to save
 167       // the cost of a virtual dispatch.
 168       adjusted_size = cp->space->adjust_object_size_v(size);
 169       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 170       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 171     } while (adjusted_size > compaction_max_size);
 172   }
 173 
 174   // store the forwarding pointer into the mark word
 175   if ((HeapWord*)q != compact_top) {
 176     q->forward_to(oop(compact_top));
 177     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 178   } else {
 179     // if the object isn't moving we can just set the mark to the default
 180     // mark and handle it specially later on.
 181     q->init_mark();
 182     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 183   }
 184 
 185   compact_top += adjusted_size;
 186 
 187   // we need to update the offset table so that the beginnings of objects can be
 188   // found during scavenge.  Note that we are updating the offset table based on
 189   // where the object will be once the compaction phase finishes.
 190 
 191   // Always call cross_threshold().  A contiguous space would call it only when
 192   // the compaction_top exceeds the current threshold; that restriction does
 193   // not apply to a non-contiguous space.
 194   cp->threshold =
 195     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 196   return compact_top;
 197 }
 198 
 199 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 200 // and use of single_block instead of alloc_block.  The name here is not really
 201 // appropriate - maybe a more general name could be invented for both the
 202 // contiguous and noncontiguous spaces.
 203 
 204 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 205   _bt.single_block(start, the_end);
 206   return end();
 207 }
 208 
 209 // Initialize the indexed free list array elements to empty lists.
 210 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 211   for (size_t i = 0; i < IndexSetSize; i++) {
 212     // Note that on platforms where objects are double word aligned,
 213     // the odd array elements are not used.  It is convenient, however,
 214     // to map directly from the object size to the array element.
 215     _indexedFreeList[i].reset(IndexSetSize);
 216     _indexedFreeList[i].set_size(i);
 217     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 218     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 219     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 220     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 221   }
 222 }
 223 
 224 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 225   return adjustObjectSize(oop(addr)->size());
 226 }
 227 
 228 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 229   for (size_t i = 1; i < IndexSetSize; i++) {
 230     assert(_indexedFreeList[i].size() == (size_t) i,
 231       "Indexed free list sizes are incorrect");
 232     _indexedFreeList[i].reset(IndexSetSize);
 233     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 234     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 235     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 236     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 237   }
 238 }
 239 
 240 void CompactibleFreeListSpace::reset(MemRegion mr) {
 241   resetIndexedFreeListArray();
 242   dictionary()->reset();
 243   if (BlockOffsetArrayUseUnallocatedBlock) {
 244     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 245     // Everything's allocated until proven otherwise.
 246     _bt.set_unallocated_block(end());
 247   }
 248   if (!mr.is_empty()) {
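         // Turn the whole region into a single free chunk: record it in the
         // block offset table, then return it either to the dictionary (large)
         // or to the appropriately sized indexed free list (small).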
 249     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 250     _bt.single_block(mr.start(), mr.word_size());
 251     FreeChunk* fc = (FreeChunk*) mr.start();
 252     fc->set_size(mr.word_size());
 253     if (mr.word_size() >= IndexSetSize ) {
 254       returnChunkToDictionary(fc);
 255     } else {
 256       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 257       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 258     }
 259     coalBirth(mr.word_size());
 260   }
 261   _promoInfo.reset();
 262   _smallLinearAllocBlock._ptr = NULL;
 263   _smallLinearAllocBlock._word_size = 0;
 264 }
 265 
 266 void CompactibleFreeListSpace::reset_after_compaction() {
 267   // Reset the space to the new reality - one free chunk.
 268   MemRegion mr(compaction_top(), end());
 269   reset(mr);
 270   // Now refill the linear allocation block(s) if possible.
 271   refillLinearAllocBlocksIfNeeded();
 272 }
 273 
 274 // Walks the entire dictionary, returning a coterminal
 275 // chunk, if it exists. Use with caution since it involves
 276 // a potentially complete walk of a potentially large tree.
 277 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 278 
 279   assert_lock_strong(&_freelistLock);
 280 
 281   return dictionary()->find_chunk_ends_at(end());
 282 }
 283 
 284 
 285 #ifndef PRODUCT
 286 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 287   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 288     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 289   }
 290 }
 291 
 292 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 293   size_t sum = 0;
 294   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 295     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 296   }
 297   return sum;
 298 }
 299 
 300 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 301   size_t count = 0;
 302   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 303     debug_only(
 304       ssize_t total_list_count = 0;
 305       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 306          fc = fc->next()) {
 307         total_list_count++;
 308       }
 309       assert(total_list_count ==  _indexedFreeList[i].count(),
 310         "Count in list is incorrect");
 311     )
 312     count += _indexedFreeList[i].count();
 313   }
 314   return count;
 315 }
 316 
 317 size_t CompactibleFreeListSpace::totalCount() {
 318   size_t num = totalCountInIndexedFreeLists();
 319   num +=  dictionary()->total_count();
 320   if (_smallLinearAllocBlock._word_size != 0) {
 321     num++;
 322   }
 323   return num;
 324 }
 325 #endif
 326 
 327 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 328   FreeChunk* fc = (FreeChunk*) p;
 329   return fc->is_free();
 330 }
 331 
 332 size_t CompactibleFreeListSpace::used() const {
 333   return capacity() - free();
 334 }
 335 
 336 size_t CompactibleFreeListSpace::free() const {
 337   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 338   // if you do this while the structures are in flux you
 339   // may get an approximate answer only; for instance
 340   // because there is concurrent allocation either
 341   // directly by mutators or for promotion during a GC.
 342   // It's "MT-safe", however, in the sense that you are guaranteed
 343   // not to crash and burn, for instance, because of walking
 344   // pointers that could disappear as you were walking them.
 345   // The approximation is because the various components
 346   // that are read below are not read atomically (and
 347   // further the computation of totalSizeInIndexedFreeLists()
 348   // is itself a non-atomic computation). The normal use of
 349   // this is during a resize operation at the end of GC
 350   // and at that time you are guaranteed to get the
 351   // correct actual value. However, for instance, this is
 352   // also read completely asynchronously by the "perf-sampler"
 353   // that supports jvmstat, and you are apt to see the values
 354   // flicker in such cases.
 355   assert(_dictionary != NULL, "No _dictionary?");
 356   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 357           totalSizeInIndexedFreeLists() +
 358           _smallLinearAllocBlock._word_size) * HeapWordSize;
 359 }
 360 
 361 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 362   assert(_dictionary != NULL, "No _dictionary?");
 363   assert_locked();
 364   size_t res = _dictionary->max_chunk_size();
 365   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 366                        (size_t) SmallForLinearAlloc - 1));
 367   // XXX the following could potentially be pretty slow;
 368   // should one, pessimistically for the rare cases when res
 369   // calculated above is less than IndexSetSize,
 370   // just return res calculated above? My reasoning was that
 371   // those cases will be so rare that the extra time spent doesn't
 372   // really matter....
 373   // Note: do not change the loop test i >= res + IndexSetStride
 374   // to i > res below, because i is unsigned and res may be zero.
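       // (With "i > res" and res == 0, i could be decremented below
       // IndexSetStride and then wrap around, since i is unsigned; the
       // "res + IndexSetStride" bound ensures the decrement cannot underflow.)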
 375   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 376        i -= IndexSetStride) {
 377     if (_indexedFreeList[i].head() != NULL) {
 378       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 379       return i;
 380     }
 381   }
 382   return res;
 383 }
 384 
 385 void LinearAllocBlock::print_on(outputStream* st) const {
 386   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 387             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 388             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 389 }
 390 
 391 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 392   st->print_cr("COMPACTIBLE FREELIST SPACE");
 393   st->print_cr(" Space:");
 394   Space::print_on(st);
 395 
 396   st->print_cr("promoInfo:");
 397   _promoInfo.print_on(st);
 398 
 399   st->print_cr("_smallLinearAllocBlock");
 400   _smallLinearAllocBlock.print_on(st);
 401 
 402   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 403 
 404   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 405 }
 406 
 407 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 408 const {
 409   reportIndexedFreeListStatistics(st);
 410   st->print_cr("Layout of Indexed Freelists");
 411   st->print_cr("---------------------------");
 412   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 413   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 414     _indexedFreeList[i].print_on(st);
 415     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 416       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 417                    p2i(fc), p2i((HeapWord*)fc + i),
 418                    fc->cantCoalesce() ? "\t CC" : "");
 419     }
 420   }
 421 }
 422 
 423 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 424 const {
 425   _promoInfo.print_on(st);
 426 }
 427 
 428 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 429 const {
 430   _dictionary->report_statistics(st);
 431   st->print_cr("Layout of Freelists in Tree");
 432   st->print_cr("---------------------------");
 433   _dictionary->print_free_lists(st);
 434 }
 435 
 436 class BlkPrintingClosure: public BlkClosure {
 437   const CMSCollector*             _collector;
 438   const CompactibleFreeListSpace* _sp;
 439   const CMSBitMap*                _live_bit_map;
 440   const bool                      _post_remark;
 441   outputStream*                   _st;
 442 public:
 443   BlkPrintingClosure(const CMSCollector* collector,
 444                      const CompactibleFreeListSpace* sp,
 445                      const CMSBitMap* live_bit_map,
 446                      outputStream* st):
 447     _collector(collector),
 448     _sp(sp),
 449     _live_bit_map(live_bit_map),
 450     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 451     _st(st) { }
 452   size_t do_blk(HeapWord* addr);
 453 };
 454 
 455 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 456   size_t sz = _sp->block_size_no_stall(addr, _collector);
 457   assert(sz != 0, "Should always be able to compute a size");
 458   if (_sp->block_is_obj(addr)) {
 459     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 460     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 461       p2i(addr),
 462       dead ? "dead" : "live",
 463       sz,
 464       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 465     if (CMSPrintObjectsInDump && !dead) {
 466       oop(addr)->print_on(_st);
 467       _st->print_cr("--------------------------------------");
 468     }
 469   } else { // free block
 470     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 471       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 472     if (CMSPrintChunksInDump) {
 473       ((FreeChunk*)addr)->print_on(_st);
 474       _st->print_cr("--------------------------------------");
 475     }
 476   }
 477   return sz;
 478 }
 479 
 480 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 481   st->print_cr("=========================");
 482   st->print_cr("Block layout in CMS Heap:");
 483   st->print_cr("=========================");
 484   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 485   blk_iterate(&bpcl);
 486 
 487   st->print_cr("=======================================");
 488   st->print_cr("Order & Layout of Promotion Info Blocks");
 489   st->print_cr("=======================================");
 490   print_promo_info_blocks(st);
 491 
 492   st->print_cr("===========================");
 493   st->print_cr("Order of Indexed Free Lists");
 494   st->print_cr("===========================");
 495   print_indexed_free_lists(st);
 496 
 497   st->print_cr("=================================");
 498   st->print_cr("Order of Free Lists in Dictionary");
 499   st->print_cr("=================================");
 500   print_dictionary_free_lists(st);
 501 }
 502 
 503 
 504 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 505   assert_lock_strong(&_freelistLock);
 506   Log(gc, freelist, stats) log;
 507   if (!log.is_debug()) {
 508     return;
 509   }
 510   log.debug("%s", title);
 511 
 512   LogStream out(log.debug());
 513   _dictionary->report_statistics(&out);
 514 
 515   if (log.is_trace()) {
 516     LogStream trace_out(log.trace());
 517     reportIndexedFreeListStatistics(&trace_out);
 518     size_t total_size = totalSizeInIndexedFreeLists() +
 519                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 520     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 521   }
 522 }
 523 
 524 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 525   assert_lock_strong(&_freelistLock);
 526   st->print_cr("Statistics for IndexedFreeLists:");
 527   st->print_cr("--------------------------------");
 528   size_t total_size = totalSizeInIndexedFreeLists();
 529   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 530   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 531   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 532   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 533   if (free_blocks != 0) {
 534     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 535   }
 536 }
 537 
 538 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 539   size_t res = 0;
 540   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 541     debug_only(
 542       ssize_t recount = 0;
 543       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 544          fc = fc->next()) {
 545         recount += 1;
 546       }
 547       assert(recount == _indexedFreeList[i].count(),
 548         "Incorrect count in list");
 549     )
 550     res += _indexedFreeList[i].count();
 551   }
 552   return res;
 553 }
 554 
 555 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 556   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 557     if (_indexedFreeList[i].head() != NULL) {
 558       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 559       return (size_t)i;
 560     }
 561   }
 562   return 0;
 563 }
 564 
 565 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 566   HeapWord* prevEnd = end();
 567   assert(prevEnd != value, "unnecessary set_end call");
 568   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 569         "New end is below unallocated block");
 570   _end = value;
 571   if (prevEnd != NULL) {
 572     // Resize the underlying block offset table.
 573     _bt.resize(pointer_delta(value, bottom()));
 574     if (value <= prevEnd) {
 575       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 576              "New end is below unallocated block");
 577     } else {
 578       // Now, take this new chunk and add it to the free blocks.
 579       // Note that the BOT has not yet been updated for this block.
 580       size_t newFcSize = pointer_delta(value, prevEnd);
 581       // Add the block to the free lists, if possible coalescing it
 582       // with the last free block, and update the BOT and census data.
 583       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 584     }
 585   }
 586 }
 587 
 588 class FreeListSpaceDCTOC : public FilteringDCTOC {
 589   CompactibleFreeListSpace* _cfls;
 590   CMSCollector* _collector;
 591   bool _parallel;
 592 protected:
 593   // Override.
 594 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 595   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 596                                        HeapWord* bottom, HeapWord* top, \
 597                                        ClosureType* cl);                \
 598       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 599                                        HeapWord* bottom, HeapWord* top, \
 600                                        ClosureType* cl);                \
 601     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 602                                        HeapWord* bottom, HeapWord* top, \
 603                                        ClosureType* cl)
 604   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 605   walk_mem_region_with_cl_DECL(FilteringClosure);
 606 
 607 public:
 608   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 609                      CMSCollector* collector,
 610                      ExtendedOopClosure* cl,
 611                      CardTableModRefBS::PrecisionStyle precision,
 612                      HeapWord* boundary,
 613                      bool parallel) :
 614     FilteringDCTOC(sp, cl, precision, boundary),
 615     _cfls(sp), _collector(collector), _parallel(parallel) {}
 616 };
 617 
 618 // We de-virtualize the block-related calls below, since we know that our
 619 // space is a CompactibleFreeListSpace.
 620 
 621 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 622 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 623                                                  HeapWord* bottom,              \
 624                                                  HeapWord* top,                 \
 625                                                  ClosureType* cl) {             \
 626    if (_parallel) {                                                             \
 627      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 628    } else {                                                                     \
 629      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 630    }                                                                            \
 631 }                                                                               \
 632 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 633                                                      HeapWord* bottom,          \
 634                                                      HeapWord* top,             \
 635                                                      ClosureType* cl) {         \
 636   /* Skip parts that are before "mr", in case "block_start" sent us             \
 637      back too far. */                                                           \
 638   HeapWord* mr_start = mr.start();                                              \
 639   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 640   HeapWord* next = bottom + bot_size;                                           \
 641   while (next < mr_start) {                                                     \
 642     bottom = next;                                                              \
 643     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 644     next = bottom + bot_size;                                                   \
 645   }                                                                             \
 646                                                                                 \
 647   while (bottom < top) {                                                        \
 648     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 649         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 650                     oop(bottom)) &&                                             \
 651         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 652       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 653       bottom += _cfls->adjustObjectSize(word_sz);                               \
 654     } else {                                                                    \
 655       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 656     }                                                                           \
 657   }                                                                             \
 658 }                                                                               \
 659 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 660                                                        HeapWord* bottom,        \
 661                                                        HeapWord* top,           \
 662                                                        ClosureType* cl) {       \
 663   /* Skip parts that are before "mr", in case "block_start" sent us             \
 664      back too far. */                                                           \
 665   HeapWord* mr_start = mr.start();                                              \
 666   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 667   HeapWord* next = bottom + bot_size;                                           \
 668   while (next < mr_start) {                                                     \
 669     bottom = next;                                                              \
 670     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 671     next = bottom + bot_size;                                                   \
 672   }                                                                             \
 673                                                                                 \
 674   while (bottom < top) {                                                        \
 675     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 676         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 677                     oop(bottom)) &&                                             \
 678         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 679       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 680       bottom += _cfls->adjustObjectSize(word_sz);                               \
 681     } else {                                                                    \
 682       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 683     }                                                                           \
 684   }                                                                             \
 685 }
 686 
 687 // (There are only two of these, rather than N, because the split is due
 688 // only to the introduction of the FilteringClosure, a local part of the
 689 // impl of this abstraction.)
 690 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 691 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 692 
 693 DirtyCardToOopClosure*
 694 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 695                                       CardTableModRefBS::PrecisionStyle precision,
 696                                       HeapWord* boundary,
 697                                       bool parallel) {
 698   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 699 }
 700 
 701 
 702 // Note on locking for the space iteration functions:
 703 // since the collector's iteration activities are concurrent with
 704 // allocation activities by mutators, absent a suitable mutual exclusion
 705 // mechanism the iterators may go awry. For instance a block being iterated
 706 // may suddenly be allocated or divided up and part of it allocated and
 707 // so on.
 708 
 709 // Apply the given closure to each block in the space.
 710 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 711   assert_lock_strong(freelistLock());
 712   HeapWord *cur, *limit;
 713   for (cur = bottom(), limit = end(); cur < limit;
 714        cur += cl->do_blk_careful(cur));
 715 }
 716 
 717 // Apply the given closure to each block in the space.
 718 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 719   assert_lock_strong(freelistLock());
 720   HeapWord *cur, *limit;
 721   for (cur = bottom(), limit = end(); cur < limit;
 722        cur += cl->do_blk(cur));
 723 }
 724 
 725 // Apply the given closure to each oop in the space.
 726 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 727   assert_lock_strong(freelistLock());
 728   HeapWord *cur, *limit;
 729   size_t curSize;
 730   for (cur = bottom(), limit = end(); cur < limit;
 731        cur += curSize) {
 732     curSize = block_size(cur);
 733     if (block_is_obj(cur)) {
 734       oop(cur)->oop_iterate(cl);
 735     }
 736   }
 737 }
 738 
 739 // NOTE: In the following methods, in order to safely be able to
 740 // apply the closure to an object, we need to be sure that the
 741 // object has been initialized. We are guaranteed that an object
 742 // is initialized if we are holding the Heap_lock with the
 743 // world stopped.
 744 void CompactibleFreeListSpace::verify_objects_initialized() const {
 745   if (is_init_completed()) {
 746     assert_locked_or_safepoint(Heap_lock);
 747     if (Universe::is_fully_initialized()) {
 748       guarantee(SafepointSynchronize::is_at_safepoint(),
 749                 "Required for objects to be initialized");
 750     }
 751   } // else make a concession at vm start-up
 752 }
 753 
 754 // Apply the given closure to each object in the space
 755 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 756   assert_lock_strong(freelistLock());
 757   NOT_PRODUCT(verify_objects_initialized());
 758   HeapWord *cur, *limit;
 759   size_t curSize;
 760   for (cur = bottom(), limit = end(); cur < limit;
 761        cur += curSize) {
 762     curSize = block_size(cur);
 763     if (block_is_obj(cur)) {
 764       blk->do_object(oop(cur));
 765     }
 766   }
 767 }
 768 
 769 // Apply the given closure to each live object in the space
 770 //   The usage of CompactibleFreeListSpace
 771 // by the ConcurrentMarkSweepGeneration for concurrent GCs allows
 772 // objects in the space to hold references to objects that are no longer
 773 // valid.  For example, an object may reference another object
 774 // that has already been swept up (collected).  This method uses
 775 // obj_is_alive() to determine whether it is safe to apply the closure to
 776 // an object.  See obj_is_alive() for details on how liveness of an
 777 // object is decided.
 778 
 779 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 780   assert_lock_strong(freelistLock());
 781   NOT_PRODUCT(verify_objects_initialized());
 782   HeapWord *cur, *limit;
 783   size_t curSize;
 784   for (cur = bottom(), limit = end(); cur < limit;
 785        cur += curSize) {
 786     curSize = block_size(cur);
 787     if (block_is_obj(cur) && obj_is_alive(cur)) {
 788       blk->do_object(oop(cur));
 789     }
 790   }
 791 }
 792 
 793 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 794                                                   UpwardsObjectClosure* cl) {
 795   assert_locked(freelistLock());
 796   NOT_PRODUCT(verify_objects_initialized());
 797   assert(!mr.is_empty(), "Should be non-empty");
 798   // We use MemRegion(bottom(), end()) rather than used_region() below
 799   // because the two are not necessarily equal for some kinds of
 800   // spaces, in particular, certain kinds of free list spaces.
 801   // We could use the more complicated but more precise:
 802   // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
 803   // but the slight imprecision seems acceptable in the assertion check.
 804   assert(MemRegion(bottom(), end()).contains(mr),
 805          "Should be within used space");
 806   HeapWord* prev = cl->previous();   // max address from last time
 807   if (prev >= mr.end()) { // nothing to do
 808     return;
 809   }
 810   // This assert will not work when we go from cms space to perm
 811   // space, and use same closure. Easy fix deferred for later. XXX YSR
 812   // assert(prev == NULL || contains(prev), "Should be within space");
 813 
 814   bool last_was_obj_array = false;
 815   HeapWord *blk_start_addr, *region_start_addr;
 816   if (prev > mr.start()) {
 817     region_start_addr = prev;
 818     blk_start_addr    = prev;
 819     // The previous invocation may have pushed "prev" beyond the
 820     // last allocated block, yet there may still be blocks
 821     // in this region due to a particular coalescing policy.
 822     // Relax the assertion so that the case where the unallocated
 823     // block is maintained and "prev" is beyond the unallocated
 824     // block does not cause the assertion to fire.
 825     assert((BlockOffsetArrayUseUnallocatedBlock &&
 826             (!is_in(prev))) ||
 827            (blk_start_addr == block_start(region_start_addr)), "invariant");
 828   } else {
 829     region_start_addr = mr.start();
 830     blk_start_addr    = block_start(region_start_addr);
 831   }
 832   HeapWord* region_end_addr = mr.end();
 833   MemRegion derived_mr(region_start_addr, region_end_addr);
 834   while (blk_start_addr < region_end_addr) {
 835     const size_t size = block_size(blk_start_addr);
 836     if (block_is_obj(blk_start_addr)) {
 837       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 838     } else {
 839       last_was_obj_array = false;
 840     }
 841     blk_start_addr += size;
 842   }
 843   if (!last_was_obj_array) {
 844     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 845            "Should be within (closed) used space");
 846     assert(blk_start_addr > prev, "Invariant");
 847     cl->set_previous(blk_start_addr); // min address for next time
 848   }
 849 }
 850 
 851 // Callers of this iterator beware: The closure application should
 852 // be robust in the face of uninitialized objects and should (always)
 853 // return a correct size so that the next addr + size below gives us a
 854 // valid block boundary. [See for instance,
 855 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 856 // in ConcurrentMarkSweepGeneration.cpp.]
 857 HeapWord*
 858 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 859   ObjectClosureCareful* cl) {
 860   assert_lock_strong(freelistLock());
 861   // Can't use used_region() below because it may not necessarily
 862   // be the same as [bottom(),end()); although we could
 863   // use [used_region().start(),align_up(used_region().end(),CardSize)),
 864   // that appears too cumbersome, so we just do the simpler check
 865   // in the assertion below.
 866   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 867          "mr should be non-empty and within used space");
 868   HeapWord *addr, *end;
 869   size_t size;
 870   for (addr = block_start_careful(mr.start()), end  = mr.end();
 871        addr < end; addr += size) {
 872     FreeChunk* fc = (FreeChunk*)addr;
 873     if (fc->is_free()) {
 874       // Since we hold the free list lock, which protects direct
 875       // allocation in this generation by mutators, a free object
 876       // will remain free throughout this iteration code.
 877       size = fc->size();
 878     } else {
 879       // Note that the object need not necessarily be initialized,
 880       // because (for instance) the free list lock does NOT protect
 881       // object initialization. The closure application below must
 882       // therefore be correct in the face of uninitialized objects.
 883       size = cl->do_object_careful_m(oop(addr), mr);
 884       if (size == 0) {
 885         // An unparsable object was found. Signal early termination.
 886         return addr;
 887       }
 888     }
 889   }
 890   return NULL;
 891 }
 892 
 893 
 894 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 895   NOT_PRODUCT(verify_objects_initialized());
 896   return _bt.block_start(p);
 897 }
 898 
 899 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 900   return _bt.block_start_careful(p);
 901 }
 902 
 903 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 904   NOT_PRODUCT(verify_objects_initialized());
 905   // This must be volatile, or else there is a danger that the compiler
 906   // will compile the code below into a sometimes-infinite loop, by keeping
 907   // the value read the first time in a register.
 908   while (true) {
 909     // We must do this until we get a consistent view of the object.
 910     if (FreeChunk::indicatesFreeChunk(p)) {
 911       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 912       size_t res = fc->size();
 913 
 914       // Bugfix for systems with weak memory model (PPC64/IA64). The
 915       // block's free bit was set and we have read the size of the
 916       // block. Acquire and check the free bit again. If the block is
 917       // still free, the read size is correct.
 918       OrderAccess::acquire();
 919 
 920       // If the object is still a free chunk, return the size, else it
 921       // has been allocated so try again.
 922       if (FreeChunk::indicatesFreeChunk(p)) {
 923         assert(res != 0, "Block size should not be 0");
 924         return res;
 925       }
 926     } else {
 927       // Ensure klass read before size.
 928       Klass* k = oop(p)->klass_or_null_acquire();
 929       if (k != NULL) {
 930         assert(k->is_klass(), "Should really be klass oop.");
 931         oop o = (oop)p;
 932         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 933 
 934         size_t res = o->size_given_klass(k);
 935         res = adjustObjectSize(res);
 936         assert(res != 0, "Block size should not be 0");
 937         return res;
 938       }
 939     }
 940   }
 941 }
 942 
 943 // TODO: Now that is_parsable is gone, we should combine these two functions.
 944 // A variant of the above that uses the Printezis bits for
 945 // unparsable but allocated objects. This avoids any possible
 946 // stalls waiting for mutators to initialize objects, and is
 947 // thus potentially faster than the variant above. However,
 948 // this variant may return a zero size for a block that is
 949 // under mutation and for which a consistent size cannot be
 950 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 951 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 952                                                      const CMSCollector* c)
 953 const {
 954   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 955   // This must be volatile, or else there is a danger that the compiler
 956   // will compile the code below into a sometimes-infinite loop, by keeping
 957   // the value read the first time in a register.
 958   DEBUG_ONLY(uint loops = 0;)
 959   while (true) {
 960     // We must do this until we get a consistent view of the object.
 961     if (FreeChunk::indicatesFreeChunk(p)) {
 962       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 963       size_t res = fc->size();
 964 
 965       // Bugfix for systems with weak memory model (PPC64/IA64). The
 966       // free bit of the block was set and we have read the size of
 967       // the block. Acquire and check the free bit again. If the
 968       // block is still free, the read size is correct.
 969       OrderAccess::acquire();
 970 
 971       if (FreeChunk::indicatesFreeChunk(p)) {
 972         assert(res != 0, "Block size should not be 0");
 973         assert(loops == 0, "Should be 0");
 974         return res;
 975       }
 976     } else {
 977       // Ensure klass read before size.
 978       Klass* k = oop(p)->klass_or_null_acquire();
 979       if (k != NULL) {
 980         assert(k->is_klass(), "Should really be klass oop.");
 981         oop o = (oop)p;
 982         assert(o->is_oop(), "Should be an oop");
 983 
 984         size_t res = o->size_given_klass(k);
 985         res = adjustObjectSize(res);
 986         assert(res != 0, "Block size should not be 0");
 987         return res;
 988       } else {
 989         // May return 0 if P-bits not present.
 990         return c->block_size_if_printezis_bits(p);
 991       }
 992     }
 993     assert(loops == 0, "Can loop at most once");
 994     DEBUG_ONLY(loops++;)
 995   }
 996 }
 997 
 998 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
 999   NOT_PRODUCT(verify_objects_initialized());
1000   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1001   FreeChunk* fc = (FreeChunk*)p;
1002   if (fc->is_free()) {
1003     return fc->size();
1004   } else {
1005     // Ignore mark word because this may be a recently promoted
1006     // object whose mark word is used to chain together grey
1007     // objects (the last one would have a null value).
1008     assert(oop(p)->is_oop(true), "Should be an oop");
1009     return adjustObjectSize(oop(p)->size());
1010   }
1011 }
1012 
1013 // This implementation assumes that the property of "being an object" is
1014 // stable.  But being a free chunk may not be (because of parallel
1015 // promotion).
1016 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1017   FreeChunk* fc = (FreeChunk*)p;
1018   assert(is_in_reserved(p), "Should be in space");
1019   if (FreeChunk::indicatesFreeChunk(p)) return false;
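       // klass_or_null_acquire() pairs with the releasing store of the klass
       // pointer made when an object is published; seeing a non-NULL klass
       // here therefore implies the object is initialized enough to be
       // treated as an oop below.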
1020   Klass* k = oop(p)->klass_or_null_acquire();
1021   if (k != NULL) {
1022     // Ignore mark word because it may have been used to
1023     // chain together promoted objects (the last one
1024     // would have a null value).
1025     assert(oop(p)->is_oop(true), "Should be an oop");
1026     return true;
1027   } else {
1028     return false;  // Was not an object at the start of collection.
1029   }
1030 }
1031 
1032 // Check if the object is alive. This fact is checked either by consulting
1033 // the main marking bitmap in the sweeping phase or, if it's a permanent
1034 // generation and we're not in the sweeping phase, by checking the
1035 // perm_gen_verify_bit_map where we store the "deadness" information if
1036 // we did not sweep the perm gen in the most recent previous GC cycle.
1037 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1038   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1039          "Else races are possible");
1040   assert(block_is_obj(p), "The address should point to an object");
1041 
1042   // If we're sweeping, we use object liveness information from the main bit map
1043   // for both perm gen and old gen.
1044   // We don't need to lock the bitmap (live_map or dead_map below), because
1045   // EITHER we are in the middle of the sweeping phase, and the
1046   // main marking bit map (live_map below) is locked,
1047   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1048   // is stable, because it's mutated only in the sweeping phase.
1049   // NOTE: This method is also used by jmap where, if class unloading is
1050   // off, the results can return "false" for legitimate perm objects,
1051   // when we are not in the midst of a sweeping phase, which can result
1052   // in jmap not reporting certain perm gen objects. This will be moot
1053   // if/when the perm gen goes away in the future.
1054   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1055     CMSBitMap* live_map = _collector->markBitMap();
1056     return live_map->par_isMarked((HeapWord*) p);
1057   }
1058   return true;
1059 }
1060 
1061 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1062   FreeChunk* fc = (FreeChunk*)p;
1063   assert(is_in_reserved(p), "Should be in space");
1064   assert(_bt.block_start(p) == p, "Should be a block boundary");
1065   if (!fc->is_free()) {
1066     // Ignore mark word because it may have been used to
1067     // chain together promoted objects (the last one
1068     // would have a null value).
1069     assert(oop(p)->is_oop(true), "Should be an oop");
1070     return true;
1071   }
1072   return false;
1073 }
1074 
1075 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1076 // approximate answer if you don't hold the freelistLock() when you call this.
1077 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1078   size_t size = 0;
1079   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1080     debug_only(
1081       // We may be calling here without the lock in which case we
1082       // won't do this modest sanity check.
1083       if (freelistLock()->owned_by_self()) {
1084         size_t total_list_size = 0;
1085         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1086           fc = fc->next()) {
1087           total_list_size += i;
1088         }
1089         assert(total_list_size == i * _indexedFreeList[i].count(),
1090                "Count in list is incorrect");
1091       }
1092     )
1093     size += i * _indexedFreeList[i].count();
1094   }
1095   return size;
1096 }
1097 
1098 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1099   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1100   return allocate(size);
1101 }
1102 
1103 HeapWord*
1104 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1105   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1106 }
1107 
1108 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1109   assert_lock_strong(freelistLock());
1110   HeapWord* res = NULL;
1111   assert(size == adjustObjectSize(size),
1112          "use adjustObjectSize() before calling into allocate()");
1113 
1114   res = allocate_adaptive_freelists(size);
1115 
1116   if (res != NULL) {
1117     // check that res does lie in this space!
1118     assert(is_in_reserved(res), "Not in this space!");
1119     assert(is_aligned((void*)res), "alignment check");
1120 
1121     FreeChunk* fc = (FreeChunk*)res;
1122     fc->markNotFree();
1123     assert(!fc->is_free(), "shouldn't be marked free");
1124     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1125     // Verify that the block offset table shows this to
1126     // be a single block, but not one which is unallocated.
1127     _bt.verify_single_block(res, size);
1128     _bt.verify_not_unallocated(res, size);
1129     // mangle a just allocated object with a distinct pattern.
1130     debug_only(fc->mangleAllocated(size));
1131   }
1132 
1133   return res;
1134 }
1135 
1136 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1137   assert_lock_strong(freelistLock());
1138   HeapWord* res = NULL;
1139   assert(size == adjustObjectSize(size),
1140          "use adjustObjectSize() before calling into allocate()");
1141 
1142   // Strategy
1143   //   if small
1144   //     exact size from the small-object indexed lists
1145   //     small or large linear allocation block (linAB) as appropriate
1146   //     take from lists of greater sized chunks
1147   //   else
1148   //     dictionary
1149   //     small or large linear allocation block if it has the space
1150   // Try allocating exact size from indexTable first
1151   if (size < IndexSetSize) {
1152     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1153     if(res != NULL) {
1154       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1155         "Not removed from free list");
1156       // no block offset table adjustment is necessary on blocks in
1157       // the indexed lists.
1158 
1159     // Try allocating from the small LinAB
1160     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1161         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1162         // if successful, the above also adjusts block offset table
1163         // Note that this call will refill the LinAB to
1164         // satisfy the request.  This is different than
1165         // evm.
1166         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1167     } else {
1168       // Raid the exact free lists larger than size, even if they are not
1169       // overpopulated.
1170       res = (HeapWord*) getChunkFromGreater(size);
1171     }
1172   } else {
1173     // Big objects get allocated directly from the dictionary.
1174     res = (HeapWord*) getChunkFromDictionaryExact(size);
1175     if (res == NULL) {
1176       // Try hard not to fail since an allocation failure will likely
1177       // trigger a synchronous GC.  Try to get the space from the
1178       // allocation blocks.
1179       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1180     }
1181   }
1182 
1183   return res;
1184 }
1185 
1186 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1187 // when promoting obj.
1188 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1189   // Depending on the object size, expansion may require refilling either a
1190   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1191   // is added because the dictionary may over-allocate to avoid fragmentation.
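       // For example (illustrative numbers only): promoting a 10-word object
       // when refillSize() happens to be 128 words and MinChunkSize is 3 words
       // yields an estimate of 10 + 128 + 2 * 3 = 144 words.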
1192   size_t space = obj_size;
1193   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1194   return space;
1195 }
1196 
1197 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1198   FreeChunk* ret;
1199 
1200   assert(numWords >= MinChunkSize, "Size is less than minimum");
1201   assert(linearAllocationWouldFail() || bestFitFirst(),
1202     "Should not be here");
1203 
1204   size_t i;
1205   size_t currSize = numWords + MinChunkSize;
1206   assert(is_object_aligned(currSize), "currSize should be aligned");
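       // Only consider indexed lists whose chunks are large enough that, after
       // carving numWords out of them, the remainder is at least MinChunkSize.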
1207   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1208     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1209     if (fl->head()) {
1210       ret = getFromListGreater(fl, numWords);
1211       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1212       return ret;
1213     }
1214   }
1215 
1216   currSize = MAX2((size_t)SmallForDictionary,
1217                   (size_t)(numWords + MinChunkSize));
1218 
1219   /* Try to get a chunk that satisfies request, while avoiding
1220      fragmentation that can't be handled. */
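  /* Asking the dictionary for at least numWords + MinChunkSize ensures that,
     after carving off numWords, the remainder is itself at least MinChunkSize
     and can stand on its own as a free chunk (see the assert below). */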
1221   {
1222     ret =  dictionary()->get_chunk(currSize);
1223     if (ret != NULL) {
1224       assert(ret->size() - numWords >= MinChunkSize,
1225              "Chunk is too small");
1226       _bt.allocated((HeapWord*)ret, ret->size());
1227       /* Carve returned chunk. */
1228       (void) splitChunkAndReturnRemainder(ret, numWords);
1229       /* Label this as no longer a free chunk. */
1230       assert(ret->is_free(), "This chunk should be free");
1231       ret->link_prev(NULL);
1232     }
1233     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1234     return ret;
1235   }
1236   ShouldNotReachHere();
1237 }
1238 
1239 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1240   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1241   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1242 }
1243 
1244 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1245   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1246          (_smallLinearAllocBlock._word_size == fc->size()),
1247          "Linear allocation block shows incorrect size");
1248   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1249           (_smallLinearAllocBlock._word_size == fc->size()));
1250 }
1251 
1252 // Check if the purported free chunk is present either as a linear
1253 // allocation block, the size-indexed table of (smaller) free blocks,
1254 // or the larger free blocks kept in the binary tree dictionary.
1255 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1256   if (verify_chunk_is_linear_alloc_block(fc)) {
1257     return true;
1258   } else if (fc->size() < IndexSetSize) {
1259     return verifyChunkInIndexedFreeLists(fc);
1260   } else {
1261     return dictionary()->verify_chunk_in_free_list(fc);
1262   }
1263 }
1264 
1265 #ifndef PRODUCT
1266 void CompactibleFreeListSpace::assert_locked() const {
1267   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1268 }
1269 
1270 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1271   CMSLockVerifier::assert_locked(lock);
1272 }
1273 #endif
1274 
1275 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1276   // In the parallel case, the main thread holds the free list lock
1277   // on behalf of the parallel threads.
1278   FreeChunk* fc;
1279   {
1280     // If GC is parallel, this might be called by several threads.
1281     // This should be rare enough that the locking overhead won't affect
1282     // the sequential code.
1283     MutexLockerEx x(parDictionaryAllocLock(),
1284                     Mutex::_no_safepoint_check_flag);
1285     fc = getChunkFromDictionary(size);
1286   }
1287   if (fc != NULL) {
1288     fc->dontCoalesce();
1289     assert(fc->is_free(), "Should be free, but not coalescable");
1290     // Verify that the block offset table shows this to
1291     // be a single block, but not one which is unallocated.
1292     _bt.verify_single_block((HeapWord*)fc, fc->size());
1293     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1294   }
1295   return fc;
1296 }
1297 
1298 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1299   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1300   assert_locked();
1301 
1302   // if we are tracking promotions, then first ensure space for
1303   // promotion (including spooling space for saving header if necessary).
1304   // then allocate and copy, then track promoted info if needed.
1305   // When tracking (see PromotionInfo::track()), the mark word may
1306   // be displaced and in this case restoration of the mark word
1307   // occurs in the (oop_since_save_marks_)iterate phase.
1308   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1309     return NULL;
1310   }
1311   // Call the allocate(size_t, bool) form directly to avoid the
1312   // additional call through the allocate(size_t) form.  Having
1313   // the compiler inline the call is problematic because allocate(size_t)
1314   // is a virtual method.
1315   HeapWord* res = allocate(adjustObjectSize(obj_size));
1316   if (res != NULL) {
1317     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1318     // if we should be tracking promotions, do so.
1319     if (_promoInfo.tracking()) {
1320         _promoInfo.track((PromotedObject*)res);
1321     }
1322   }
1323   return oop(res);
1324 }
1325 
1326 HeapWord*
1327 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1328   assert_locked();
1329   assert(size >= MinChunkSize, "minimum chunk size");
1330   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1331     "maximum from smallLinearAllocBlock");
1332   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1333 }
1334 
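// Allocate "size" words from the given linear allocation block (linAB):
// first try to carve the request out of the block's current remainder;
// failing that, either hand out the block exactly, or return what is left of
// it to the free lists and refill the linAB before carving from it again.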
1335 HeapWord*
1336 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1337                                                        size_t size) {
1338   assert_locked();
1339   assert(size >= MinChunkSize, "too small");
1340   HeapWord* res = NULL;
1341   // Try to do linear allocation from blk, after first checking that it is not empty.
1342   if (blk->_word_size == 0) {
1343     // We have probably been unable to fill this either in the prologue or
1344     // when it was exhausted at the last linear allocation. Bail out until
1345     // next time.
1346     assert(blk->_ptr == NULL, "consistency check");
1347     return NULL;
1348   }
1349   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1350   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1351   if (res != NULL) return res;
1352 
1353   // about to exhaust this linear allocation block
1354   if (blk->_word_size == size) { // exactly satisfied
1355     res = blk->_ptr;
1356     _bt.allocated(res, blk->_word_size);
1357   } else if (size + MinChunkSize <= blk->_refillSize) {
1358     size_t sz = blk->_word_size;
1359     // Update _unallocated_block if the size is such that chunk would be
1360     // returned to the indexed free list.  All other chunks in the indexed
1361     // free lists are allocated from the dictionary so that _unallocated_block
1362     // has already been adjusted for them.  Do the update here so that it
1363     // covers all chunks added back to the indexed free lists.
1364     if (sz < SmallForDictionary) {
1365       _bt.allocated(blk->_ptr, sz);
1366     }
1367     // Return the chunk that isn't big enough, and then refill below.
1368     addChunkToFreeLists(blk->_ptr, sz);
1369     split_birth(sz);
1370     // Don't keep statistics on adding back chunk from a LinAB.
1371   } else {
1372     // A refilled block would not satisfy the request.
1373     return NULL;
1374   }
1375 
1376   blk->_ptr = NULL; blk->_word_size = 0;
1377   refillLinearAllocBlock(blk);
1378   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1379          "block was replenished");
1380   if (res != NULL) {
1381     split_birth(size);
1382     repairLinearAllocBlock(blk);
1383   } else if (blk->_ptr != NULL) {
1384     res = blk->_ptr;
1385     size_t blk_size = blk->_word_size;
1386     blk->_word_size -= size;
1387     blk->_ptr  += size;
1388     split_birth(size);
1389     repairLinearAllocBlock(blk);
1390     // Update BOT last so that other (parallel) GC threads see a consistent
1391     // view of the BOT and free blocks.
1392     // Above must occur before BOT is updated below.
1393     OrderAccess::storestore();
1394     _bt.split_block(res, blk_size, size);  // adjust block offset table
1395   }
1396   return res;
1397 }
1398 
1399 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1400                                         LinearAllocBlock* blk,
1401                                         size_t size) {
1402   assert_locked();
1403   assert(size >= MinChunkSize, "too small");
1404 
1405   HeapWord* res = NULL;
1406   // This is the common case.  Keep it simple.
1407   if (blk->_word_size >= size + MinChunkSize) {
1408     assert(blk->_ptr != NULL, "consistency check");
1409     res = blk->_ptr;
1410     // Note that the BOT is up-to-date for the linAB before allocation.  It
1411     // indicates the start of the linAB.  The split_block() updates the
1412     // BOT for the linAB after the allocation (indicates the start of the
1413     // next chunk to be allocated).
1414     size_t blk_size = blk->_word_size;
1415     blk->_word_size -= size;
1416     blk->_ptr  += size;
1417     split_birth(size);
1418     repairLinearAllocBlock(blk);
1419     // Update BOT last so that other (parallel) GC threads see a consistent
1420     // view of the BOT and free blocks.
1421     // Above must occur before BOT is updated below.
1422     OrderAccess::storestore();
1423     _bt.split_block(res, blk_size, size);  // adjust block offset table
1424     _bt.allocated(res, size);
1425   }
1426   return res;
1427 }
1428 
1429 FreeChunk*
1430 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1431   assert_locked();
1432   assert(size < SmallForDictionary, "just checking");
1433   FreeChunk* res;
1434   res = _indexedFreeList[size].get_chunk_at_head();
1435   if (res == NULL) {
1436     res = getChunkFromIndexedFreeListHelper(size);
1437   }
1438   _bt.verify_not_unallocated((HeapWord*) res, size);
1439   assert(res == NULL || res->size() == size, "Incorrect block size");
1440   return res;
1441 }
1442 
1443 FreeChunk*
1444 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1445   bool replenish) {
1446   assert_locked();
1447   FreeChunk* fc = NULL;
1448   if (size < SmallForDictionary) {
1449     assert(_indexedFreeList[size].head() == NULL ||
1450       _indexedFreeList[size].surplus() <= 0,
1451       "List for this size should be empty or under populated");
1452     // Try best fit in exact lists before replenishing the list
1453     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1454       // Replenish list.
1455       //
1456       // Things tried that failed.
1457       //   Tried allocating out of the two LinAB's first before
1458       // replenishing lists.
1459       //   Tried small linAB of size 256 (size in indexed list)
1460       // and replenishing indexed lists from the small linAB.
1461       //
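      // Current approach: obtain a larger chunk whose size is an integral
      // multiple of the request (nominally CMSIndexedFreeListReplenish * size)
      // and, below, carve it into size-word blocks, queueing all but the last
      // on this list and returning the last block to the caller.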
1462       FreeChunk* newFc = NULL;
1463       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1464       if (replenish_size < SmallForDictionary) {
1465         // Do not replenish from an underpopulated size.
1466         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1467             _indexedFreeList[replenish_size].head() != NULL) {
1468           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1469         } else if (bestFitFirst()) {
1470           newFc = bestFitSmall(replenish_size);
1471         }
1472       }
1473       if (newFc == NULL && replenish_size > size) {
1474         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1475         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1476       }
1477       // Note: The split-death stats update for the block obtained above
1478       // is recorded below, precisely when we know we are actually going
1479       // to split it into more than one piece.
1480       if (newFc != NULL) {
1481         if  (replenish || CMSReplenishIntermediate) {
1482           // Replenish this list and return one block to caller.
1483           size_t i;
1484           FreeChunk *curFc, *nextFc;
1485           size_t num_blk = newFc->size() / size;
1486           assert(num_blk >= 1, "Smaller than requested?");
1487           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1488           if (num_blk > 1) {
1489             // we are sure we will be splitting the block just obtained
1490             // into multiple pieces; record the split-death of the original
1491             splitDeath(replenish_size);
1492           }
1493           // carve up and link blocks 0, ..., num_blk - 2
1494           // The last chunk is not added to the lists but is returned as the
1495           // free chunk.
1496           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1497                i = 0;
1498                i < (num_blk - 1);
1499                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1500                i++) {
1501             curFc->set_size(size);
1502             // Don't record this as a return in order to try and
1503             // determine the "returns" from a GC.
1504             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1505             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1506             _bt.mark_block((HeapWord*)curFc, size);
1507             split_birth(size);
1508             // Don't record the initial population of the indexed list
1509             // as a split birth.
1510           }
1511 
1512           // check that the arithmetic was OK above
1513           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1514             "inconsistency in carving newFc");
1515           curFc->set_size(size);
1516           _bt.mark_block((HeapWord*)curFc, size);
1517           split_birth(size);
1518           fc = curFc;
1519         } else {
1520           // Return entire block to caller
1521           fc = newFc;
1522         }
1523       }
1524     }
1525   } else {
1526     // Get a free chunk from the free chunk dictionary to be returned to
1527     // replenish the indexed free list.
1528     fc = getChunkFromDictionaryExact(size);
1529   }
1530   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1531   return fc;
1532 }
1533 
1534 FreeChunk*
1535 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1536   assert_locked();
1537   FreeChunk* fc = _dictionary->get_chunk(size);
1538   if (fc == NULL) {
1539     return NULL;
1540   }
1541   _bt.allocated((HeapWord*)fc, fc->size());
1542   if (fc->size() >= size + MinChunkSize) {
1543     fc = splitChunkAndReturnRemainder(fc, size);
1544   }
1545   assert(fc->size() >= size, "chunk too small");
1546   assert(fc->size() < size + MinChunkSize, "chunk too big");
1547   _bt.verify_single_block((HeapWord*)fc, fc->size());
1548   return fc;
1549 }
1550 
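// Like getChunkFromDictionary(), but the result is trimmed to exactly "size"
// words.  If the chunk first handed back by the dictionary is too small to
// leave a MinChunkSize remainder after splitting, it is returned to the
// dictionary and a chunk of at least size + MinChunkSize words is requested.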
1551 FreeChunk*
1552 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1553   assert_locked();
1554   FreeChunk* fc = _dictionary->get_chunk(size);
1555   if (fc == NULL) {
1556     return fc;
1557   }
1558   _bt.allocated((HeapWord*)fc, fc->size());
1559   if (fc->size() == size) {
1560     _bt.verify_single_block((HeapWord*)fc, size);
1561     return fc;
1562   }
1563   assert(fc->size() > size, "get_chunk() guarantee");
1564   if (fc->size() < size + MinChunkSize) {
1565     // Return the chunk to the dictionary and go get a bigger one.
1566     returnChunkToDictionary(fc);
1567     fc = _dictionary->get_chunk(size + MinChunkSize);
1568     if (fc == NULL) {
1569       return NULL;
1570     }
1571     _bt.allocated((HeapWord*)fc, fc->size());
1572   }
1573   assert(fc->size() >= size + MinChunkSize, "tautology");
1574   fc = splitChunkAndReturnRemainder(fc, size);
1575   assert(fc->size() == size, "chunk is wrong size");
1576   _bt.verify_single_block((HeapWord*)fc, size);
1577   return fc;
1578 }
1579 
1580 void
1581 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1582   assert_locked();
1583 
1584   size_t size = chunk->size();
1585   _bt.verify_single_block((HeapWord*)chunk, size);
1586   // adjust _unallocated_block downward, as necessary
1587   _bt.freed((HeapWord*)chunk, size);
1588   _dictionary->return_chunk(chunk);
1589 #ifndef PRODUCT
1590   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1591     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1592     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1593     tl->verify_stats();
1594   }
1595 #endif // PRODUCT
1596 }
1597 
1598 void
1599 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1600   assert_locked();
1601   size_t size = fc->size();
1602   _bt.verify_single_block((HeapWord*) fc, size);
1603   _bt.verify_not_unallocated((HeapWord*) fc, size);
1604   _indexedFreeList[size].return_chunk_at_tail(fc);
1605 #ifndef PRODUCT
1606   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1607      _indexedFreeList[size].verify_stats();
1608   }
1609 #endif // PRODUCT
1610 }
1611 
1612 // Add chunk to end of last block -- if it's the largest
1613 // block -- and update BOT and census data. We would
1614 // of course have preferred to coalesce it with the
1615 // last block, but it's currently less expensive to find the
1616 // largest block than it is to find the last.
1617 void
1618 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1619   HeapWord* chunk, size_t     size) {
1620   // check that the chunk does lie in this space!
1621   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1622   // One of the parallel gc task threads may be here
1623   // whilst others are allocating.
1624   Mutex* lock = &_parDictionaryAllocLock;
1625   FreeChunk* ec;
1626   {
1627     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1628     ec = dictionary()->find_largest_dict();  // get largest block
1629     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1630       // It's a coterminal block - we can coalesce.
1631       size_t old_size = ec->size();
1632       coalDeath(old_size);
1633       removeChunkFromDictionary(ec);
1634       size += old_size;
1635     } else {
1636       ec = (FreeChunk*)chunk;
1637     }
1638   }
1639   ec->set_size(size);
1640   debug_only(ec->mangleFreed(size));
1641   if (size < SmallForDictionary) {
1642     lock = _indexedFreeListParLocks[size];
1643   }
1644   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1645   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1646   // record the birth under the lock since the recording involves
1647   // manipulation of the list on which the chunk lives and
1648   // if the chunk is allocated and is the last on the list,
1649   // the list can go away.
1650   coalBirth(size);
1651 }
1652 
1653 void
1654 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1655                                               size_t     size) {
1656   // check that the chunk does lie in this space!
1657   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1658   assert_locked();
1659   _bt.verify_single_block(chunk, size);
1660 
1661   FreeChunk* fc = (FreeChunk*) chunk;
1662   fc->set_size(size);
1663   debug_only(fc->mangleFreed(size));
1664   if (size < SmallForDictionary) {
1665     returnChunkToFreeList(fc);
1666   } else {
1667     returnChunkToDictionary(fc);
1668   }
1669 }
1670 
1671 void
1672 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1673   size_t size, bool coalesced) {
1674   assert_locked();
1675   assert(chunk != NULL, "null chunk");
1676   if (coalesced) {
1677     // repair BOT
1678     _bt.single_block(chunk, size);
1679   }
1680   addChunkToFreeLists(chunk, size);
1681 }
1682 
1683 // We _must_ find the purported chunk on our free lists;
1684 // we assert if we don't.
1685 void
1686 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1687   size_t size = fc->size();
1688   assert_locked();
1689   debug_only(verifyFreeLists());
1690   if (size < SmallForDictionary) {
1691     removeChunkFromIndexedFreeList(fc);
1692   } else {
1693     removeChunkFromDictionary(fc);
1694   }
1695   _bt.verify_single_block((HeapWord*)fc, size);
1696   debug_only(verifyFreeLists());
1697 }
1698 
1699 void
1700 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1701   size_t size = fc->size();
1702   assert_locked();
1703   assert(fc != NULL, "null chunk");
1704   _bt.verify_single_block((HeapWord*)fc, size);
1705   _dictionary->remove_chunk(fc);
1706   // adjust _unallocated_block upward, as necessary
1707   _bt.allocated((HeapWord*)fc, size);
1708 }
1709 
1710 void
1711 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1712   assert_locked();
1713   size_t size = fc->size();
1714   _bt.verify_single_block((HeapWord*)fc, size);
1715   NOT_PRODUCT(
1716     if (FLSVerifyIndexTable) {
1717       verifyIndexedFreeList(size);
1718     }
1719   )
1720   _indexedFreeList[size].remove_chunk(fc);
1721   NOT_PRODUCT(
1722     if (FLSVerifyIndexTable) {
1723       verifyIndexedFreeList(size);
1724     }
1725   )
1726 }
1727 
1728 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1729   /* A hint is the next larger size that has a surplus.
1730      Start search at a size large enough to guarantee that
1731      the excess is >= MinChunkSize. */
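  /* Illustrative walk (sizes hypothetical): if the request rounds up to a
     start size of 20 and list 20's hint is 24, we inspect list 24; if it has
     no surplus we follow 24's hint, and so on, until a list with surplus
     chunks is found (and recorded as list 20's new hint) or the hints run
     off the end of the index set. */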
1732   size_t start = align_object_size(numWords + MinChunkSize);
1733   if (start < IndexSetSize) {
1734     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1735     size_t    hint = _indexedFreeList[start].hint();
1736     while (hint < IndexSetSize) {
1737       assert(is_object_aligned(hint), "hint should be aligned");
1738       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1739       if (fl->surplus() > 0 && fl->head() != NULL) {
1740         // Found a list with surplus, reset original hint
1741         // and split out a free chunk which is returned.
1742         _indexedFreeList[start].set_hint(hint);
1743         FreeChunk* res = getFromListGreater(fl, numWords);
1744         assert(res == NULL || res->is_free(),
1745           "Should be returning a free chunk");
1746         return res;
1747       }
1748       hint = fl->hint(); /* keep looking */
1749     }
1750     /* None found. */
1751     it[start].set_hint(IndexSetSize);
1752   }
1753   return NULL;
1754 }
1755 
1756 /* Requires fl->size >= numWords + MinChunkSize */
1757 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1758   size_t numWords) {
1759   FreeChunk *curr = fl->head();
1760   assert(curr != NULL, "List is empty");
1761   size_t oldNumWords = curr->size();
1762   assert(numWords >= MinChunkSize, "Word size is too small");
1763   assert(oldNumWords >= numWords + MinChunkSize,
1764         "Size of chunks in the list is too small");
1765 
1766   fl->remove_chunk(curr);
1767   // recorded indirectly by splitChunkAndReturnRemainder -
1768   // smallSplit(oldNumWords, numWords);
1769   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1770   // Does anything have to be done for the remainder in terms of
1771   // fixing the card table?
1772   assert(new_chunk == NULL || new_chunk->is_free(),
1773     "Should be returning a free chunk");
1774   return new_chunk;
1775 }
1776 
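// Split "chunk" so that its first new_size words stay with the caller; the
// trailing remainder (at least MinChunkSize words) is marked free, the block
// offset table is updated, and the remainder is handed back to the indexed
// free lists or the dictionary depending on its size.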
1777 FreeChunk*
1778 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1779   size_t new_size) {
1780   assert_locked();
1781   size_t size = chunk->size();
1782   assert(size > new_size, "Split from a smaller block?");
1783   assert(is_aligned(chunk), "alignment problem");
1784   assert(size == adjustObjectSize(size), "alignment problem");
1785   size_t rem_sz = size - new_size;
1786   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1787   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1788   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1789   assert(is_aligned(ffc), "alignment problem");
1790   ffc->set_size(rem_sz);
1791   ffc->link_next(NULL);
1792   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1793   // Above must occur before BOT is updated below.
1794   // adjust block offset table
1795   OrderAccess::storestore();
1796   assert(chunk->is_free() && ffc->is_free(), "Error");
1797   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1798   if (rem_sz < SmallForDictionary) {
1799     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1800     bool is_par = Thread::current()->is_GC_task_thread();
1801     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1802     returnChunkToFreeList(ffc);
1803     split(size, rem_sz);
1804     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1805   } else {
1806     returnChunkToDictionary(ffc);
1807     split(size, rem_sz);
1808   }
1809   chunk->set_size(new_size);
1810   return chunk;
1811 }
1812 
1813 void
1814 CompactibleFreeListSpace::sweep_completed() {
1815   // Now that space is probably plentiful, refill linear
1816   // allocation blocks as needed.
1817   refillLinearAllocBlocksIfNeeded();
1818 }
1819 
1820 void
1821 CompactibleFreeListSpace::gc_prologue() {
1822   assert_locked();
1823   reportFreeListStatistics("Before GC:");
1824   refillLinearAllocBlocksIfNeeded();
1825 }
1826 
1827 void
1828 CompactibleFreeListSpace::gc_epilogue() {
1829   assert_locked();
1830   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1831   _promoInfo.stopTrackingPromotions();
1832   repairLinearAllocationBlocks();
1833   reportFreeListStatistics("After GC:");
1834 }
1835 
1836 // Iteration support, mostly delegated from a CMS generation
1837 
1838 void CompactibleFreeListSpace::save_marks() {
1839   assert(Thread::current()->is_VM_thread(),
1840          "Global variable should only be set when single-threaded");
1841   // Mark the "end" of the used space at the time of this call;
1842   // note, however, that promoted objects from this point
1843   // on are tracked in the _promoInfo below.
1844   set_saved_mark_word(unallocated_block());
1845 #ifdef ASSERT
1846   // Check the sanity of save_marks() etc.
1847   MemRegion ur    = used_region();
1848   MemRegion urasm = used_region_at_save_marks();
1849   assert(ur.contains(urasm),
1850          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1851          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1852          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1853 #endif
1854   // inform allocator that promotions should be tracked.
1855   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1856   _promoInfo.startTrackingPromotions();
1857 }
1858 
1859 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1860   assert(_promoInfo.tracking(), "No preceding save_marks?");
1861   return _promoInfo.noPromotions();
1862 }
1863 
1864 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1865                                                                             \
1866 void CompactibleFreeListSpace::                                             \
1867 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1868   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1869   /*                                                                        \
1870    * This also restores any displaced headers and removes the elements from \
1871    * the iteration set as they are processed, so that we have a clean slate \
1872    * at the end of the iteration. Note, thus, that if new objects are       \
1873    * promoted as a result of the iteration they are iterated over as well.  \
1874    */                                                                       \
1875   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1876 }
1877 
1878 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1879 
1880 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1881   return _smallLinearAllocBlock._word_size == 0;
1882 }
1883 
1884 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1885   // Fix up linear allocation blocks to look like free blocks
1886   repairLinearAllocBlock(&_smallLinearAllocBlock);
1887 }
1888 
1889 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1890   assert_locked();
1891   if (blk->_ptr != NULL) {
1892     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1893            "Minimum block size requirement");
1894     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1895     fc->set_size(blk->_word_size);
1896     fc->link_prev(NULL);   // mark as free
1897     fc->dontCoalesce();
1898     assert(fc->is_free(), "just marked it free");
1899     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1900   }
1901 }
1902 
1903 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1904   assert_locked();
1905   if (_smallLinearAllocBlock._ptr == NULL) {
1906     assert(_smallLinearAllocBlock._word_size == 0,
1907       "Size of linAB should be zero if the ptr is NULL");
1908     // Reset the linAB refill and allocation size limit.
1909     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1910   }
1911   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1912 }
1913 
1914 void
1915 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1916   assert_locked();
1917   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1918          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1919          "blk invariant");
1920   if (blk->_ptr == NULL) {
1921     refillLinearAllocBlock(blk);
1922   }
1923 }
1924 
1925 void
1926 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1927   assert_locked();
1928   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1929          "linear allocation block should be empty");
1930   FreeChunk* fc;
1931   if (blk->_refillSize < SmallForDictionary &&
1932       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1933     // A linAB's strategy might be to use small sizes to reduce
1934     // fragmentation but still get the benefits of allocation from a
1935     // linAB.
1936   } else {
1937     fc = getChunkFromDictionary(blk->_refillSize);
1938   }
1939   if (fc != NULL) {
1940     blk->_ptr  = (HeapWord*)fc;
1941     blk->_word_size = fc->size();
1942     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1943   }
1944 }
1945 
1946 // Support for compaction
1947 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1948   scan_and_forward(this, cp);
1949   // prepare_for_compaction() uses the space between live objects
1950   // so that later phases can skip dead space quickly.  As a result,
1951   // verification of the free lists does not work after this point.
1952 }
1953 
1954 void CompactibleFreeListSpace::adjust_pointers() {
1955   // In other versions of adjust_pointers(), a bail out
1956   // based on the amount of live data in the generation
1957   // (i.e., if 0, bail out) may be used.
1958   // Cannot test used() == 0 here because the free lists have already
1959   // been mangled by the compaction.
1960 
1961   scan_and_adjust_pointers(this);
1962   // See note about verification in prepare_for_compaction().
1963 }
1964 
1965 void CompactibleFreeListSpace::compact() {
1966   scan_and_compact(this);
1967 }
1968 
1969 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2],
1970 // where fbs ranges over the free block sizes.
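// Illustrative arithmetic: two free blocks of 8 words each give
// 1 - (64 + 64)/(16*16) = 0.5, while a single 16-word block gives
// 1 - 256/256 = 0.0; the metric is 0 when all free space is contiguous and
// approaches 1 as the free space shatters into many small blocks.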
1971 double CompactibleFreeListSpace::flsFrag() const {
1972   size_t itabFree = totalSizeInIndexedFreeLists();
1973   double frag = 0.0;
1974   size_t i;
1975 
1976   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1977     double sz  = i;
1978     frag      += _indexedFreeList[i].count() * (sz * sz);
1979   }
1980 
1981   double totFree = itabFree +
1982                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1983   if (totFree > 0) {
1984     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1985             (totFree * totFree));
1986     frag = (double)1.0  - frag;
1987   } else {
1988     assert(frag == 0.0, "Follows from totFree == 0");
1989   }
1990   return frag;
1991 }
1992 
1993 void CompactibleFreeListSpace::beginSweepFLCensus(
1994   float inter_sweep_current,
1995   float inter_sweep_estimate,
1996   float intra_sweep_estimate) {
1997   assert_locked();
1998   size_t i;
1999   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2000     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2001     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2002     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2003     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2004     fl->set_before_sweep(fl->count());
2005     fl->set_bfr_surp(fl->surplus());
2006   }
2007   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2008                                     inter_sweep_current,
2009                                     inter_sweep_estimate,
2010                                     intra_sweep_estimate);
2011 }
2012 
2013 void CompactibleFreeListSpace::setFLSurplus() {
2014   assert_locked();
2015   size_t i;
2016   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2017     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2018     fl->set_surplus(fl->count() -
2019                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2020   }
2021 }
2022 
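// Walking the indexed lists from the largest size down, record for each size
// the nearest larger size that currently has a surplus; bestFitSmall() follows
// this chain of hints when the list of the exact size is empty.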
2023 void CompactibleFreeListSpace::setFLHints() {
2024   assert_locked();
2025   size_t i;
2026   size_t h = IndexSetSize;
2027   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2028     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2029     fl->set_hint(h);
2030     if (fl->surplus() > 0) {
2031       h = i;
2032     }
2033   }
2034 }
2035 
2036 void CompactibleFreeListSpace::clearFLCensus() {
2037   assert_locked();
2038   size_t i;
2039   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2040     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2041     fl->set_prev_sweep(fl->count());
2042     fl->set_coal_births(0);
2043     fl->set_coal_deaths(0);
2044     fl->set_split_births(0);
2045     fl->set_split_deaths(0);
2046   }
2047 }
2048 
2049 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2050   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2051   setFLSurplus();
2052   setFLHints();
2053   printFLCensus(sweep_count);
2054   clearFLCensus();
2055   assert_locked();
2056   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2057 }
2058 
2059 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2060   if (size < SmallForDictionary) {
2061     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2062     return (fl->coal_desired() < 0) ||
2063            ((int)fl->count() > fl->coal_desired());
2064   } else {
2065     return dictionary()->coal_dict_over_populated(size);
2066   }
2067 }
2068 
2069 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2070   assert(size < SmallForDictionary, "Size too large for indexed list");
2071   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2072   fl->increment_coal_births();
2073   fl->increment_surplus();
2074 }
2075 
2076 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2077   assert(size < SmallForDictionary, "Size too large for indexed list");
2078   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2079   fl->increment_coal_deaths();
2080   fl->decrement_surplus();
2081 }
2082 
2083 void CompactibleFreeListSpace::coalBirth(size_t size) {
2084   if (size  < SmallForDictionary) {
2085     smallCoalBirth(size);
2086   } else {
2087     dictionary()->dict_census_update(size,
2088                                    false /* split */,
2089                                    true /* birth */);
2090   }
2091 }
2092 
2093 void CompactibleFreeListSpace::coalDeath(size_t size) {
2094   if(size  < SmallForDictionary) {
2095     smallCoalDeath(size);
2096   } else {
2097     dictionary()->dict_census_update(size,
2098                                    false /* split */,
2099                                    false /* birth */);
2100   }
2101 }
2102 
2103 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2104   assert(size < SmallForDictionary, "Size too large for indexed list");
2105   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2106   fl->increment_split_births();
2107   fl->increment_surplus();
2108 }
2109 
2110 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2111   assert(size < SmallForDictionary, "Size too large for indexed list");
2112   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2113   fl->increment_split_deaths();
2114   fl->decrement_surplus();
2115 }
2116 
2117 void CompactibleFreeListSpace::split_birth(size_t size) {
2118   if (size  < SmallForDictionary) {
2119     smallSplitBirth(size);
2120   } else {
2121     dictionary()->dict_census_update(size,
2122                                    true /* split */,
2123                                    true /* birth */);
2124   }
2125 }
2126 
2127 void CompactibleFreeListSpace::splitDeath(size_t size) {
2128   if (size  < SmallForDictionary) {
2129     smallSplitDeath(size);
2130   } else {
2131     dictionary()->dict_census_update(size,
2132                                    true /* split */,
2133                                    false /* birth */);
2134   }
2135 }
2136 
2137 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2138   size_t to2 = from - to1;
2139   splitDeath(from);
2140   split_birth(to1);
2141   split_birth(to2);
2142 }
2143 
2144 void CompactibleFreeListSpace::print() const {
2145   print_on(tty);
2146 }
2147 
2148 void CompactibleFreeListSpace::prepare_for_verify() {
2149   assert_locked();
2150   repairLinearAllocationBlocks();
2151   // Verify that the SpoolBlocks look like free blocks of
2152   // appropriate sizes... To be done ...
2153 }
2154 
2155 class VerifyAllBlksClosure: public BlkClosure {
2156  private:
2157   const CompactibleFreeListSpace* _sp;
2158   const MemRegion                 _span;
2159   HeapWord*                       _last_addr;
2160   size_t                          _last_size;
2161   bool                            _last_was_obj;
2162   bool                            _last_was_live;
2163 
2164  public:
2165   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2166     MemRegion span) :  _sp(sp), _span(span),
2167                        _last_addr(NULL), _last_size(0),
2168                        _last_was_obj(false), _last_was_live(false) { }
2169 
2170   virtual size_t do_blk(HeapWord* addr) {
2171     size_t res;
2172     bool   was_obj  = false;
2173     bool   was_live = false;
2174     if (_sp->block_is_obj(addr)) {
2175       was_obj = true;
2176       oop p = oop(addr);
2177       guarantee(p->is_oop(), "Should be an oop");
2178       res = _sp->adjustObjectSize(p->size());
2179       if (_sp->obj_is_alive(addr)) {
2180         was_live = true;
2181         p->verify();
2182       }
2183     } else {
2184       FreeChunk* fc = (FreeChunk*)addr;
2185       res = fc->size();
2186       if (FLSVerifyLists && !fc->cantCoalesce()) {
2187         guarantee(_sp->verify_chunk_in_free_list(fc),
2188                   "Chunk should be on a free list");
2189       }
2190     }
2191     if (res == 0) {
2192       Log(gc, verify) log;
2193       log.error("Livelock: no rank reduction!");
2194       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2195                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2196         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2197         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2198       LogStream ls(log.error());
2199       _sp->print_on(&ls);
2200       guarantee(false, "Verification failed.");
2201     }
2202     _last_addr = addr;
2203     _last_size = res;
2204     _last_was_obj  = was_obj;
2205     _last_was_live = was_live;
2206     return res;
2207   }
2208 };
2209 
2210 class VerifyAllOopsClosure: public OopClosure {
2211  private:
2212   const CMSCollector*             _collector;
2213   const CompactibleFreeListSpace* _sp;
2214   const MemRegion                 _span;
2215   const bool                      _past_remark;
2216   const CMSBitMap*                _bit_map;
2217 
2218  protected:
2219   void do_oop(void* p, oop obj) {
2220     if (_span.contains(obj)) { // the interior oop points into CMS heap
2221       if (!_span.contains(p)) { // reference from outside CMS heap
2222         // Should be a valid object; the first disjunct below allows
2223         // us to sidestep an assertion in block_is_obj() that insists
2224         // that p be in _sp. Note that several generations (and spaces)
2225         // are spanned by _span (CMS heap) above.
2226         guarantee(!_sp->is_in_reserved(obj) ||
2227                   _sp->block_is_obj((HeapWord*)obj),
2228                   "Should be an object");
2229         guarantee(obj->is_oop(), "Should be an oop");
2230         obj->verify();
2231         if (_past_remark) {
2232           // Remark has been completed, so the object should be marked.
2233           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2234         }
2235       } else { // reference within CMS heap
2236         if (_past_remark) {
2237           // Remark has been completed -- so the referent should have
2238           // been marked, if referring object is.
2239           if (_bit_map->isMarked(_collector->block_start(p))) {
2240             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2241           }
2242         }
2243       }
2244     } else if (_sp->is_in_reserved(p)) {
2245       // the reference is from FLS, and points out of FLS
2246       guarantee(obj->is_oop(), "Should be an oop");
2247       obj->verify();
2248     }
2249   }
2250 
2251   template <class T> void do_oop_work(T* p) {
2252     T heap_oop = oopDesc::load_heap_oop(p);
2253     if (!oopDesc::is_null(heap_oop)) {
2254       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2255       do_oop(p, obj);
2256     }
2257   }
2258 
2259  public:
2260   VerifyAllOopsClosure(const CMSCollector* collector,
2261     const CompactibleFreeListSpace* sp, MemRegion span,
2262     bool past_remark, CMSBitMap* bit_map) :
2263     _collector(collector), _sp(sp), _span(span),
2264     _past_remark(past_remark), _bit_map(bit_map) { }
2265 
2266   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2267   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2268 };
2269 
2270 void CompactibleFreeListSpace::verify() const {
2271   assert_lock_strong(&_freelistLock);
2272   verify_objects_initialized();
2273   MemRegion span = _collector->_span;
2274   bool past_remark = (_collector->abstract_state() ==
2275                       CMSCollector::Sweeping);
2276 
2277   ResourceMark rm;
2278   HandleMark  hm;
2279 
2280   // Check integrity of CFL data structures
2281   _promoInfo.verify();
2282   _dictionary->verify();
2283   if (FLSVerifyIndexTable) {
2284     verifyIndexedFreeLists();
2285   }
2286   // Check integrity of all objects and free blocks in space
2287   {
2288     VerifyAllBlksClosure cl(this, span);
2289     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2290   }
2291   // Check that all references in the heap to FLS
2292   // are to valid objects in FLS or that references in
2293   // FLS are to valid objects elsewhere in the heap
2294   if (FLSVerifyAllHeapReferences) {
2296     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2297       _collector->markBitMap());
2298 
2299     // Iterate over all oops in the heap. Uses the _no_header version
2300     // since we are not interested in following the klass pointers.
2301     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2302   }
2303 
2304   if (VerifyObjectStartArray) {
2305     // Verify the block offset table
2306     _bt.verify();
2307   }
2308 }
2309 
2310 #ifndef PRODUCT
2311 void CompactibleFreeListSpace::verifyFreeLists() const {
2312   if (FLSVerifyLists) {
2313     _dictionary->verify();
2314     verifyIndexedFreeLists();
2315   } else {
2316     if (FLSVerifyDictionary) {
2317       _dictionary->verify();
2318     }
2319     if (FLSVerifyIndexTable) {
2320       verifyIndexedFreeLists();
2321     }
2322   }
2323 }
2324 #endif
2325 
2326 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2327   size_t i = 0;
2328   for (; i < IndexSetStart; i++) {
2329     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2330   }
2331   for (; i < IndexSetSize; i++) {
2332     verifyIndexedFreeList(i);
2333   }
2334 }
2335 
2336 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2337   FreeChunk* fc   =  _indexedFreeList[size].head();
2338   FreeChunk* tail =  _indexedFreeList[size].tail();
2339   size_t    num = _indexedFreeList[size].count();
2340   size_t      n = 0;
2341   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2342             "Slot should have been empty");
2343   for (; fc != NULL; fc = fc->next(), n++) {
2344     guarantee(fc->size() == size, "Size inconsistency");
2345     guarantee(fc->is_free(), "!free?");
2346     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2347     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2348   }
2349   guarantee(n == num, "Incorrect count");
2350 }
2351 
2352 #ifndef PRODUCT
2353 void CompactibleFreeListSpace::check_free_list_consistency() const {
2354   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2355     "Some sizes can't be allocated without recourse to"
2356     " linear allocation buffers");
2357   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2358     "else MIN_TREE_CHUNK_SIZE is wrong");
2359   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2360   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2361 }
2362 #endif
2363 
2364 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2365   assert_lock_strong(&_freelistLock);
2366   LogTarget(Debug, gc, freelist, census) log;
2367   if (!log.is_enabled()) {
2368     return;
2369   }
2370   AdaptiveFreeList<FreeChunk> total;
2371   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2372   ResourceMark rm;
2373   LogStream ls(log);
2374   outputStream* out = &ls;
2375   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2376   size_t total_free = 0;
2377   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2378     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2379     total_free += fl->count() * fl->size();
2380     if (i % (40*IndexSetStride) == 0) {
2381       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2382     }
2383     fl->print_on(out);
2384     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2385     total.set_surplus(    total.surplus()     + fl->surplus()    );
2386     total.set_desired(    total.desired()     + fl->desired()    );
2387     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2388     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2389     total.set_count(      total.count()       + fl->count()      );
2390     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2391     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2392     total.set_split_births(total.split_births() + fl->split_births());
2393     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2394   }
2395   total.print_on(out, "TOTAL");
2396   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2397   log.print("growth: %8.5f  deficit: %8.5f",
2398             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2399                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2400             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2401   _dictionary->print_dict_census(out);
2402 }
2403 
2404 ///////////////////////////////////////////////////////////////////////////
2405 // CompactibleFreeListSpaceLAB
2406 ///////////////////////////////////////////////////////////////////////////
2407 
2408 #define VECTOR_257(x)                                                                                  \
2409   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2410   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2411      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2412      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2413      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2414      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2415      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2416      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2417      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2418      x }
2419 
2420 // Initialize with default setting for CMS, _not_
2421 // generic OldPLABSize, whose static default is different; if overridden at the
2422 // command-line, this will get reinitialized via a call to
2423 // modify_initialization() below.
2424 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2425   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2426 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2427 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2428 
2429 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2430   _cfls(cfls)
2431 {
2432   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2433   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2434        i < CompactibleFreeListSpace::IndexSetSize;
2435        i += CompactibleFreeListSpace::IndexSetStride) {
2436     _indexedFreeList[i].set_size(i);
2437     _num_blocks[i] = 0;
2438   }
2439 }
2440 
2441 static bool _CFLS_LAB_modified = false;
2442 
2443 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2444   assert(!_CFLS_LAB_modified, "Call only once");
2445   _CFLS_LAB_modified = true;
2446   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2447        i < CompactibleFreeListSpace::IndexSetSize;
2448        i += CompactibleFreeListSpace::IndexSetStride) {
2449     _blocks_to_claim[i].modify(n, wt, true /* force */);
2450   }
2451 }
2452 
2453 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2454   FreeChunk* res;
2455   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2456   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2457     // This locking manages sync with other large object allocations.
2458     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2459                     Mutex::_no_safepoint_check_flag);
2460     res = _cfls->getChunkFromDictionaryExact(word_sz);
2461     if (res == NULL) return NULL;
2462   } else {
2463     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2464     if (fl->count() == 0) {
2465       // Attempt to refill this local free list.
2466       get_from_global_pool(word_sz, fl);
2467       // If it didn't work, give up.
2468       if (fl->count() == 0) return NULL;
2469     }
2470     res = fl->get_chunk_at_head();
2471     assert(res != NULL, "Why was count non-zero?");
2472   }
2473   res->markNotFree();
2474   assert(!res->is_free(), "shouldn't be marked free");
2475   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2476   // mangle a just allocated object with a distinct pattern.
2477   debug_only(res->mangleAllocated(word_sz));
2478   return (HeapWord*)res;
2479 }
2480 
2481 // Get a chunk of blocks of the right size and update related
2482 // book-keeping stats
2483 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2484   // Get the #blocks we want to claim
2485   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2486   assert(n_blks > 0, "Error");
2487   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2488   // In some cases, when the application has a phase change,
2489   // there may be a sudden and sharp shift in the object survival
2490   // profile, and updating the counts at the end of a scavenge
2491   // may not be quick enough, giving rise to large scavenge pauses
2492   // during these phase changes. It is beneficial to detect such
2493   // changes on-the-fly during a scavenge and avoid such a phase-change
2494   // pothole. The following code is a heuristic attempt to do that.
2495   // It is protected by a product flag until we have gained
2496   // enough experience with this heuristic and fine-tuned its behavior.
2497   // WARNING: This might increase fragmentation if we overreact to
2498   // small spikes, so some kind of historical smoothing based on
2499   // previous experience with the greater reactivity might be useful.
2500   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2501   // default.
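  // Illustrative arithmetic (flag values hypothetical): with n_blks == 16,
  // CMSOldPLABNumRefills == 4, CMSOldPLABToleranceFactor == 1.5 and 384 blocks
  // of this size already consumed during this scavenge, multiple ==
  // 384/(1.5*4*16) == 4; with CMSOldPLABReactivityFactor == 2 the claim grows
  // to 16 + 2*4*16 == 144 blocks, subject to the CMSOldPLABMax cap.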
2502   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2503     //
2504     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2505     // which is why there is a cast to double.
2506     //
2507     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2508     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2509     n_blks = MIN2(n_blks, CMSOldPLABMax);
2510   }
2511   assert(n_blks > 0, "Error");
2512   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2513   // Update stats table entry for this block size
2514   _num_blocks[word_sz] += fl->count();
2515 }
2516 
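// Fold the per-size block consumption observed across all GC workers during
// the last scavenge into the weighted _blocks_to_claim averages (when
// ResizeOldPLAB is enabled), then reset the global counters for the next round.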
2517 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2518   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2519        i < CompactibleFreeListSpace::IndexSetSize;
2520        i += CompactibleFreeListSpace::IndexSetStride) {
2521     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2522            "Counter inconsistency");
2523     if (_global_num_workers[i] > 0) {
2524       // Need to smooth wrt historical average
2525       if (ResizeOldPLAB) {
2526         _blocks_to_claim[i].sample(
2527           MAX2(CMSOldPLABMin,
2528           MIN2(CMSOldPLABMax,
2529                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2530       }
2531       // Reset counters for next round
2532       _global_num_workers[i] = 0;
2533       _global_num_blocks[i] = 0;
2534       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2535     }
2536   }
2537 }
2538 
2539 // If this is changed in the future to allow parallel
2540 // access, one would need to take the FL locks and,
2541 // depending on how it is used, stagger access from
2542 // parallel threads to reduce contention.
2543 void CompactibleFreeListSpaceLAB::retire(int tid) {
2544   // We run this single threaded with the world stopped;
2545   // so no need for locks and such.
2546   NOT_PRODUCT(Thread* t = Thread::current();)
2547   assert(Thread::current()->is_VM_thread(), "Error");
2548   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2549        i < CompactibleFreeListSpace::IndexSetSize;
2550        i += CompactibleFreeListSpace::IndexSetStride) {
2551     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2552            "Can't retire more than what we obtained");
2553     if (_num_blocks[i] > 0) {
2554       size_t num_retire =  _indexedFreeList[i].count();
2555       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2556       {
2557         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2558         //                Mutex::_no_safepoint_check_flag);
2559 
2560         // Update global stats with the number of blocks actually used (obtained minus retired)
2561         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2562         _global_num_workers[i]++;
2563         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
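             // Return the chunks this thread obtained but did not use to the
             // shared indexed free list for this size.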
2564         if (num_retire > 0) {
2565           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2566           // Reset this list.
2567           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2568           _indexedFreeList[i].set_size(i);
2569         }
2570       }
2571       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2572                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2573       // Reset stats for next round
2574       _num_blocks[i]         = 0;
2575     }
2576   }
2577 }
2578 
2579 // Used by par_get_chunk_of_blocks() for the chunks from the
2580 // indexed_free_lists.  Looks for chunks whose size is a multiple
2581 // of "word_sz" and, if found, splits them into chunks of size
2582 // "word_sz", which are added to the free list "fl".  "n" is the
2583 // maximum number of chunks to be added to "fl".
2584 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2585 
2586   // We'll try all multiples of word_sz in the indexed set, starting with
2587   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2588   // then try getting a big chunk and splitting it.
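       // E.g. (illustrative sizes) for word_sz == 4 and n == 64: k == 1 looks in the
       // 4-word list; if that is empty and CMSSplitIndexedFreeListBlocks is set,
       // k == 2 looks in the 8-word list and splits each chunk taken in two, and so
       // on, until cur_sz reaches IndexSetSize.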
2589   {
2590     bool found;
2591     int  k;
2592     size_t cur_sz;
2593     for (k = 1, cur_sz = k * word_sz, found = false;
2594          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2595          (CMSSplitIndexedFreeListBlocks || k <= 1);
2596          k++, cur_sz = k * word_sz) {
2597       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2598       fl_for_cur_sz.set_size(cur_sz);
2599       {
2600         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2601                         Mutex::_no_safepoint_check_flag);
2602         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2603         if (gfl->count() != 0) {
2604           // nn is the number of chunks of size cur_sz that
2605           // we'd need to split k-ways each, in order to create
2606           // "n" chunks of size word_sz each.
2607           const size_t nn = MAX2(n/k, (size_t)1);
2608           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2609           found = true;
2610           if (k > 1) {
2611             // Update split death stats for the cur_sz-size blocks list:
2612             // we increment the split death count by the number of blocks
2613             // we just took from the cur_sz-size blocks list and which
2614             // we will be splitting below.
2615             ssize_t deaths = gfl->split_deaths() +
2616                              fl_for_cur_sz.count();
2617             gfl->set_split_deaths(deaths);
2618           }
2619         }
2620       }
2621       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2622       if (found) {
2623         if (k == 1) {
2624           fl->prepend(&fl_for_cur_sz);
2625         } else {
2626           // Divide each block on fl_for_cur_sz up k ways.
2627           FreeChunk* fc;
2628           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2629             // Must do this in reverse order, so that anybody attempting to
2630             // access the main chunk sees it as a single free block until we
2631             // change it.
2632             size_t fc_size = fc->size();
2633             assert(fc->is_free(), "Error");
2634             for (int i = k-1; i >= 0; i--) {
2635               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2636               assert((i != 0) ||
2637                         ((fc == ffc) && ffc->is_free() &&
2638                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2639                         "Counting error");
2640               ffc->set_size(word_sz);
2641               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2642               ffc->link_next(NULL);
2643               // Above must occur before BOT is updated below.
2644               OrderAccess::storestore();
2645               // splitting from the right; fc_size becomes i * word_sz after the decrement below
2646               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2647               fc_size -= word_sz;
2648               assert(fc_size == i*word_sz, "Error");
2649               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2650               _bt.verify_single_block((HeapWord*)fc, fc_size);
2651               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2652               // Push this on "fl".
2653               fl->return_chunk_at_head(ffc);
2654             }
2655             // TRAP
2656             assert(fl->tail()->next() == NULL, "List invariant.");
2657           }
2658         }
2659         // Update birth stats for this block size.
2660         size_t num = fl->count();
2661         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2662                         Mutex::_no_safepoint_check_flag);
2663         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2664         _indexedFreeList[word_sz].set_split_births(births);
2665         return true;
2666       }
2667     }
2668     return found;
2669   }
2670 }
2671 
2672 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2673 
2674   FreeChunk* fc = NULL;
2675   FreeChunk* rem_fc = NULL;
2676   size_t rem;
2677   {
2678     MutexLockerEx x(parDictionaryAllocLock(),
2679                     Mutex::_no_safepoint_check_flag);
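         // Ask the dictionary for a chunk big enough for n blocks of word_sz;
         // if nothing that large is available, progressively lower the request.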
2680     while (n > 0) {
2681       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
2682       if (fc != NULL) {
2683         break;
2684       } else {
2685         n--;
2686       }
2687     }
2688     if (fc == NULL) return NULL;
2689     // Otherwise, split up that block.
2690     assert((ssize_t)n >= 1, "Control point invariant");
2691     assert(fc->is_free(), "Error: should be a free block");
2692     _bt.verify_single_block((HeapWord*)fc, fc->size());
2693     const size_t nn = fc->size() / word_sz;
2694     n = MIN2(nn, n);
2695     assert((ssize_t)n >= 1, "Control point invariant");
2696     rem = fc->size() - n * word_sz;
2697     // If there is a remainder, and it's too small, allocate one fewer.
2698     if (rem > 0 && rem < MinChunkSize) {
2699       n--; rem += word_sz;
2700     }
2701     // Note that at this point we may have n == 0.
2702     assert((ssize_t)n >= 0, "Control point invariant");
2703 
2704     // If n is 0, the chunk fc that was found is not large
2705     // enough to leave a viable remainder.  We are unable to
2706     // allocate even one block.  Return fc to the
2707     // dictionary and return, leaving "fl" empty.
2708     if (n == 0) {
2709       returnChunkToDictionary(fc);
2710       return NULL;
2711     }
2712 
2713     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2714     dictionary()->dict_census_update(fc->size(),
2715                                      true /*split*/,
2716                                      false /*birth*/);
2717 
2718     // First return the remainder, if any.
2719     // Note that we hold the lock until we decide if we're going to give
2720     // back the remainder to the dictionary, since a concurrent allocation
2721     // may otherwise see the heap as empty.  (We're willing to take that
2722     // hit if the block is a small block.)
2723     if (rem > 0) {
2724       size_t prefix_size = n * word_sz;
2725       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2726       rem_fc->set_size(rem);
2727       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2728       rem_fc->link_next(NULL);
2729       // Above must occur before BOT is updated below.
2730       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2731       OrderAccess::storestore();
2732       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2733       assert(fc->is_free(), "Error");
2734       fc->set_size(prefix_size);
2735       if (rem >= IndexSetSize) {
2736         returnChunkToDictionary(rem_fc);
2737         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2738         rem_fc = NULL;
2739       }
2740       // Otherwise, return it to the small list below.
2741     }
2742   }
2743   if (rem_fc != NULL) {
2744     MutexLockerEx x(_indexedFreeListParLocks[rem],
2745                     Mutex::_no_safepoint_check_flag);
2746     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2747     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2748     smallSplitBirth(rem);
2749   }
2750   assert(n * word_sz == fc->size(),
2751          "Chunk size " SIZE_FORMAT " is not exactly splittable into "
2752          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2753          fc->size(), n, word_sz);
2754   return fc;
2755 }
2756 
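     // Fill "fl" with up to "targeted_number_of_chunks" blocks of size "word_sz"
     // by splitting off a single larger chunk taken from the dictionary.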
2757 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2758 
2759   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2760 
2761   if (fc == NULL) {
2762     return;
2763   }
2764 
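       // get_n_way_chunk_to_split() has already trimmed fc so that it divides
       // exactly into blocks of size word_sz.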
2765   size_t n = fc->size() / word_sz;
2766 
2767   assert((ssize_t)n > 0, "Consistency");
2768   // Now do the splitting up.
2769   // Must do this in reverse order, so that anybody attempting to
2770   // access the main chunk sees it as a single free block until we
2771   // change it.
2772   size_t fc_size = n * word_sz;
2773   // All but first chunk in this loop
2774   for (ssize_t i = n-1; i > 0; i--) {
2775     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2776     ffc->set_size(word_sz);
2777     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2778     ffc->link_next(NULL);
2779     // Above must occur before BOT is updated below.
2780     OrderAccess::storestore();
2781     // splitting from the right; fc_size becomes i * word_sz after the decrement below
2782     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2783     fc_size -= word_sz;
2784     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2785     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2786     _bt.verify_single_block((HeapWord*)fc, fc_size);
2787     // Push this on "fl".
2788     fl->return_chunk_at_head(ffc);
2789   }
2790   // First chunk
2791   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2792   // The blocks above should show their new sizes before the first block below
2793   fc->set_size(word_sz);
2794   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2795   fc->link_next(NULL);
2796   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2797   _bt.verify_single_block((HeapWord*)fc, fc->size());
2798   fl->return_chunk_at_head(fc);
2799 
2800   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2801   {
2802     // Update the stats for this block size.
2803     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2804                     Mutex::_no_safepoint_check_flag);
2805     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2806     _indexedFreeList[word_sz].set_split_births(births);
2807     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2808     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2809   }
2810 
2811   // TRAP
2812   assert(fl->tail()->next() == NULL, "List invariant.");
2813 }
2814 
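     // Fill "fl" with up to "n" free blocks of size "word_sz": first try the
     // indexed free lists directly; failing that, split a larger chunk taken
     // from the dictionary.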
2815 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2816   assert(fl->count() == 0, "Precondition.");
2817   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2818          "Precondition");
2819 
2820   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2821     // Got it
2822     return;
2823   }
2824 
2825   // Otherwise, we'll split a block from the dictionary.
2826   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2827 }
2828 
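     // Ergonomic upper bound for the task-size multiplier flags (such as
     // CMSRescanMultiple): at this value a single task would span the entire
     // old generation reservation.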
2829 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
2830   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTableModRefBS::card_size_in_words * BitsPerWord);
2831   return ergo_max;
2832 }
2833 
2834 // Set up the space's par_seq_tasks structure for work claiming
2835 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2836 // XXX Need to suitably abstract and generalize this and the next
2837 // method into one.
2838 void
2839 CompactibleFreeListSpace::
2840 initialize_sequential_subtasks_for_rescan(int n_threads) {
2841   // The "size" of each task is fixed according to rescan_task_size.
2842   assert(n_threads > 0, "Unexpected n_threads argument");
2843   const size_t task_size = rescan_task_size();
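       // The number of tasks is the ceiling of the used-region size over the task size.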
2844   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2845   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2846   assert(n_tasks == 0 ||
2847          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2848           (used_region().start() + n_tasks*task_size >= used_region().end())),
2849          "n_tasks calculation incorrect");
2850   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2851   assert(!pst->valid(), "Clobbering existing data?");
2852   // Sets the condition for completion of the subtask (how many threads
2853   // need to finish in order to be done).
2854   pst->set_n_threads(n_threads);
2855   pst->set_n_tasks((int)n_tasks);
2856 }
2857 
2858 // Set up the space's par_seq_tasks structure for work claiming
2859 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2860 void
2861 CompactibleFreeListSpace::
2862 initialize_sequential_subtasks_for_marking(int n_threads,
2863                                            HeapWord* low) {
2864   // The "size" of each task is fixed according to rescan_task_size.
2865   assert(n_threads > 0, "Unexpected n_threads argument");
2866   const size_t task_size = marking_task_size();
2867   assert(task_size > CardTableModRefBS::card_size_in_words &&
2868          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2869          "Otherwise arithmetic below would be incorrect");
2870   MemRegion span = _old_gen->reserved();
2871   if (low != NULL) {
2872     if (span.contains(low)) {
2873       // Align low down to  a card boundary so that
2874       // we can use block_offset_careful() on span boundaries.
2875       HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
2876       // Clip span prefix at aligned_low
2877       span = span.intersection(MemRegion(aligned_low, span.end()));
2878     } else if (low > span.end()) {
2879       span = MemRegion(low, low);  // Null region
2880     } // else use entire span
2881   }
2882   assert(span.is_empty() ||
2883          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2884         "span should start at a card boundary");
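       // The number of tasks is the ceiling of the (clipped) span size over the task size.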
2885   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2886   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2887   assert(n_tasks == 0 ||
2888          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2889           (span.start() + n_tasks*task_size >= span.end())),
2890          "n_tasks calculation incorrect");
2891   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2892   assert(!pst->valid(), "Clobbering existing data?");
2893   // Sets the condition for completion of the subtask (how many threads
2894   // need to finish in order to be done).
2895   pst->set_n_threads(n_threads);
2896   pst->set_n_tasks((int)n_tasks);
2897 }