1 /*
   2  * Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/space.inline.hpp"
  34 #include "gc/shared/spaceDecorator.hpp"
  35 #include "logging/logStream.inline.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/align.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 /////////////////////////////////////////////////////////////////////////
  50 //// CompactibleFreeListSpace
  51 /////////////////////////////////////////////////////////////////////////
  52 
// Highest ranked free list lock rank.
int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  55 
  56 // Defaults are 0 so things will break badly if incorrectly initialized.
  57 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  58 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  59 
  60 size_t MinChunkSize = 0;
  61 
  62 void CompactibleFreeListSpace::set_cms_values() {
  63   // Set CMS global values
  64   assert(MinChunkSize == 0, "already set");
  65 
  66   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  67   // for chunks to contain a FreeChunk.
  68   size_t min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  69   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  70 
  71   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  72   IndexSetStart  = MinChunkSize;
  73   IndexSetStride = MinObjAlignment;
  74 }
  75 
  76 // Constructor
  77 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  78   _bt(bs, mr),
  // Free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors are called serially in the
  // order in which the locks are acquired in the program text. This is
  // true today.
  84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  85                 Monitor::_safepoint_check_sometimes),
  86   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  87                           "CompactibleFreeListSpace._dict_par_lock", true,
  88                           Monitor::_safepoint_check_never),
  89   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSRescanMultiple),
  91   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  92                     CMSConcMarkMultiple),
  93   _collector(NULL),
  94   _preconsumptionDirtyCardClosure(NULL)
  95 {
  96   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  97          "FreeChunk is larger than expected");
  98   _bt.set_space(this);
  99   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 100 
 101   _dictionary = new AFLBinaryTreeDictionary(mr);
 102 
 103   assert(_dictionary != NULL, "CMS dictionary initialization");
 104   // The indexed free lists are initially all empty and are lazily
 105   // filled in on demand. Initialize the array elements to NULL.
 106   initializeIndexedFreeListArray();
 107 
 108   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 109                              SmallForLinearAlloc);
 110 
 111   // CMSIndexedFreeListReplenish should be at least 1
 112   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 113   _promoInfo.setSpace(this);
 114   if (UseCMSBestFit) {
 115     _fitStrategy = FreeBlockBestFitFirst;
 116   } else {
 117     _fitStrategy = FreeBlockStrategyNone;
 118   }
 119   check_free_list_consistency();
 120 
 121   // Initialize locks for parallel case.
 122   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 123     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 124                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 125     DEBUG_ONLY(
 126       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 127     )
 128   }
 129   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 130 }
 131 
// Like CompactibleSpace forward() but always calls cross_threshold() to
// update the block offset table.  The initialize_threshold call is omitted
// because CFLS does not use a block offset array for contiguous spaces.
 135 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 136                                     CompactPoint* cp, HeapWord* compact_top) {
 137   // q is alive
 138   // First check if we should switch compaction space
 139   assert(this == cp->space, "'this' should be current compaction space.");
 140   size_t compaction_max_size = pointer_delta(end(), compact_top);
 141   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 142     "virtual adjustObjectSize_v() method is not correct");
 143   size_t adjusted_size = adjustObjectSize(size);
 144   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 145          "no small fragments allowed");
 146   assert(minimum_free_block_size() == MinChunkSize,
 147          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 149   if (adjusted_size + MinChunkSize > compaction_max_size &&
 150       adjusted_size != compaction_max_size) {
 151     do {
 152       // switch to next compaction space
 153       cp->space->set_compaction_top(compact_top);
 154       cp->space = cp->space->next_compaction_space();
 155       if (cp->space == NULL) {
 156         cp->gen = GenCollectedHeap::heap()->young_gen();
 157         assert(cp->gen != NULL, "compaction must succeed");
 158         cp->space = cp->gen->first_compaction_space();
 159         assert(cp->space != NULL, "generation must have a first compaction space");
 160       }
 161       compact_top = cp->space->bottom();
 162       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here, which was avoided above to save the
      // virtual dispatch.
 167       adjusted_size = cp->space->adjust_object_size_v(size);
 168       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 169       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 170     } while (adjusted_size > compaction_max_size);
 171   }
 172 
 173   // store the forwarding pointer into the mark word
 174   if ((HeapWord*)q != compact_top) {
 175     q->forward_to(oop(compact_top));
 176     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 177   } else {
 178     // if the object isn't moving we can just set the mark to the default
 179     // mark and handle it specially later on.
 180     q->init_mark();
 181     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 182   }
 183 
 184   compact_top += adjusted_size;
 185 
 186   // we need to update the offset table so that the beginnings of objects can be
 187   // found during scavenge.  Note that we are updating the offset table based on
 188   // where the object will be once the compaction phase finishes.
 189 
  // Always call cross_threshold().  A contiguous space calls it only when
  // the compaction_top exceeds the current threshold, but a non-contiguous
  // space must call it unconditionally.
 193   cp->threshold =
 194     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 195   return compact_top;
 196 }
 197 
 198 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 199 // and use of single_block instead of alloc_block.  The name here is not really
 200 // appropriate - maybe a more general name could be invented for both the
 201 // contiguous and noncontiguous spaces.
 202 
 203 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 204   _bt.single_block(start, the_end);
 205   return end();
 206 }
 207 
// Initialize the indexed free lists: heads and tails start out NULL.
 209 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 210   for (size_t i = 0; i < IndexSetSize; i++) {
 211     // Note that on platforms where objects are double word aligned,
 212     // the odd array elements are not used.  It is convenient, however,
 213     // to map directly from the object size to the array element.
 214     _indexedFreeList[i].reset(IndexSetSize);
 215     _indexedFreeList[i].set_size(i);
 216     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 217     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 219     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 220   }
 221 }
 222 
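// Size, in heap words, of the object at addr, adjusted via adjustObjectSize().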
 223 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 224   return adjustObjectSize(oop(addr)->size());
 225 }
 226 
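// Empty every indexed free list; the per-list sizes established at
// initialization are retained.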
 227 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 228   for (size_t i = 1; i < IndexSetSize; i++) {
 229     assert(_indexedFreeList[i].size() == (size_t) i,
 230       "Indexed free list sizes are incorrect");
 231     _indexedFreeList[i].reset(IndexSetSize);
 232     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 233     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 234     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 235     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 236   }
 237 }
 238 
 239 void CompactibleFreeListSpace::reset(MemRegion mr) {
 240   resetIndexedFreeListArray();
 241   dictionary()->reset();
 242   if (BlockOffsetArrayUseUnallocatedBlock) {
 243     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 244     // Everything's allocated until proven otherwise.
 245     _bt.set_unallocated_block(end());
 246   }
 247   if (!mr.is_empty()) {
 248     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 249     _bt.single_block(mr.start(), mr.word_size());
 250     FreeChunk* fc = (FreeChunk*) mr.start();
 251     fc->set_size(mr.word_size());
 252     if (mr.word_size() >= IndexSetSize ) {
 253       returnChunkToDictionary(fc);
 254     } else {
 255       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 256       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 257     }
 258     coalBirth(mr.word_size());
 259   }
 260   _promoInfo.reset();
 261   _smallLinearAllocBlock._ptr = NULL;
 262   _smallLinearAllocBlock._word_size = 0;
 263 }
 264 
 265 void CompactibleFreeListSpace::reset_after_compaction() {
 266   // Reset the space to the new reality - one free chunk.
 267   MemRegion mr(compaction_top(), end());
 268   reset(mr);
 269   // Now refill the linear allocation block(s) if possible.
 270   refillLinearAllocBlocksIfNeeded();
 271 }
 272 
 273 // Walks the entire dictionary, returning a coterminal
 274 // chunk, if it exists. Use with caution since it involves
 275 // a potentially complete walk of a potentially large tree.
 276 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 277 
 278   assert_lock_strong(&_freelistLock);
 279 
 280   return dictionary()->find_chunk_ends_at(end());
 281 }
 282 
 283 
 284 #ifndef PRODUCT
 285 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 286   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 287     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 288   }
 289 }
 290 
 291 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 292   size_t sum = 0;
 293   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 294     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 295   }
 296   return sum;
 297 }
 298 
 299 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 300   size_t count = 0;
 301   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 302     debug_only(
 303       ssize_t total_list_count = 0;
 304       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 305          fc = fc->next()) {
 306         total_list_count++;
 307       }
 308       assert(total_list_count ==  _indexedFreeList[i].count(),
 309         "Count in list is incorrect");
 310     )
 311     count += _indexedFreeList[i].count();
 312   }
 313   return count;
 314 }
 315 
 316 size_t CompactibleFreeListSpace::totalCount() {
 317   size_t num = totalCountInIndexedFreeLists();
 318   num +=  dictionary()->total_count();
 319   if (_smallLinearAllocBlock._word_size != 0) {
 320     num++;
 321   }
 322   return num;
 323 }
 324 #endif
 325 
 326 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 327   FreeChunk* fc = (FreeChunk*) p;
 328   return fc->is_free();
 329 }
 330 
 331 size_t CompactibleFreeListSpace::used() const {
 332   return capacity() - free();
 333 }
 334 
 335 size_t CompactibleFreeListSpace::free() const {
 336   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 337   // if you do this while the structures are in flux you
 338   // may get an approximate answer only; for instance
 339   // because there is concurrent allocation either
 340   // directly by mutators or for promotion during a GC.
 341   // It's "MT-safe", however, in the sense that you are guaranteed
 342   // not to crash and burn, for instance, because of walking
 343   // pointers that could disappear as you were walking them.
  // The approximation is because the various components
  // that are read below are not read atomically (and,
  // further, the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 348   // this is during a resize operation at the end of GC
 349   // and at that time you are guaranteed to get the
 350   // correct actual value. However, for instance, this is
 351   // also read completely asynchronously by the "perf-sampler"
 352   // that supports jvmstat, and you are apt to see the values
 353   // flicker in such cases.
 354   assert(_dictionary != NULL, "No _dictionary?");
 355   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 356           totalSizeInIndexedFreeLists() +
 357           _smallLinearAllocBlock._word_size) * HeapWordSize;
 358 }
 359 
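// Largest single allocation (in words) currently possible: the maximum of
// the dictionary's largest chunk, the (capped) remainder of the small linear
// allocation block, and the largest non-empty indexed free list.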
 360 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 361   assert(_dictionary != NULL, "No _dictionary?");
 362   assert_locked();
 363   size_t res = _dictionary->max_chunk_size();
 364   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 365                        (size_t) SmallForLinearAlloc - 1));
 366   // XXX the following could potentially be pretty slow;
 367   // should one, pessimistically for the rare cases when res
 368   // calculated above is less than IndexSetSize,
 369   // just return res calculated above? My reasoning was that
 370   // those cases will be so rare that the extra time spent doesn't
 371   // really matter....
 372   // Note: do not change the loop test i >= res + IndexSetStride
 373   // to i > res below, because i is unsigned and res may be zero.
 374   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 375        i -= IndexSetStride) {
 376     if (_indexedFreeList[i].head() != NULL) {
 377       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 378       return i;
 379     }
 380   }
 381   return res;
 382 }
 383 
 384 void LinearAllocBlock::print_on(outputStream* st) const {
 385   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 386             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 387             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 388 }
 389 
 390 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 391   st->print_cr("COMPACTIBLE FREELIST SPACE");
 392   st->print_cr(" Space:");
 393   Space::print_on(st);
 394 
 395   st->print_cr("promoInfo:");
 396   _promoInfo.print_on(st);
 397 
 398   st->print_cr("_smallLinearAllocBlock");
 399   _smallLinearAllocBlock.print_on(st);
 400 
 401   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 402 
 403   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 404 }
 405 
 406 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 407 const {
 408   reportIndexedFreeListStatistics(st);
 409   st->print_cr("Layout of Indexed Freelists");
 410   st->print_cr("---------------------------");
 411   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 412   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 413     _indexedFreeList[i].print_on(st);
 414     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 415       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 416                    p2i(fc), p2i((HeapWord*)fc + i),
 417                    fc->cantCoalesce() ? "\t CC" : "");
 418     }
 419   }
 420 }
 421 
 422 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 423 const {
 424   _promoInfo.print_on(st);
 425 }
 426 
 427 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 428 const {
 429   _dictionary->report_statistics(st);
 430   st->print_cr("Layout of Freelists in Tree");
 431   st->print_cr("---------------------------");
 432   _dictionary->print_free_lists(st);
 433 }
 434 
 435 class BlkPrintingClosure: public BlkClosure {
 436   const CMSCollector*             _collector;
 437   const CompactibleFreeListSpace* _sp;
 438   const CMSBitMap*                _live_bit_map;
 439   const bool                      _post_remark;
 440   outputStream*                   _st;
 441 public:
 442   BlkPrintingClosure(const CMSCollector* collector,
 443                      const CompactibleFreeListSpace* sp,
 444                      const CMSBitMap* live_bit_map,
 445                      outputStream* st):
 446     _collector(collector),
 447     _sp(sp),
 448     _live_bit_map(live_bit_map),
 449     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 450     _st(st) { }
 451   size_t do_blk(HeapWord* addr);
 452 };
 453 
 454 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 455   size_t sz = _sp->block_size_no_stall(addr, _collector);
 456   assert(sz != 0, "Should always be able to compute a size");
 457   if (_sp->block_is_obj(addr)) {
 458     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 459     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 460       p2i(addr),
 461       dead ? "dead" : "live",
 462       sz,
 463       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 464     if (CMSPrintObjectsInDump && !dead) {
 465       oop(addr)->print_on(_st);
 466       _st->print_cr("--------------------------------------");
 467     }
 468   } else { // free block
 469     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 470       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 471     if (CMSPrintChunksInDump) {
 472       ((FreeChunk*)addr)->print_on(_st);
 473       _st->print_cr("--------------------------------------");
 474     }
 475   }
 476   return sz;
 477 }
 478 
 479 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 480   st->print_cr("=========================");
 481   st->print_cr("Block layout in CMS Heap:");
 482   st->print_cr("=========================");
 483   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 484   blk_iterate(&bpcl);
 485 
 486   st->print_cr("=======================================");
 487   st->print_cr("Order & Layout of Promotion Info Blocks");
 488   st->print_cr("=======================================");
 489   print_promo_info_blocks(st);
 490 
 491   st->print_cr("===========================");
 492   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 494   print_indexed_free_lists(st);
 495 
 496   st->print_cr("=================================");
 497   st->print_cr("Order of Free Lists in Dictionary");
 498   st->print_cr("=================================");
 499   print_dictionary_free_lists(st);
 500 }
 501 
 502 
 503 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 504   assert_lock_strong(&_freelistLock);
 505   Log(gc, freelist, stats) log;
 506   if (!log.is_debug()) {
 507     return;
 508   }
 509   log.debug("%s", title);
 510 
 511   LogStream out(log.debug());
 512   _dictionary->report_statistics(&out);
 513 
 514   if (log.is_trace()) {
 515     LogStream trace_out(log.trace());
 516     reportIndexedFreeListStatistics(&trace_out);
 517     size_t total_size = totalSizeInIndexedFreeLists() +
 518                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 519     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 520   }
 521 }
 522 
 523 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 524   assert_lock_strong(&_freelistLock);
 525   st->print_cr("Statistics for IndexedFreeLists:");
 526   st->print_cr("--------------------------------");
 527   size_t total_size = totalSizeInIndexedFreeLists();
 528   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 529   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 530   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 531   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 532   if (free_blocks != 0) {
 533     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 534   }
 535 }
 536 
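// Total number of free chunks across all indexed free lists.  Debug builds
// re-walk each list to verify the cached per-list counts.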
 537 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 538   size_t res = 0;
 539   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 540     debug_only(
 541       ssize_t recount = 0;
 542       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 543          fc = fc->next()) {
 544         recount += 1;
 545       }
 546       assert(recount == _indexedFreeList[i].count(),
 547         "Incorrect count in list");
 548     )
 549     res += _indexedFreeList[i].count();
 550   }
 551   return res;
 552 }
 553 
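// Scan downward from the largest indexed size and return the size of the
// first non-empty list, or 0 if all indexed lists are empty.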
 554 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 555   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 556     if (_indexedFreeList[i].head() != NULL) {
 557       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 558       return (size_t)i;
 559     }
 560   }
 561   return 0;
 562 }
 563 
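// Move the end of the space to 'value' and resize the block offset table to
// match.  When growing, the newly exposed region is added to the free lists,
// coalescing it with the last free block when possible.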
 564 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 565   HeapWord* prevEnd = end();
 566   assert(prevEnd != value, "unnecessary set_end call");
 567   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 568         "New end is below unallocated block");
 569   _end = value;
 570   if (prevEnd != NULL) {
 571     // Resize the underlying block offset table.
 572     _bt.resize(pointer_delta(value, bottom()));
 573     if (value <= prevEnd) {
 574       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 575              "New end is below unallocated block");
 576     } else {
 577       // Now, take this new chunk and add it to the free blocks.
 578       // Note that the BOT has not yet been updated for this block.
 579       size_t newFcSize = pointer_delta(value, prevEnd);
 580       // Add the block to the free lists, if possible coalescing it
 581       // with the last free block, and update the BOT and census data.
 582       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 583     }
 584   }
 585 }
 586 
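// A dirty-card-to-oop closure specialized for CompactibleFreeListSpace.
// walk_mem_region_with_cl() dispatches to either the parallel or the
// sequential (nopar) block-walking variant, depending on '_parallel'.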
 587 class FreeListSpaceDCTOC : public FilteringDCTOC {
 588   CompactibleFreeListSpace* _cfls;
 589   CMSCollector* _collector;
 590   bool _parallel;
 591 protected:
 592   // Override.
 593 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 594   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 595                                        HeapWord* bottom, HeapWord* top, \
 596                                        ClosureType* cl);                \
 597       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 598                                        HeapWord* bottom, HeapWord* top, \
 599                                        ClosureType* cl);                \
 600     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 601                                        HeapWord* bottom, HeapWord* top, \
 602                                        ClosureType* cl)
 603   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 604   walk_mem_region_with_cl_DECL(FilteringClosure);
 605 
 606 public:
 607   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 608                      CMSCollector* collector,
 609                      ExtendedOopClosure* cl,
 610                      CardTableModRefBS::PrecisionStyle precision,
 611                      HeapWord* boundary,
 612                      bool parallel) :
 613     FilteringDCTOC(sp, cl, precision, boundary),
 614     _cfls(sp), _collector(collector), _parallel(parallel) {}
 615 };
 616 
 617 // We de-virtualize the block-related calls below, since we know that our
 618 // space is a CompactibleFreeListSpace.
 619 
 620 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 621 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 622                                                  HeapWord* bottom,              \
 623                                                  HeapWord* top,                 \
 624                                                  ClosureType* cl) {             \
 625    if (_parallel) {                                                             \
 626      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 627    } else {                                                                     \
 628      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 629    }                                                                            \
 630 }                                                                               \
 631 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 632                                                      HeapWord* bottom,          \
 633                                                      HeapWord* top,             \
 634                                                      ClosureType* cl) {         \
 635   /* Skip parts that are before "mr", in case "block_start" sent us             \
 636      back too far. */                                                           \
 637   HeapWord* mr_start = mr.start();                                              \
 638   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 639   HeapWord* next = bottom + bot_size;                                           \
 640   while (next < mr_start) {                                                     \
 641     bottom = next;                                                              \
 642     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 643     next = bottom + bot_size;                                                   \
 644   }                                                                             \
 645                                                                                 \
 646   while (bottom < top) {                                                        \
 647     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 648         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 649                     oop(bottom)) &&                                             \
 650         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 651       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 652       bottom += _cfls->adjustObjectSize(word_sz);                               \
 653     } else {                                                                    \
 654       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 655     }                                                                           \
 656   }                                                                             \
 657 }                                                                               \
 658 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 659                                                        HeapWord* bottom,        \
 660                                                        HeapWord* top,           \
 661                                                        ClosureType* cl) {       \
 662   /* Skip parts that are before "mr", in case "block_start" sent us             \
 663      back too far. */                                                           \
 664   HeapWord* mr_start = mr.start();                                              \
 665   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 666   HeapWord* next = bottom + bot_size;                                           \
 667   while (next < mr_start) {                                                     \
 668     bottom = next;                                                              \
 669     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 670     next = bottom + bot_size;                                                   \
 671   }                                                                             \
 672                                                                                 \
 673   while (bottom < top) {                                                        \
 674     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 675         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 676                     oop(bottom)) &&                                             \
 677         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 678       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 679       bottom += _cfls->adjustObjectSize(word_sz);                               \
 680     } else {                                                                    \
 681       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 682     }                                                                           \
 683   }                                                                             \
 684 }
 685 
 686 // (There are only two of these, rather than N, because the split is due
 687 // only to the introduction of the FilteringClosure, a local part of the
 688 // impl of this abstraction.)
 689 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 690 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 691 
 692 DirtyCardToOopClosure*
 693 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 694                                       CardTableModRefBS::PrecisionStyle precision,
 695                                       HeapWord* boundary,
 696                                       bool parallel) {
 697   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 698 }
 699 
 700 
 701 // Note on locking for the space iteration functions:
 702 // since the collector's iteration activities are concurrent with
 703 // allocation activities by mutators, absent a suitable mutual exclusion
 704 // mechanism the iterators may go awry. For instance a block being iterated
 705 // may suddenly be allocated or divided up and part of it allocated and
 706 // so on.
 707 
 708 // Apply the given closure to each block in the space.
 709 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 710   assert_lock_strong(freelistLock());
 711   HeapWord *cur, *limit;
 712   for (cur = bottom(), limit = end(); cur < limit;
 713        cur += cl->do_blk_careful(cur));
 714 }
 715 
 716 // Apply the given closure to each block in the space.
 717 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 718   assert_lock_strong(freelistLock());
 719   HeapWord *cur, *limit;
 720   for (cur = bottom(), limit = end(); cur < limit;
 721        cur += cl->do_blk(cur));
 722 }
 723 
 724 // Apply the given closure to each oop in the space.
 725 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 726   assert_lock_strong(freelistLock());
 727   HeapWord *cur, *limit;
 728   size_t curSize;
 729   for (cur = bottom(), limit = end(); cur < limit;
 730        cur += curSize) {
 731     curSize = block_size(cur);
 732     if (block_is_obj(cur)) {
 733       oop(cur)->oop_iterate(cl);
 734     }
 735   }
 736 }
 737 
 738 // NOTE: In the following methods, in order to safely be able to
 739 // apply the closure to an object, we need to be sure that the
 740 // object has been initialized. We are guaranteed that an object
 741 // is initialized if we are holding the Heap_lock with the
 742 // world stopped.
 743 void CompactibleFreeListSpace::verify_objects_initialized() const {
 744   if (is_init_completed()) {
 745     assert_locked_or_safepoint(Heap_lock);
 746     if (Universe::is_fully_initialized()) {
 747       guarantee(SafepointSynchronize::is_at_safepoint(),
 748                 "Required for objects to be initialized");
 749     }
 750   } // else make a concession at vm start-up
 751 }
 752 
 753 // Apply the given closure to each object in the space
 754 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 755   assert_lock_strong(freelistLock());
 756   NOT_PRODUCT(verify_objects_initialized());
 757   HeapWord *cur, *limit;
 758   size_t curSize;
 759   for (cur = bottom(), limit = end(); cur < limit;
 760        cur += curSize) {
 761     curSize = block_size(cur);
 762     if (block_is_obj(cur)) {
 763       blk->do_object(oop(cur));
 764     }
 765   }
 766 }
 767 
// Apply the given closure to each live object in the space.
// The usage of CompactibleFreeListSpace by ConcurrentMarkSweepGeneration
// for concurrent GCs allows objects in the space to hold references to
// objects that are no longer valid.  For example, an object may reference
// another object that has already been swept up (collected).  This method
// uses obj_is_alive() to determine whether it is safe to apply the closure
// to an object.  See obj_is_alive() for details on how the liveness of an
// object is decided.
 777 
 778 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 779   assert_lock_strong(freelistLock());
 780   NOT_PRODUCT(verify_objects_initialized());
 781   HeapWord *cur, *limit;
 782   size_t curSize;
 783   for (cur = bottom(), limit = end(); cur < limit;
 784        cur += curSize) {
 785     curSize = block_size(cur);
 786     if (block_is_obj(cur) && obj_is_alive(cur)) {
 787       blk->do_object(oop(cur));
 788     }
 789   }
 790 }
 791 
 792 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 793                                                   UpwardsObjectClosure* cl) {
 794   assert_locked(freelistLock());
 795   NOT_PRODUCT(verify_objects_initialized());
 796   assert(!mr.is_empty(), "Should be non-empty");
 797   // We use MemRegion(bottom(), end()) rather than used_region() below
 798   // because the two are not necessarily equal for some kinds of
 799   // spaces, in particular, certain kinds of free list spaces.
 800   // We could use the more complicated but more precise:
 801   // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
 802   // but the slight imprecision seems acceptable in the assertion check.
 803   assert(MemRegion(bottom(), end()).contains(mr),
 804          "Should be within used space");
 805   HeapWord* prev = cl->previous();   // max address from last time
 806   if (prev >= mr.end()) { // nothing to do
 807     return;
 808   }
 809   // This assert will not work when we go from cms space to perm
 810   // space, and use same closure. Easy fix deferred for later. XXX YSR
 811   // assert(prev == NULL || contains(prev), "Should be within space");
 812 
 813   bool last_was_obj_array = false;
 814   HeapWord *blk_start_addr, *region_start_addr;
 815   if (prev > mr.start()) {
 816     region_start_addr = prev;
 817     blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
 821     // Relax the assertion so that the case where the unallocated
 822     // block is maintained and "prev" is beyond the unallocated
 823     // block does not cause the assertion to fire.
 824     assert((BlockOffsetArrayUseUnallocatedBlock &&
 825             (!is_in(prev))) ||
 826            (blk_start_addr == block_start(region_start_addr)), "invariant");
 827   } else {
 828     region_start_addr = mr.start();
 829     blk_start_addr    = block_start(region_start_addr);
 830   }
 831   HeapWord* region_end_addr = mr.end();
 832   MemRegion derived_mr(region_start_addr, region_end_addr);
 833   while (blk_start_addr < region_end_addr) {
 834     const size_t size = block_size(blk_start_addr);
 835     if (block_is_obj(blk_start_addr)) {
 836       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 837     } else {
 838       last_was_obj_array = false;
 839     }
 840     blk_start_addr += size;
 841   }
 842   if (!last_was_obj_array) {
 843     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 844            "Should be within (closed) used space");
 845     assert(blk_start_addr > prev, "Invariant");
 846     cl->set_previous(blk_start_addr); // min address for next time
 847   }
 848 }
 849 
 850 // Callers of this iterator beware: The closure application should
 851 // be robust in the face of uninitialized objects and should (always)
 852 // return a correct size so that the next addr + size below gives us a
 853 // valid block boundary. [See for instance,
 854 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 855 // in ConcurrentMarkSweepGeneration.cpp.]
 856 HeapWord*
 857 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 858   ObjectClosureCareful* cl) {
 859   assert_lock_strong(freelistLock());
 860   // Can't use used_region() below because it may not necessarily
 861   // be the same as [bottom(),end()); although we could
 862   // use [used_region().start(),align_up(used_region().end(),CardSize)),
 863   // that appears too cumbersome, so we just do the simpler check
 864   // in the assertion below.
 865   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 866          "mr should be non-empty and within used space");
 867   HeapWord *addr, *end;
 868   size_t size;
 869   for (addr = block_start_careful(mr.start()), end  = mr.end();
 870        addr < end; addr += size) {
 871     FreeChunk* fc = (FreeChunk*)addr;
 872     if (fc->is_free()) {
 873       // Since we hold the free list lock, which protects direct
 874       // allocation in this generation by mutators, a free object
 875       // will remain free throughout this iteration code.
 876       size = fc->size();
 877     } else {
 878       // Note that the object need not necessarily be initialized,
 879       // because (for instance) the free list lock does NOT protect
 880       // object initialization. The closure application below must
 881       // therefore be correct in the face of uninitialized objects.
 882       size = cl->do_object_careful_m(oop(addr), mr);
 883       if (size == 0) {
 884         // An unparsable object found. Signal early termination.
 885         return addr;
 886       }
 887     }
 888   }
 889   return NULL;
 890 }
 891 
 892 
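// Locate the start of the block containing p via the block offset table.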
 893 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 894   NOT_PRODUCT(verify_objects_initialized());
 895   return _bt.block_start(p);
 896 }
 897 
 898 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 899   return _bt.block_start_careful(p);
 900 }
 901 
 902 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 903   NOT_PRODUCT(verify_objects_initialized());
 904   // This must be volatile, or else there is a danger that the compiler
 905   // will compile the code below into a sometimes-infinite loop, by keeping
 906   // the value read the first time in a register.
 907   while (true) {
 908     // We must do this until we get a consistent view of the object.
 909     if (FreeChunk::indicatesFreeChunk(p)) {
 910       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 911       size_t res = fc->size();
 912 
 913       // Bugfix for systems with weak memory model (PPC64/IA64). The
 914       // block's free bit was set and we have read the size of the
 915       // block. Acquire and check the free bit again. If the block is
 916       // still free, the read size is correct.
 917       OrderAccess::acquire();
 918 
 919       // If the object is still a free chunk, return the size, else it
 920       // has been allocated so try again.
 921       if (FreeChunk::indicatesFreeChunk(p)) {
 922         assert(res != 0, "Block size should not be 0");
 923         return res;
 924       }
 925     } else {
 926       // Ensure klass read before size.
 927       Klass* k = oop(p)->klass_or_null_acquire();
 928       if (k != NULL) {
 929         assert(k->is_klass(), "Should really be klass oop.");
 930         oop o = (oop)p;
 931         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 932 
 933         size_t res = o->size_given_klass(k);
 934         res = adjustObjectSize(res);
 935         assert(res != 0, "Block size should not be 0");
 936         return res;
 937       }
 938     }
 939   }
 940 }
 941 
 942 // TODO: Now that is_parsable is gone, we should combine these two functions.
 943 // A variant of the above that uses the Printezis bits for
 944 // unparsable but allocated objects. This avoids any possible
 945 // stalls waiting for mutators to initialize objects, and is
 946 // thus potentially faster than the variant above. However,
 947 // this variant may return a zero size for a block that is
 948 // under mutation and for which a consistent size cannot be
 949 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 950 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 951                                                      const CMSCollector* c)
 952 const {
 953   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 954   // This must be volatile, or else there is a danger that the compiler
 955   // will compile the code below into a sometimes-infinite loop, by keeping
 956   // the value read the first time in a register.
 957   DEBUG_ONLY(uint loops = 0;)
 958   while (true) {
 959     // We must do this until we get a consistent view of the object.
 960     if (FreeChunk::indicatesFreeChunk(p)) {
 961       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 962       size_t res = fc->size();
 963 
 964       // Bugfix for systems with weak memory model (PPC64/IA64). The
 965       // free bit of the block was set and we have read the size of
 966       // the block. Acquire and check the free bit again. If the
 967       // block is still free, the read size is correct.
 968       OrderAccess::acquire();
 969 
 970       if (FreeChunk::indicatesFreeChunk(p)) {
 971         assert(res != 0, "Block size should not be 0");
 972         assert(loops == 0, "Should be 0");
 973         return res;
 974       }
 975     } else {
 976       // Ensure klass read before size.
 977       Klass* k = oop(p)->klass_or_null_acquire();
 978       if (k != NULL) {
 979         assert(k->is_klass(), "Should really be klass oop.");
 980         oop o = (oop)p;
 981         assert(o->is_oop(), "Should be an oop");
 982 
 983         size_t res = o->size_given_klass(k);
 984         res = adjustObjectSize(res);
 985         assert(res != 0, "Block size should not be 0");
 986         return res;
 987       } else {
 988         // May return 0 if P-bits not present.
 989         return c->block_size_if_printezis_bits(p);
 990       }
 991     }
 992     assert(loops == 0, "Can loop at most once");
 993     DEBUG_ONLY(loops++;)
 994   }
 995 }
 996 
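// Like block_size(), but for use when no parallel allocation can be racing
// with the query, so the free bit and size can be read without the
// acquire-and-recheck protocol used above.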
 997 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
 998   NOT_PRODUCT(verify_objects_initialized());
 999   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1000   FreeChunk* fc = (FreeChunk*)p;
1001   if (fc->is_free()) {
1002     return fc->size();
1003   } else {
1004     // Ignore mark word because this may be a recently promoted
1005     // object whose mark word is used to chain together grey
1006     // objects (the last one would have a null value).
1007     assert(oop(p)->is_oop(true), "Should be an oop");
1008     return adjustObjectSize(oop(p)->size());
1009   }
1010 }
1011 
1012 // This implementation assumes that the property of "being an object" is
1013 // stable.  But being a free chunk may not be (because of parallel
1014 // promotion.)
1015 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1016   FreeChunk* fc = (FreeChunk*)p;
1017   assert(is_in_reserved(p), "Should be in space");
1018   if (FreeChunk::indicatesFreeChunk(p)) return false;
1019   Klass* k = oop(p)->klass_or_null_acquire();
1020   if (k != NULL) {
1021     // Ignore mark word because it may have been used to
1022     // chain together promoted objects (the last one
1023     // would have a null value).
1024     assert(oop(p)->is_oop(true), "Should be an oop");
1025     return true;
1026   } else {
1027     return false;  // Was not an object at the start of collection.
1028   }
1029 }
1030 
1031 // Check if the object is alive. This fact is checked either by consulting
1032 // the main marking bitmap in the sweeping phase or, if it's a permanent
1033 // generation and we're not in the sweeping phase, by checking the
1034 // perm_gen_verify_bit_map where we store the "deadness" information if
1035 // we did not sweep the perm gen in the most recent previous GC cycle.
1036 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1037   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1038          "Else races are possible");
1039   assert(block_is_obj(p), "The address should point to an object");
1040 
1041   // If we're sweeping, we use object liveness information from the main bit map
1042   // for both perm gen and old gen.
1043   // We don't need to lock the bitmap (live_map or dead_map below), because
1044   // EITHER we are in the middle of the sweeping phase, and the
1045   // main marking bit map (live_map below) is locked,
1046   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1047   // is stable, because it's mutated only in the sweeping phase.
1048   // NOTE: This method is also used by jmap where, if class unloading is
1049   // off, the results can return "false" for legitimate perm objects,
1050   // when we are not in the midst of a sweeping phase, which can result
1051   // in jmap not reporting certain perm gen objects. This will be moot
1052   // if/when the perm gen goes away in the future.
1053   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1054     CMSBitMap* live_map = _collector->markBitMap();
1055     return live_map->par_isMarked((HeapWord*) p);
1056   }
1057   return true;
1058 }
1059 
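// Like block_is_obj(), but for use when no parallel allocation can be racing
// with the query, so the free bit can be read directly.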
1060 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1061   FreeChunk* fc = (FreeChunk*)p;
1062   assert(is_in_reserved(p), "Should be in space");
1063   assert(_bt.block_start(p) == p, "Should be a block boundary");
1064   if (!fc->is_free()) {
1065     // Ignore mark word because it may have been used to
1066     // chain together promoted objects (the last one
1067     // would have a null value).
1068     assert(oop(p)->is_oop(true), "Should be an oop");
1069     return true;
1070   }
1071   return false;
1072 }
1073 
1074 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1075 // approximate answer if you don't hold the freelistlock when you call this.
1076 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1077   size_t size = 0;
1078   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1079     debug_only(
1080       // We may be calling here without the lock in which case we
1081       // won't do this modest sanity check.
1082       if (freelistLock()->owned_by_self()) {
1083         size_t total_list_size = 0;
1084         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1085           fc = fc->next()) {
1086           total_list_size += i;
1087         }
1088         assert(total_list_size == i * _indexedFreeList[i].count(),
1089                "Count in list is incorrect");
1090       }
1091     )
1092     size += i * _indexedFreeList[i].count();
1093   }
1094   return size;
1095 }
1096 
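// MT-safe allocation entry point: take the free list lock (without a
// safepoint check) and delegate to allocate().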
1097 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1098   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1099   return allocate(size);
1100 }
1101 
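// Carve a chunk of the requested size out of the remainder of the small
// linear allocation block.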
1102 HeapWord*
1103 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1104   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1105 }
1106 
1107 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1108   assert_lock_strong(freelistLock());
1109   HeapWord* res = NULL;
1110   assert(size == adjustObjectSize(size),
1111          "use adjustObjectSize() before calling into allocate()");
1112 
1113   res = allocate_adaptive_freelists(size);
1114 
1115   if (res != NULL) {
1116     // check that res does lie in this space!
1117     assert(is_in_reserved(res), "Not in this space!");
1118     assert(is_aligned((void*)res), "alignment check");
1119 
1120     FreeChunk* fc = (FreeChunk*)res;
1121     fc->markNotFree();
1122     assert(!fc->is_free(), "shouldn't be marked free");
1123     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1124     // Verify that the block offset table shows this to
1125     // be a single block, but not one which is unallocated.
1126     _bt.verify_single_block(res, size);
1127     _bt.verify_not_unallocated(res, size);
1128     // mangle a just allocated object with a distinct pattern.
1129     debug_only(fc->mangleAllocated(size));
1130   }
1131 
1132   return res;
1133 }
1134 
1135 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1136   assert_lock_strong(freelistLock());
1137   HeapWord* res = NULL;
1138   assert(size == adjustObjectSize(size),
1139          "use adjustObjectSize() before calling into allocate()");
1140 
  // Strategy:
  //   if small:
  //     try the exact size from the small-object indexed lists,
  //     then the small or large linear allocation block (linAB) as appropriate,
  //     then take from lists of greater-sized chunks;
  //   else:
  //     try the dictionary,
  //     then the small or large linear allocation block if it has the space.
  // Try allocating the exact size from the indexed lists first.
1150   if (size < IndexSetSize) {
1151     res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
1153       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1154         "Not removed from free list");
1155       // no block offset table adjustment is necessary on blocks in
1156       // the indexed lists.
1157 
1158     // Try allocating from the small LinAB
1159     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1160         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
        // If successful, the above also adjusts the block offset table.
        // Note that this call will refill the LinAB to satisfy the
        // request.  This is different than evm.
1165         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1166     } else {
1167       // Raid the exact free lists larger than size, even if they are not
1168       // overpopulated.
1169       res = (HeapWord*) getChunkFromGreater(size);
1170     }
1171   } else {
1172     // Big objects get allocated directly from the dictionary.
1173     res = (HeapWord*) getChunkFromDictionaryExact(size);
1174     if (res == NULL) {
1175       // Try hard not to fail since an allocation failure will likely
1176       // trigger a synchronous GC.  Try to get the space from the
1177       // allocation blocks.
1178       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1179     }
1180   }
1181 
1182   return res;
1183 }
1184 
1185 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1186 // when promoting obj.
1187 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1188   // Depending on the object size, expansion may require refilling either a
1189   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1190   // is added because the dictionary may over-allocate to avoid fragmentation.
1191   size_t space = obj_size;
1192   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1193   return space;
1194 }
1195 
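// Satisfy a request of numWords by carving it out of a larger chunk: first
// search the indexed lists at sizes of at least numWords + MinChunkSize,
// then fall back to the dictionary.  The remainder of the split chunk is
// returned to the free lists.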
1196 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1197   FreeChunk* ret;
1198 
1199   assert(numWords >= MinChunkSize, "Size is less than minimum");
1200   assert(linearAllocationWouldFail() || bestFitFirst(),
1201     "Should not be here");
1202 
1203   size_t i;
1204   size_t currSize = numWords + MinChunkSize;
1205   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1206   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1207     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1208     if (fl->head()) {
1209       ret = getFromListGreater(fl, numWords);
1210       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1211       return ret;
1212     }
1213   }
1214 
1215   currSize = MAX2((size_t)SmallForDictionary,
1216                   (size_t)(numWords + MinChunkSize));
1217 
1218   /* Try to get a chunk that satisfies request, while avoiding
1219      fragmentation that can't be handled. */
1220   {
1221     ret =  dictionary()->get_chunk(currSize);
1222     if (ret != NULL) {
1223       assert(ret->size() - numWords >= MinChunkSize,
1224              "Chunk is too small");
1225       _bt.allocated((HeapWord*)ret, ret->size());
1226       /* Carve returned chunk. */
1227       (void) splitChunkAndReturnRemainder(ret, numWords);
1228       /* Label this as no longer a free chunk. */
1229       assert(ret->is_free(), "This chunk should be free");
1230       ret->link_prev(NULL);
1231     }
1232     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1233     return ret;
1234   }
1235   ShouldNotReachHere();
1236 }
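     // The search above starts at numWords + MinChunkSize so that whatever is
     // found leaves a remainder of at least MinChunkSize after carving, i.e. a
     // remainder that can stand alone as a free chunk.  As a hedged illustration
     // (sizes made up): with MinChunkSize == 4, a 20-word request scans the
     // indexed lists from size 24 upward, and a 24-word hit is split into the
     // 20 words returned plus a 4-word free remainder.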
1237 
1238 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1239   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1240   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1241 }
1242 
1243 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1244   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1245          (_smallLinearAllocBlock._word_size == fc->size()),
1246          "Linear allocation block shows incorrect size");
1247   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1248           (_smallLinearAllocBlock._word_size == fc->size()));
1249 }
1250 
1251 // Check if the purported free chunk is present either as a linear
1252 // allocation block, the size-indexed table of (smaller) free blocks,
1253 // or the larger free blocks kept in the binary tree dictionary.
1254 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1255   if (verify_chunk_is_linear_alloc_block(fc)) {
1256     return true;
1257   } else if (fc->size() < IndexSetSize) {
1258     return verifyChunkInIndexedFreeLists(fc);
1259   } else {
1260     return dictionary()->verify_chunk_in_free_list(fc);
1261   }
1262 }
1263 
1264 #ifndef PRODUCT
1265 void CompactibleFreeListSpace::assert_locked() const {
1266   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1267 }
1268 
1269 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1270   CMSLockVerifier::assert_locked(lock);
1271 }
1272 #endif
1273 
1274 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1275   // In the parallel case, the main thread holds the free list lock
1276   // on behalf of the parallel threads.
1277   FreeChunk* fc;
1278   {
1279     // If GC is parallel, this might be called by several threads.
1280     // This should be rare enough that the locking overhead won't affect
1281     // the sequential code.
1282     MutexLockerEx x(parDictionaryAllocLock(),
1283                     Mutex::_no_safepoint_check_flag);
1284     fc = getChunkFromDictionary(size);
1285   }
1286   if (fc != NULL) {
1287     fc->dontCoalesce();
1288     assert(fc->is_free(), "Should be free, but not coalescable");
1289     // Verify that the block offset table shows this to
1290     // be a single block, but not one which is unallocated.
1291     _bt.verify_single_block((HeapWord*)fc, fc->size());
1292     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1293   }
1294   return fc;
1295 }
1296 
1297 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1298   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1299   assert_locked();
1300 
1301   // if we are tracking promotions, then first ensure space for
1302   // promotion (including spooling space for saving header if necessary).
1303   // then allocate and copy, then track promoted info if needed.
1304   // When tracking (see PromotionInfo::track()), the mark word may
1305   // be displaced and in this case restoration of the mark word
1306   // occurs in the (oop_since_save_marks_)iterate phase.
1307   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1308     return NULL;
1309   }
1310   // Call the allocate(size_t, bool) form directly to avoid the
1311   // additional call through the allocate(size_t) form.  Having
1312   // the compiler inline the call is problematic because allocate(size_t)
1313   // is a virtual method.
1314   HeapWord* res = allocate(adjustObjectSize(obj_size));
1315   if (res != NULL) {
1316     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1317     // if we should be tracking promotions, do so.
1318     if (_promoInfo.tracking()) {
1319         _promoInfo.track((PromotedObject*)res);
1320     }
1321   }
1322   return oop(res);
1323 }
1324 
1325 HeapWord*
1326 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1327   assert_locked();
1328   assert(size >= MinChunkSize, "minimum chunk size");
1329   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1330     "maximum from smallLinearAllocBlock");
1331   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1332 }
1333 
1334 HeapWord*
1335 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1336                                                        size_t size) {
1337   assert_locked();
1338   assert(size >= MinChunkSize, "too small");
1339   HeapWord* res = NULL;
1340   // Try to do linear allocation from blk; bail out if the block is currently empty.
1341   if (blk->_word_size == 0) {
1342     // We have probably been unable to fill this either in the prologue or
1343     // when it was exhausted at the last linear allocation. Bail out until
1344     // next time.
1345     assert(blk->_ptr == NULL, "consistency check");
1346     return NULL;
1347   }
1348   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1349   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1350   if (res != NULL) return res;
1351 
1352   // about to exhaust this linear allocation block
1353   if (blk->_word_size == size) { // exactly satisfied
1354     res = blk->_ptr;
1355     _bt.allocated(res, blk->_word_size);
1356   } else if (size + MinChunkSize <= blk->_refillSize) {
1357     size_t sz = blk->_word_size;
1358     // Update _unallocated_block if the size is such that the chunk would be
1359     // returned to the indexed free list.  All other chunks in the indexed
1360     // free lists are allocated from the dictionary, so _unallocated_block
1361     // has already been adjusted for them.  Do it here so that the cost is
1362     // paid for all chunks added back to the indexed free lists.
1363     if (sz < SmallForDictionary) {
1364       _bt.allocated(blk->_ptr, sz);
1365     }
1366     // Return the chunk that isn't big enough, and then refill below.
1367     addChunkToFreeLists(blk->_ptr, sz);
1368     split_birth(sz);
1369     // Don't keep statistics on adding back chunk from a LinAB.
1370   } else {
1371     // A refilled block would not satisfy the request.
1372     return NULL;
1373   }
1374 
1375   blk->_ptr = NULL; blk->_word_size = 0;
1376   refillLinearAllocBlock(blk);
1377   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1378          "block was replenished");
1379   if (res != NULL) {
1380     split_birth(size);
1381     repairLinearAllocBlock(blk);
1382   } else if (blk->_ptr != NULL) {
1383     res = blk->_ptr;
1384     size_t blk_size = blk->_word_size;
1385     blk->_word_size -= size;
1386     blk->_ptr  += size;
1387     split_birth(size);
1388     repairLinearAllocBlock(blk);
1389     // Update BOT last so that other (parallel) GC threads see a consistent
1390     // view of the BOT and free blocks.
1391     // Above must occur before BOT is updated below.
1392     OrderAccess::storestore();
1393     _bt.split_block(res, blk_size, size);  // adjust block offset table
1394   }
1395   return res;
1396 }
1397 
1398 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1399                                         LinearAllocBlock* blk,
1400                                         size_t size) {
1401   assert_locked();
1402   assert(size >= MinChunkSize, "too small");
1403 
1404   HeapWord* res = NULL;
1405   // This is the common case.  Keep it simple.
1406   if (blk->_word_size >= size + MinChunkSize) {
1407     assert(blk->_ptr != NULL, "consistency check");
1408     res = blk->_ptr;
1409     // Note that the BOT is up-to-date for the linAB before allocation.  It
1410     // indicates the start of the linAB.  The split_block() updates the
1411     // BOT for the linAB after the allocation (indicates the start of the
1412     // next chunk to be allocated).
1413     size_t blk_size = blk->_word_size;
1414     blk->_word_size -= size;
1415     blk->_ptr  += size;
1416     split_birth(size);
1417     repairLinearAllocBlock(blk);
1418     // Update BOT last so that other (parallel) GC threads see a consistent
1419     // view of the BOT and free blocks.
1420     // Above must occur before BOT is updated below.
1421     OrderAccess::storestore();
1422     _bt.split_block(res, blk_size, size);  // adjust block offset table
1423     _bt.allocated(res, size);
1424   }
1425   return res;
1426 }
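     // A hedged illustration of the remainder path above (numbers made up): a
     // linAB at address P holding 100 words, asked for a 24-word chunk, hands
     // out P, advances _ptr to P + 24 and shrinks _word_size to 76 -- the fast
     // path taken whenever _word_size >= size + MinChunkSize.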
1427 
1428 FreeChunk*
1429 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1430   assert_locked();
1431   assert(size < SmallForDictionary, "just checking");
1432   FreeChunk* res;
1433   res = _indexedFreeList[size].get_chunk_at_head();
1434   if (res == NULL) {
1435     res = getChunkFromIndexedFreeListHelper(size);
1436   }
1437   _bt.verify_not_unallocated((HeapWord*) res, size);
1438   assert(res == NULL || res->size() == size, "Incorrect block size");
1439   return res;
1440 }
1441 
1442 FreeChunk*
1443 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1444   bool replenish) {
1445   assert_locked();
1446   FreeChunk* fc = NULL;
1447   if (size < SmallForDictionary) {
1448     assert(_indexedFreeList[size].head() == NULL ||
1449       _indexedFreeList[size].surplus() <= 0,
1450       "List for this size should be empty or under populated");
1451     // Try best fit in exact lists before replenishing the list
1452     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1453       // Replenish list.
1454       //
1455       // Things tried that failed.
1456       //   Tried allocating out of the two LinAB's first before
1457       // replenishing lists.
1458       //   Tried small linAB of size 256 (size in indexed list)
1459       // and replenishing indexed lists from the small linAB.
1460       //
1461       FreeChunk* newFc = NULL;
1462       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1463       if (replenish_size < SmallForDictionary) {
1464         // Do not replenish from an underpopulated size.
1465         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1466             _indexedFreeList[replenish_size].head() != NULL) {
1467           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1468         } else if (bestFitFirst()) {
1469           newFc = bestFitSmall(replenish_size);
1470         }
1471       }
1472       if (newFc == NULL && replenish_size > size) {
1473         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1474         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1475       }
1476       // Note: The stats update re the split-death of the block obtained above
1477       // will be recorded below, precisely when we know we are going to
1478       // be actually splitting it into more than one piece.
1479       if (newFc != NULL) {
1480         if  (replenish || CMSReplenishIntermediate) {
1481           // Replenish this list and return one block to caller.
1482           size_t i;
1483           FreeChunk *curFc, *nextFc;
1484           size_t num_blk = newFc->size() / size;
1485           assert(num_blk >= 1, "Smaller than requested?");
1486           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1487           if (num_blk > 1) {
1488             // we are sure we will be splitting the block just obtained
1489             // into multiple pieces; record the split-death of the original
1490             splitDeath(replenish_size);
1491           }
1492           // carve up and link blocks 0, ..., num_blk - 2
1493           // The last chunk is not added to the lists but is returned as the
1494           // free chunk.
1495           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1496                i = 0;
1497                i < (num_blk - 1);
1498                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1499                i++) {
1500             curFc->set_size(size);
1501             // Don't record this as a return in order to try and
1502             // determine the "returns" from a GC.
1503             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1504             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1505             _bt.mark_block((HeapWord*)curFc, size);
1506             split_birth(size);
1507             // Don't record the initial population of the indexed list
1508             // as a split birth.
1509           }
1510 
1511           // check that the arithmetic was OK above
1512           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1513             "inconsistency in carving newFc");
1514           curFc->set_size(size);
1515           _bt.mark_block((HeapWord*)curFc, size);
1516           split_birth(size);
1517           fc = curFc;
1518         } else {
1519           // Return entire block to caller
1520           fc = newFc;
1521         }
1522       }
1523     }
1524   } else {
1525     // Get a free chunk from the free chunk dictionary to be returned to
1526     // replenish the indexed free list.
1527     fc = getChunkFromDictionaryExact(size);
1528   }
1529   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1530   return fc;
1531 }
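     // A hedged sketch of the replenish path above (flag value illustrative):
     // for an 8-word request with CMSIndexedFreeListReplenish at 4,
     // replenish_size is 32; a 32-word chunk obtained from a larger indexed
     // list (or, recursively, from the dictionary) is carved into four 8-word
     // blocks, three of which are appended to _indexedFreeList[8] while the
     // fourth is returned to the caller.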
1532 
1533 FreeChunk*
1534 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1535   assert_locked();
1536   FreeChunk* fc = _dictionary->get_chunk(size);
1537   if (fc == NULL) {
1538     return NULL;
1539   }
1540   _bt.allocated((HeapWord*)fc, fc->size());
1541   if (fc->size() >= size + MinChunkSize) {
1542     fc = splitChunkAndReturnRemainder(fc, size);
1543   }
1544   assert(fc->size() >= size, "chunk too small");
1545   assert(fc->size() < size + MinChunkSize, "chunk too big");
1546   _bt.verify_single_block((HeapWord*)fc, fc->size());
1547   return fc;
1548 }
1549 
1550 FreeChunk*
1551 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1552   assert_locked();
1553   FreeChunk* fc = _dictionary->get_chunk(size);
1554   if (fc == NULL) {
1555     return fc;
1556   }
1557   _bt.allocated((HeapWord*)fc, fc->size());
1558   if (fc->size() == size) {
1559     _bt.verify_single_block((HeapWord*)fc, size);
1560     return fc;
1561   }
1562   assert(fc->size() > size, "get_chunk() guarantee");
1563   if (fc->size() < size + MinChunkSize) {
1564     // Return the chunk to the dictionary and go get a bigger one.
1565     returnChunkToDictionary(fc);
1566     fc = _dictionary->get_chunk(size + MinChunkSize);
1567     if (fc == NULL) {
1568       return NULL;
1569     }
1570     _bt.allocated((HeapWord*)fc, fc->size());
1571   }
1572   assert(fc->size() >= size + MinChunkSize, "tautology");
1573   fc = splitChunkAndReturnRemainder(fc, size);
1574   assert(fc->size() == size, "chunk is wrong size");
1575   _bt.verify_single_block((HeapWord*)fc, size);
1576   return fc;
1577 }
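     // A hedged illustration of the "too small to split" case above (sizes made
     // up, MinChunkSize taken as 4): a 300-word request answered by a 302-word
     // dictionary chunk cannot be split, because the 2-word remainder would be
     // below MinChunkSize; the chunk goes back to the dictionary and a chunk of
     // at least 304 words is requested instead, which then splits into exactly
     // 300 words plus a standalone free remainder.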
1578 
1579 void
1580 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1581   assert_locked();
1582 
1583   size_t size = chunk->size();
1584   _bt.verify_single_block((HeapWord*)chunk, size);
1585   // adjust _unallocated_block downward, as necessary
1586   _bt.freed((HeapWord*)chunk, size);
1587   _dictionary->return_chunk(chunk);
1588 #ifndef PRODUCT
1589   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1590     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1591     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1592     tl->verify_stats();
1593   }
1594 #endif // PRODUCT
1595 }
1596 
1597 void
1598 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1599   assert_locked();
1600   size_t size = fc->size();
1601   _bt.verify_single_block((HeapWord*) fc, size);
1602   _bt.verify_not_unallocated((HeapWord*) fc, size);
1603   _indexedFreeList[size].return_chunk_at_tail(fc);
1604 #ifndef PRODUCT
1605   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1606      _indexedFreeList[size].verify_stats();
1607   }
1608 #endif // PRODUCT
1609 }
1610 
1611 // Add chunk to end of last block -- if it's the largest
1612 // block -- and update BOT and census data. We would
1613 // of course have preferred to coalesce it with the
1614 // last block, but it's currently less expensive to find the
1615 // largest block than it is to find the last.
1616 void
1617 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1618   HeapWord* chunk, size_t     size) {
1619   // check that the chunk does lie in this space!
1620   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1621   // One of the parallel gc task threads may be here
1622   // whilst others are allocating.
1623   Mutex* lock = &_parDictionaryAllocLock;
1624   FreeChunk* ec;
1625   {
1626     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1627     ec = dictionary()->find_largest_dict();  // get largest block
1628     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1629       // It's a coterminal block - we can coalesce.
1630       size_t old_size = ec->size();
1631       coalDeath(old_size);
1632       removeChunkFromDictionary(ec);
1633       size += old_size;
1634     } else {
1635       ec = (FreeChunk*)chunk;
1636     }
1637   }
1638   ec->set_size(size);
1639   debug_only(ec->mangleFreed(size));
1640   if (size < SmallForDictionary) {
1641     lock = _indexedFreeListParLocks[size];
1642   }
1643   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1644   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1645   // record the birth under the lock since the recording involves
1646   // manipulation of the list on which the chunk lives and
1647   // if the chunk is allocated and is the last on the list,
1648   // the list can go away.
1649   coalBirth(size);
1650 }
1651 
1652 void
1653 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1654                                               size_t     size) {
1655   // check that the chunk does lie in this space!
1656   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1657   assert_locked();
1658   _bt.verify_single_block(chunk, size);
1659 
1660   FreeChunk* fc = (FreeChunk*) chunk;
1661   fc->set_size(size);
1662   debug_only(fc->mangleFreed(size));
1663   if (size < SmallForDictionary) {
1664     returnChunkToFreeList(fc);
1665   } else {
1666     returnChunkToDictionary(fc);
1667   }
1668 }
1669 
1670 void
1671 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1672   size_t size, bool coalesced) {
1673   assert_locked();
1674   assert(chunk != NULL, "null chunk");
1675   if (coalesced) {
1676     // repair BOT
1677     _bt.single_block(chunk, size);
1678   }
1679   addChunkToFreeLists(chunk, size);
1680 }
1681 
1682 // We _must_ find the purported chunk on our free lists;
1683 // we assert if we don't.
1684 void
1685 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1686   size_t size = fc->size();
1687   assert_locked();
1688   debug_only(verifyFreeLists());
1689   if (size < SmallForDictionary) {
1690     removeChunkFromIndexedFreeList(fc);
1691   } else {
1692     removeChunkFromDictionary(fc);
1693   }
1694   _bt.verify_single_block((HeapWord*)fc, size);
1695   debug_only(verifyFreeLists());
1696 }
1697 
1698 void
1699 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1700   size_t size = fc->size();
1701   assert_locked();
1702   assert(fc != NULL, "null chunk");
1703   _bt.verify_single_block((HeapWord*)fc, size);
1704   _dictionary->remove_chunk(fc);
1705   // adjust _unallocated_block upward, as necessary
1706   _bt.allocated((HeapWord*)fc, size);
1707 }
1708 
1709 void
1710 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1711   assert_locked();
1712   size_t size = fc->size();
1713   _bt.verify_single_block((HeapWord*)fc, size);
1714   NOT_PRODUCT(
1715     if (FLSVerifyIndexTable) {
1716       verifyIndexedFreeList(size);
1717     }
1718   )
1719   _indexedFreeList[size].remove_chunk(fc);
1720   NOT_PRODUCT(
1721     if (FLSVerifyIndexTable) {
1722       verifyIndexedFreeList(size);
1723     }
1724   )
1725 }
1726 
1727 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1728   /* A hint is the next larger size that has a surplus.
1729      Start search at a size large enough to guarantee that
1730      the excess is >= MinChunkSize. */
1731   size_t start = align_object_size(numWords + MinChunkSize);
1732   if (start < IndexSetSize) {
1733     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1734     size_t    hint = _indexedFreeList[start].hint();
1735     while (hint < IndexSetSize) {
1736       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1737       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1738       if (fl->surplus() > 0 && fl->head() != NULL) {
1739         // Found a list with surplus, reset original hint
1740         // and split out a free chunk which is returned.
1741         _indexedFreeList[start].set_hint(hint);
1742         FreeChunk* res = getFromListGreater(fl, numWords);
1743         assert(res == NULL || res->is_free(),
1744           "Should be returning a free chunk");
1745         return res;
1746       }
1747       hint = fl->hint(); /* keep looking */
1748     }
1749     /* None found. */
1750     it[start].set_hint(IndexSetSize);
1751   }
1752   return NULL;
1753 }
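     // A hedged illustration of the hint chain above (sizes made up,
     // MinChunkSize taken as 4): a 20-word request starts at the 24-word list;
     // if that list's hint points at 40 and _indexedFreeList[40] currently has
     // a surplus, a 40-word chunk is split so that 20 words are returned and a
     // 20-word remainder goes back on the free lists, and list 24's hint is
     // refreshed to 40.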
1754 
1755 /* Requires fl->size >= numWords + MinChunkSize */
1756 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1757   size_t numWords) {
1758   FreeChunk *curr = fl->head();
1759   assert(curr != NULL, "List is empty");
1760   assert(numWords >= MinChunkSize, "Word size is too small");
1761   size_t oldNumWords = curr->size();
1762   assert(oldNumWords >= numWords + MinChunkSize,
1763         "Size of chunks in the list is too small");
1764 
1765   fl->remove_chunk(curr);
1766   // recorded indirectly by splitChunkAndReturnRemainder -
1767   // smallSplit(oldNumWords, numWords);
1768   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1769   // Does anything have to be done for the remainder in terms of
1770   // fixing the card table?
1771   assert(new_chunk == NULL || new_chunk->is_free(),
1772     "Should be returning a free chunk");
1773   return new_chunk;
1774 }
1775 
1776 FreeChunk*
1777 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1778   size_t new_size) {
1779   assert_locked();
1780   size_t size = chunk->size();
1781   assert(size > new_size, "Split from a smaller block?");
1782   assert(is_aligned(chunk), "alignment problem");
1783   assert(size == adjustObjectSize(size), "alignment problem");
1784   size_t rem_sz = size - new_size;
1785   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1786   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1787   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1788   assert(is_aligned(ffc), "alignment problem");
1789   ffc->set_size(rem_sz);
1790   ffc->link_next(NULL);
1791   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1792   // Above must occur before BOT is updated below.
1793   // adjust block offset table
1794   OrderAccess::storestore();
1795   assert(chunk->is_free() && ffc->is_free(), "Error");
1796   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1797   if (rem_sz < SmallForDictionary) {
1798     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1799     bool is_par = Thread::current()->is_GC_task_thread();
1800     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1801     returnChunkToFreeList(ffc);
1802     split(size, rem_sz);
1803     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1804   } else {
1805     returnChunkToDictionary(ffc);
1806     split(size, rem_sz);
1807   }
1808   chunk->set_size(new_size);
1809   return chunk;
1810 }
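     // A hedged illustration of the split above (sizes made up, and assuming
     // the remainder stays below SmallForDictionary): splitting a 300-word
     // chunk for a 200-word request places a 100-word remainder 200 words into
     // the original chunk, returns that remainder to _indexedFreeList[100],
     // and records a split death at 300 with split births at 200 and 100 in
     // the census, while the original chunk is resized to 200 and returned.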
1811 
1812 void
1813 CompactibleFreeListSpace::sweep_completed() {
1814   // Now that space is probably plentiful, refill linear
1815   // allocation blocks as needed.
1816   refillLinearAllocBlocksIfNeeded();
1817 }
1818 
1819 void
1820 CompactibleFreeListSpace::gc_prologue() {
1821   assert_locked();
1822   reportFreeListStatistics("Before GC:");
1823   refillLinearAllocBlocksIfNeeded();
1824 }
1825 
1826 void
1827 CompactibleFreeListSpace::gc_epilogue() {
1828   assert_locked();
1829   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1830   _promoInfo.stopTrackingPromotions();
1831   repairLinearAllocationBlocks();
1832   reportFreeListStatistics("After GC:");
1833 }
1834 
1835 // Iteration support, mostly delegated from a CMS generation
1836 
1837 void CompactibleFreeListSpace::save_marks() {
1838   assert(Thread::current()->is_VM_thread(),
1839          "Global variable should only be set when single-threaded");
1840   // Mark the "end" of the used space at the time of this call;
1841   // note, however, that promoted objects from this point
1842   // on are tracked in the _promoInfo below.
1843   set_saved_mark_word(unallocated_block());
1844 #ifdef ASSERT
1845   // Check the sanity of save_marks() etc.
1846   MemRegion ur    = used_region();
1847   MemRegion urasm = used_region_at_save_marks();
1848   assert(ur.contains(urasm),
1849          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1850          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1851          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1852 #endif
1853   // inform allocator that promotions should be tracked.
1854   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1855   _promoInfo.startTrackingPromotions();
1856 }
1857 
1858 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1859   assert(_promoInfo.tracking(), "No preceding save_marks?");
1860   return _promoInfo.noPromotions();
1861 }
1862 
1863 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1864                                                                             \
1865 void CompactibleFreeListSpace::                                             \
1866 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1867   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1868   /*                                                                        \
1869    * This also restores any displaced headers and removes the elements from \
1870    * the iteration set as they are processed, so that we have a clean slate \
1871    * at the end of the iteration. Note, thus, that if new objects are       \
1872    * promoted as a result of the iteration they are iterated over as well.  \
1873    */                                                                       \
1874   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1875 }
1876 
1877 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1878 
1879 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1880   return _smallLinearAllocBlock._word_size == 0;
1881 }
1882 
1883 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1884   // Fix up linear allocation blocks to look like free blocks
1885   repairLinearAllocBlock(&_smallLinearAllocBlock);
1886 }
1887 
1888 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1889   assert_locked();
1890   if (blk->_ptr != NULL) {
1891     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1892            "Minimum block size requirement");
1893     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1894     fc->set_size(blk->_word_size);
1895     fc->link_prev(NULL);   // mark as free
1896     fc->dontCoalesce();
1897     assert(fc->is_free(), "just marked it free");
1898     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1899   }
1900 }
1901 
1902 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1903   assert_locked();
1904   if (_smallLinearAllocBlock._ptr == NULL) {
1905     assert(_smallLinearAllocBlock._word_size == 0,
1906       "Size of linAB should be zero if the ptr is NULL");
1907     // Reset the linAB refill and allocation size limit.
1908     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1909   }
1910   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1911 }
1912 
1913 void
1914 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1915   assert_locked();
1916   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1917          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1918          "blk invariant");
1919   if (blk->_ptr == NULL) {
1920     refillLinearAllocBlock(blk);
1921   }
1922 }
1923 
1924 void
1925 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1926   assert_locked();
1927   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1928          "linear allocation block should be empty");
1929   FreeChunk* fc;
1930   if (blk->_refillSize < SmallForDictionary &&
1931       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1932     // A linAB's strategy might be to use small sizes to reduce
1933     // fragmentation but still get the benefits of allocation from a
1934     // linAB.
1935   } else {
1936     fc = getChunkFromDictionary(blk->_refillSize);
1937   }
1938   if (fc != NULL) {
1939     blk->_ptr  = (HeapWord*)fc;
1940     blk->_word_size = fc->size();
1941     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1942   }
1943 }
1944 
1945 // Support for compaction
1946 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1947   scan_and_forward(this, cp);
1948   // Prepare_for_compaction() uses the space between live objects
1949   // so that a later phase can skip dead space quickly.  As a result,
1950   // verification of the free lists does not work afterwards.
1951 }
1952 
1953 void CompactibleFreeListSpace::adjust_pointers() {
1954   // In other versions of adjust_pointers(), a bail out
1955   // based on the amount of live data in the generation
1956   // (i.e., if 0, bail out) may be used.
1957   // Cannot test used() == 0 here because the free lists have already
1958   // been mangled by the compaction.
1959 
1960   scan_and_adjust_pointers(this);
1961   // See note about verification in prepare_for_compaction().
1962 }
1963 
1964 void CompactibleFreeListSpace::compact() {
1965   scan_and_compact(this);
1966 }
1967 
1968 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1969 // where fbs is free block sizes
1970 double CompactibleFreeListSpace::flsFrag() const {
1971   size_t itabFree = totalSizeInIndexedFreeLists();
1972   double frag = 0.0;
1973   size_t i;
1974 
1975   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1976     double sz  = i;
1977     frag      += _indexedFreeList[i].count() * (sz * sz);
1978   }
1979 
1980   double totFree = itabFree +
1981                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1982   if (totFree > 0) {
1983     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1984             (totFree * totFree));
1985     frag = (double)1.0  - frag;
1986   } else {
1987     assert(frag == 0.0, "Follows from totFree == 0");
1988   }
1989   return frag;
1990 }
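     // A worked reading of the metric above: if all the free space is a single
     // block of F words, frag = 1 - F*F/(F*F) = 0 (no fragmentation); if the
     // same F words are spread over k equal blocks, frag = 1 - 1/k, which
     // approaches 1 as the free space becomes more fragmented.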
1991 
1992 void CompactibleFreeListSpace::beginSweepFLCensus(
1993   float inter_sweep_current,
1994   float inter_sweep_estimate,
1995   float intra_sweep_estimate) {
1996   assert_locked();
1997   size_t i;
1998   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1999     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2000     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2001     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2002     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2003     fl->set_before_sweep(fl->count());
2004     fl->set_bfr_surp(fl->surplus());
2005   }
2006   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2007                                     inter_sweep_current,
2008                                     inter_sweep_estimate,
2009                                     intra_sweep_estimate);
2010 }
2011 
2012 void CompactibleFreeListSpace::setFLSurplus() {
2013   assert_locked();
2014   size_t i;
2015   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2016     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2017     fl->set_surplus(fl->count() -
2018                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2019   }
2020 }
2021 
2022 void CompactibleFreeListSpace::setFLHints() {
2023   assert_locked();
2024   size_t i;
2025   size_t h = IndexSetSize;
2026   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2027     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2028     fl->set_hint(h);
2029     if (fl->surplus() > 0) {
2030       h = i;
2031     }
2032   }
2033 }
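     // A hedged illustration of the pass above (sizes made up): if only the
     // 40-word and 100-word lists currently show a surplus, lists smaller than
     // 40 get hint 40, lists from 40 up to 99 get hint 100, and lists of 100
     // words and larger get IndexSetSize, i.e. no larger list with a surplus.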
2034 
2035 void CompactibleFreeListSpace::clearFLCensus() {
2036   assert_locked();
2037   size_t i;
2038   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2039     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2040     fl->set_prev_sweep(fl->count());
2041     fl->set_coal_births(0);
2042     fl->set_coal_deaths(0);
2043     fl->set_split_births(0);
2044     fl->set_split_deaths(0);
2045   }
2046 }
2047 
2048 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2049   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2050   setFLSurplus();
2051   setFLHints();
2052   printFLCensus(sweep_count);
2053   clearFLCensus();
2054   assert_locked();
2055   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2056 }
2057 
2058 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2059   if (size < SmallForDictionary) {
2060     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2061     return (fl->coal_desired() < 0) ||
2062            ((int)fl->count() > fl->coal_desired());
2063   } else {
2064     return dictionary()->coal_dict_over_populated(size);
2065   }
2066 }
2067 
2068 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2069   assert(size < SmallForDictionary, "Size too large for indexed list");
2070   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2071   fl->increment_coal_births();
2072   fl->increment_surplus();
2073 }
2074 
2075 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2076   assert(size < SmallForDictionary, "Size too large for indexed list");
2077   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2078   fl->increment_coal_deaths();
2079   fl->decrement_surplus();
2080 }
2081 
2082 void CompactibleFreeListSpace::coalBirth(size_t size) {
2083   if (size  < SmallForDictionary) {
2084     smallCoalBirth(size);
2085   } else {
2086     dictionary()->dict_census_update(size,
2087                                    false /* split */,
2088                                    true /* birth */);
2089   }
2090 }
2091 
2092 void CompactibleFreeListSpace::coalDeath(size_t size) {
2093   if (size < SmallForDictionary) {
2094     smallCoalDeath(size);
2095   } else {
2096     dictionary()->dict_census_update(size,
2097                                    false /* split */,
2098                                    false /* birth */);
2099   }
2100 }
2101 
2102 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2103   assert(size < SmallForDictionary, "Size too large for indexed list");
2104   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2105   fl->increment_split_births();
2106   fl->increment_surplus();
2107 }
2108 
2109 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2110   assert(size < SmallForDictionary, "Size too large for indexed list");
2111   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2112   fl->increment_split_deaths();
2113   fl->decrement_surplus();
2114 }
2115 
2116 void CompactibleFreeListSpace::split_birth(size_t size) {
2117   if (size  < SmallForDictionary) {
2118     smallSplitBirth(size);
2119   } else {
2120     dictionary()->dict_census_update(size,
2121                                    true /* split */,
2122                                    true /* birth */);
2123   }
2124 }
2125 
2126 void CompactibleFreeListSpace::splitDeath(size_t size) {
2127   if (size  < SmallForDictionary) {
2128     smallSplitDeath(size);
2129   } else {
2130     dictionary()->dict_census_update(size,
2131                                    true /* split */,
2132                                    false /* birth */);
2133   }
2134 }
2135 
2136 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2137   size_t to2 = from - to1;
2138   splitDeath(from);
2139   split_birth(to1);
2140   split_birth(to2);
2141 }
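     // For instance, split(48, 30) records one split death at size 48 and
     // split births at sizes 30 and 18 (the remainder), keeping the census in
     // step with the physical carve-up performed by callers such as
     // splitChunkAndReturnRemainder().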
2142 
2143 void CompactibleFreeListSpace::print() const {
2144   print_on(tty);
2145 }
2146 
2147 void CompactibleFreeListSpace::prepare_for_verify() {
2148   assert_locked();
2149   repairLinearAllocationBlocks();
2150   // Verify that the SpoolBlocks look like free blocks of
2151   // appropriate sizes... To be done ...
2152 }
2153 
2154 class VerifyAllBlksClosure: public BlkClosure {
2155  private:
2156   const CompactibleFreeListSpace* _sp;
2157   const MemRegion                 _span;
2158   HeapWord*                       _last_addr;
2159   size_t                          _last_size;
2160   bool                            _last_was_obj;
2161   bool                            _last_was_live;
2162 
2163  public:
2164   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2165     MemRegion span) :  _sp(sp), _span(span),
2166                        _last_addr(NULL), _last_size(0),
2167                        _last_was_obj(false), _last_was_live(false) { }
2168 
2169   virtual size_t do_blk(HeapWord* addr) {
2170     size_t res;
2171     bool   was_obj  = false;
2172     bool   was_live = false;
2173     if (_sp->block_is_obj(addr)) {
2174       was_obj = true;
2175       oop p = oop(addr);
2176       guarantee(p->is_oop(), "Should be an oop");
2177       res = _sp->adjustObjectSize(p->size());
2178       if (_sp->obj_is_alive(addr)) {
2179         was_live = true;
2180         p->verify();
2181       }
2182     } else {
2183       FreeChunk* fc = (FreeChunk*)addr;
2184       res = fc->size();
2185       if (FLSVerifyLists && !fc->cantCoalesce()) {
2186         guarantee(_sp->verify_chunk_in_free_list(fc),
2187                   "Chunk should be on a free list");
2188       }
2189     }
2190     if (res == 0) {
2191       Log(gc, verify) log;
2192       log.error("Livelock: no rank reduction!");
2193       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2194                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2195         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2196         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2197       ResourceMark rm;
2198       _sp->print_on(log.error_stream());
2199       guarantee(false, "Verification failed.");
2200     }
2201     _last_addr = addr;
2202     _last_size = res;
2203     _last_was_obj  = was_obj;
2204     _last_was_live = was_live;
2205     return res;
2206   }
2207 };
2208 
2209 class VerifyAllOopsClosure: public OopClosure {
2210  private:
2211   const CMSCollector*             _collector;
2212   const CompactibleFreeListSpace* _sp;
2213   const MemRegion                 _span;
2214   const bool                      _past_remark;
2215   const CMSBitMap*                _bit_map;
2216 
2217  protected:
2218   void do_oop(void* p, oop obj) {
2219     if (_span.contains(obj)) { // the interior oop points into CMS heap
2220       if (!_span.contains(p)) { // reference from outside CMS heap
2221         // Should be a valid object; the first disjunct below allows
2222         // us to sidestep an assertion in block_is_obj() that insists
2223         // that p be in _sp. Note that several generations (and spaces)
2224         // are spanned by _span (CMS heap) above.
2225         guarantee(!_sp->is_in_reserved(obj) ||
2226                   _sp->block_is_obj((HeapWord*)obj),
2227                   "Should be an object");
2228         guarantee(obj->is_oop(), "Should be an oop");
2229         obj->verify();
2230         if (_past_remark) {
2231           // Remark has been completed, the object should be marked
2232           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked after remark");
2233         }
2234       } else { // reference within CMS heap
2235         if (_past_remark) {
2236           // Remark has been completed -- so the referent should have
2237           // been marked, if referring object is.
2238           if (_bit_map->isMarked(_collector->block_start(p))) {
2239             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2240           }
2241         }
2242       }
2243     } else if (_sp->is_in_reserved(p)) {
2244       // the reference is from FLS, and points out of FLS
2245       guarantee(obj->is_oop(), "Should be an oop");
2246       obj->verify();
2247     }
2248   }
2249 
2250   template <class T> void do_oop_work(T* p) {
2251     T heap_oop = oopDesc::load_heap_oop(p);
2252     if (!oopDesc::is_null(heap_oop)) {
2253       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2254       do_oop(p, obj);
2255     }
2256   }
2257 
2258  public:
2259   VerifyAllOopsClosure(const CMSCollector* collector,
2260     const CompactibleFreeListSpace* sp, MemRegion span,
2261     bool past_remark, CMSBitMap* bit_map) :
2262     _collector(collector), _sp(sp), _span(span),
2263     _past_remark(past_remark), _bit_map(bit_map) { }
2264 
2265   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2266   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2267 };
2268 
2269 void CompactibleFreeListSpace::verify() const {
2270   assert_lock_strong(&_freelistLock);
2271   verify_objects_initialized();
2272   MemRegion span = _collector->_span;
2273   bool past_remark = (_collector->abstract_state() ==
2274                       CMSCollector::Sweeping);
2275 
2276   ResourceMark rm;
2277   HandleMark  hm;
2278 
2279   // Check integrity of CFL data structures
2280   _promoInfo.verify();
2281   _dictionary->verify();
2282   if (FLSVerifyIndexTable) {
2283     verifyIndexedFreeLists();
2284   }
2285   // Check integrity of all objects and free blocks in space
2286   {
2287     VerifyAllBlksClosure cl(this, span);
2288     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2289   }
2290   // Check that all references in the heap to FLS
2291   // are to valid objects in FLS or that references in
2292   // FLS are to valid objects elsewhere in the heap
2293   if (FLSVerifyAllHeapReferences)
2294   {
2295     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2296       _collector->markBitMap());
2297 
2298     // Iterate over all oops in the heap. Uses the _no_header version
2299     // since we are not interested in following the klass pointers.
2300     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2301   }
2302 
2303   if (VerifyObjectStartArray) {
2304     // Verify the block offset table
2305     _bt.verify();
2306   }
2307 }
2308 
2309 #ifndef PRODUCT
2310 void CompactibleFreeListSpace::verifyFreeLists() const {
2311   if (FLSVerifyLists) {
2312     _dictionary->verify();
2313     verifyIndexedFreeLists();
2314   } else {
2315     if (FLSVerifyDictionary) {
2316       _dictionary->verify();
2317     }
2318     if (FLSVerifyIndexTable) {
2319       verifyIndexedFreeLists();
2320     }
2321   }
2322 }
2323 #endif
2324 
2325 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2326   size_t i = 0;
2327   for (; i < IndexSetStart; i++) {
2328     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2329   }
2330   for (; i < IndexSetSize; i++) {
2331     verifyIndexedFreeList(i);
2332   }
2333 }
2334 
2335 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2336   FreeChunk* fc   =  _indexedFreeList[size].head();
2337   FreeChunk* tail =  _indexedFreeList[size].tail();
2338   size_t    num = _indexedFreeList[size].count();
2339   size_t      n = 0;
2340   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2341             "Slot should have been empty");
2342   for (; fc != NULL; fc = fc->next(), n++) {
2343     guarantee(fc->size() == size, "Size inconsistency");
2344     guarantee(fc->is_free(), "!free?");
2345     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2346     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2347   }
2348   guarantee(n == num, "Incorrect count");
2349 }
2350 
2351 #ifndef PRODUCT
2352 void CompactibleFreeListSpace::check_free_list_consistency() const {
2353   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2354     "Some sizes can't be allocated without recourse to"
2355     " linear allocation buffers");
2356   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2357     "else MIN_TREE_CHUNK_SIZE is wrong");
2358   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2359   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2360 }
2361 #endif
2362 
2363 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2364   assert_lock_strong(&_freelistLock);
2365   LogTarget(Debug, gc, freelist, census) log;
2366   if (!log.is_enabled()) {
2367     return;
2368   }
2369   AdaptiveFreeList<FreeChunk> total;
2370   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2371   ResourceMark rm;
2372   outputStream* out = log.stream();
2373   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2374   size_t total_free = 0;
2375   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2376     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2377     total_free += fl->count() * fl->size();
2378     if (i % (40*IndexSetStride) == 0) {
2379       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2380     }
2381     fl->print_on(out);
2382     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2383     total.set_surplus(    total.surplus()     + fl->surplus()    );
2384     total.set_desired(    total.desired()     + fl->desired()    );
2385     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2386     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2387     total.set_count(      total.count()       + fl->count()      );
2388     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2389     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2390     total.set_split_births(total.split_births() + fl->split_births());
2391     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2392   }
2393   total.print_on(out, "TOTAL");
2394   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2395   log.print("growth: %8.5f  deficit: %8.5f",
2396             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2397                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2398             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2399   _dictionary->print_dict_census(out);
2400 }
2401 
2402 ///////////////////////////////////////////////////////////////////////////
2403 // CompactibleFreeListSpaceLAB
2404 ///////////////////////////////////////////////////////////////////////////
2405 
2406 #define VECTOR_257(x)                                                                                  \
2407   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2408   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2409      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2410      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2411      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2412      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2413      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2414      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2415      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2416      x }
2417 
2418 // Initialize with default setting for CMS, _not_
2419 // generic OldPLABSize, whose static default is different; if overridden at the
2420 // command-line, this will get reinitialized via a call to
2421 // modify_initialization() below.
2422 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2423   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2424 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2425 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2426 
2427 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2428   _cfls(cfls)
2429 {
2430   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2431   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2432        i < CompactibleFreeListSpace::IndexSetSize;
2433        i += CompactibleFreeListSpace::IndexSetStride) {
2434     _indexedFreeList[i].set_size(i);
2435     _num_blocks[i] = 0;
2436   }
2437 }
2438 
2439 static bool _CFLS_LAB_modified = false;
2440 
2441 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2442   assert(!_CFLS_LAB_modified, "Call only once");
2443   _CFLS_LAB_modified = true;
2444   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2445        i < CompactibleFreeListSpace::IndexSetSize;
2446        i += CompactibleFreeListSpace::IndexSetStride) {
2447     _blocks_to_claim[i].modify(n, wt, true /* force */);
2448   }
2449 }
2450 
2451 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2452   FreeChunk* res;
2453   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2454   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2455     // This locking manages sync with other large object allocations.
2456     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2457                     Mutex::_no_safepoint_check_flag);
2458     res = _cfls->getChunkFromDictionaryExact(word_sz);
2459     if (res == NULL) return NULL;
2460   } else {
2461     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2462     if (fl->count() == 0) {
2463       // Attempt to refill this local free list.
2464       get_from_global_pool(word_sz, fl);
2465       // If it didn't work, give up.
2466       if (fl->count() == 0) return NULL;
2467     }
2468     res = fl->get_chunk_at_head();
2469     assert(res != NULL, "Why was count non-zero?");
2470   }
2471   res->markNotFree();
2472   assert(!res->is_free(), "shouldn't be marked free");
2473   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2474   // mangle a just allocated object with a distinct pattern.
2475   debug_only(res->mangleAllocated(word_sz));
2476   return (HeapWord*)res;
2477 }
2478 
2479 // Get a chunk of blocks of the right size and update related
2480 // book-keeping stats
2481 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2482   // Get the #blocks we want to claim
2483   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2484   assert(n_blks > 0, "Error");
2485   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2486   // In some cases, when the application has a phase change,
2487   // there may be a sudden and sharp shift in the object survival
2488   // profile, and updating the counts at the end of a scavenge
2489   // may not be quick enough, giving rise to large scavenge pauses
2490   // during these phase changes. It is beneficial to detect such
2491   // changes on-the-fly during a scavenge and avoid such a phase-change
2492   // pothole. The following code is a heuristic attempt to do that.
2493   // It is protected by a product flag until we have gained
2494   // enough experience with this heuristic and fine-tuned its behavior.
2495   // WARNING: This might increase fragmentation if we overreact to
2496   // small spikes, so some kind of historical smoothing based on
2497   // previous experience with the greater reactivity might be useful.
2498   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2499   // default.
2500   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2501     //
2502     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2503     // which is why there is a cast to double.
2504     //
2505     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2506     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2507     n_blks = MIN2(n_blks, CMSOldPLABMax);
2508   }
2509   assert(n_blks > 0, "Error");
2510   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2511   // Update stats table entry for this block size
2512   _num_blocks[word_sz] += fl->count();
2513 }
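     // A hedged walk-through of the resizing heuristic above (all values are
     // made-up examples, not the flags' defaults): with n_blks = 50,
     // CMSOldPLABToleranceFactor = 4, CMSOldPLABNumRefills = 4,
     // CMSOldPLABReactivityFactor = 2 and 2000 blocks of this size already
     // handed out during the current scavenge, multiple = (size_t)(2000/800.0)
     // = 2, so n_blks grows to 50 + 2 * 2 * 50 = 250, subject to the
     // CMSOldPLABMax cap.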
2514 
2515 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2516   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2517        i < CompactibleFreeListSpace::IndexSetSize;
2518        i += CompactibleFreeListSpace::IndexSetStride) {
2519     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2520            "Counter inconsistency");
2521     if (_global_num_workers[i] > 0) {
2522       // Need to smooth wrt historical average
2523       if (ResizeOldPLAB) {
2524         _blocks_to_claim[i].sample(
2525           MAX2(CMSOldPLABMin,
2526           MIN2(CMSOldPLABMax,
2527                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2528       }
2529       // Reset counters for next round
2530       _global_num_workers[i] = 0;
2531       _global_num_blocks[i] = 0;
2532       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2533     }
2534   }
2535 }
2536 
2537 // If this is changed in the future to allow parallel
2538 // access, one would need to take the FL locks and,
2539 // depending on how it is used, stagger access from
2540 // parallel threads to reduce contention.
2541 void CompactibleFreeListSpaceLAB::retire(int tid) {
2542   // We run this single threaded with the world stopped;
2543   // so no need for locks and such.
2544   NOT_PRODUCT(Thread* t = Thread::current();)
2545   assert(Thread::current()->is_VM_thread(), "Error");
2546   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2547        i < CompactibleFreeListSpace::IndexSetSize;
2548        i += CompactibleFreeListSpace::IndexSetStride) {
2549     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2550            "Can't retire more than what we obtained");
2551     if (_num_blocks[i] > 0) {
2552       size_t num_retire =  _indexedFreeList[i].count();
2553       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2554       {
2555         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2556         //                Mutex::_no_safepoint_check_flag);
2557 
2558         // Update global stats for num_blocks used
2559         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2560         _global_num_workers[i]++;
2561         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2562         if (num_retire > 0) {
2563           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2564           // Reset this list.
2565           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2566           _indexedFreeList[i].set_size(i);
2567         }
2568       }
2569       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2570                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2571       // Reset stats for next round
2572       _num_blocks[i]         = 0;
2573     }
2574   }
2575 }
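     // Accounting sketch for the retire loop above (numbers are illustrative):
     // if a worker obtained _num_blocks[i] == 400 blocks of size i during the
     // scavenge and ends with num_retire == 37 still unused on its local
     // _indexedFreeList[i], then 400 - 37 = 363 blocks are credited to
     // _global_num_blocks[i] as actually used, the 37 leftover chunks are
     // prepended back onto the shared _cfls->_indexedFreeList[i], and both the
     // local list and _num_blocks[i] are reset for the next scavenge.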
2576 
2577 // Used by par_get_chunk_of_blocks() for the chunks from the
2578 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2579 // of "word_sz" and, if found, splits it into chunks of size "word_sz"
2580 // and adds them to the free list "fl".  "n" is the maximum number of
2581 // chunks to be added to "fl".
2582 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2583 
2584   // We'll try all multiples of word_sz in the indexed set, starting with
2585   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples.
2586   // If that fails, the caller falls back to splitting a big dictionary chunk.
2587   {
2588     bool found;
2589     int  k;
2590     size_t cur_sz;
2591     for (k = 1, cur_sz = k * word_sz, found = false;
2592          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2593          (CMSSplitIndexedFreeListBlocks || k <= 1);
2594          k++, cur_sz = k * word_sz) {
2595       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2596       fl_for_cur_sz.set_size(cur_sz);
2597       {
2598         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2599                         Mutex::_no_safepoint_check_flag);
2600         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2601         if (gfl->count() != 0) {
2602           // nn is the number of chunks of size cur_sz that
2603           // we'd need to split k-ways each, in order to create
2604           // "n" chunks of size word_sz each.
2605           const size_t nn = MAX2(n/k, (size_t)1);
2606           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2607           found = true;
2608           if (k > 1) {
2609             // Update split death stats for the cur_sz-size blocks list:
2610             // we increment the split death count by the number of blocks
2611             // we just took from the cur_sz-size blocks list and which
2612             // we will be splitting below.
2613             ssize_t deaths = gfl->split_deaths() +
2614                              fl_for_cur_sz.count();
2615             gfl->set_split_deaths(deaths);
2616           }
2617         }
2618       }
2619       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2620       if (found) {
2621         if (k == 1) {
2622           fl->prepend(&fl_for_cur_sz);
2623         } else {
2624           // Divide each block on fl_for_cur_sz up k ways.
2625           FreeChunk* fc;
2626           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2627             // Must do this in reverse order, so that anybody attempting to
2628             // access the main chunk sees it as a single free block until we
2629             // change it.
2630             size_t fc_size = fc->size();
2631             assert(fc->is_free(), "Error");
2632             for (int i = k-1; i >= 0; i--) {
2633               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2634               assert((i != 0) ||
2635                         ((fc == ffc) && ffc->is_free() &&
2636                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2637                         "Counting error");
2638               ffc->set_size(word_sz);
2639               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2640               ffc->link_next(NULL);
2641               // Above must occur before BOT is updated below.
2642               OrderAccess::storestore();
2643               // splitting from the right; after the decrement below, fc_size == i * word_sz
2644               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2645               fc_size -= word_sz;
2646               assert(fc_size == i*word_sz, "Error");
2647               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2648               _bt.verify_single_block((HeapWord*)fc, fc_size);
2649               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2650               // Push this on "fl".
2651               fl->return_chunk_at_head(ffc);
2652             }
2653             // TRAP
2654             assert(fl->tail()->next() == NULL, "List invariant.");
2655           }
2656         }
2657         // Update birth stats for this block size.
2658         size_t num = fl->count();
2659         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2660                         Mutex::_no_safepoint_check_flag);
2661         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2662         _indexedFreeList[word_sz].set_split_births(births);
2663         return true;
2664       }
2665     }
2666     return found;
2667   }
2668 }
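     // Sketch of the reverse-order split performed above for k == 3 (purely
     // schematic):
     //
     //   start  : fc->size() == 3 * word_sz, one free block
     //   i == 2 : piece at fc + 2 * word_sz carved out (size word_sz)
     //   i == 1 : piece at fc + 1 * word_sz carved out (size word_sz)
     //   i == 0 : fc itself shrunk to word_sz (ffc == fc here)
     //
     // Until the last iteration, any concurrent reader that looks at fc still
     // sees one coherent free block covering the whole (still free) range;
     // only at i == 0 is the head block itself shrunk to word_sz, which is why
     // the loop walks i from k-1 down to 0.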
2669 
2670 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2671 
2672   FreeChunk* fc = NULL;
2673   FreeChunk* rem_fc = NULL;
2674   size_t rem;
2675   {
2676     MutexLockerEx x(parDictionaryAllocLock(),
2677                     Mutex::_no_safepoint_check_flag);
2678     while (n > 0) {
2679       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
2680       if (fc != NULL) {
2681         break;
2682       } else {
2683         n--;
2684       }
2685     }
2686     if (fc == NULL) return NULL;
2687     // Otherwise, split up that block.
2688     assert((ssize_t)n >= 1, "Control point invariant");
2689     assert(fc->is_free(), "Error: should be a free block");
2690     _bt.verify_single_block((HeapWord*)fc, fc->size());
2691     const size_t nn = fc->size() / word_sz;
2692     n = MIN2(nn, n);
2693     assert((ssize_t)n >= 1, "Control point invariant");
2694     rem = fc->size() - n * word_sz;
2695     // If there is a remainder, and it's too small, allocate one fewer.
2696     if (rem > 0 && rem < MinChunkSize) {
2697       n--; rem += word_sz;
2698     }
2699     // Note that at this point we may have n == 0.
2700     assert((ssize_t)n >= 0, "Control point invariant");
2701 
2702     // If n is 0, the chunk fc that was found is not large
2703     // enough to leave a viable remainder.  We are unable to
2704     // allocate even one block.  Return fc to the dictionary
2705     // and return NULL; the caller will then leave "fl" empty.
2706     if (n == 0) {
2707       returnChunkToDictionary(fc);
2708       return NULL;
2709     }
2710 
2711     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2712     dictionary()->dict_census_update(fc->size(),
2713                                      true /*split*/,
2714                                      false /*birth*/);
2715 
2716     // First return the remainder, if any.
2717     // Note that we hold the lock until we decide if we're going to give
2718     // back the remainder to the dictionary, since a concurrent allocation
2719     // may otherwise see the heap as empty.  (We're willing to take that
2720     // hit if the block is a small block.)
2721     if (rem > 0) {
2722       size_t prefix_size = n * word_sz;
2723       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2724       rem_fc->set_size(rem);
2725       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2726       rem_fc->link_next(NULL);
2727       // Above must occur before BOT is updated below.
2728       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2729       OrderAccess::storestore();
2730       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2731       assert(fc->is_free(), "Error");
2732       fc->set_size(prefix_size);
2733       if (rem >= IndexSetSize) {
2734         returnChunkToDictionary(rem_fc);
2735         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2736         rem_fc = NULL;
2737       }
2738       // Otherwise, return it to the small list below.
2739     }
2740   }
2741   if (rem_fc != NULL) {
2742     MutexLockerEx x(_indexedFreeListParLocks[rem],
2743                     Mutex::_no_safepoint_check_flag);
2744     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2745     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2746     smallSplitBirth(rem);
2747   }
2748   assert(n * word_sz == fc->size(),
2749          "Chunk size " SIZE_FORMAT " is not exactly splittable by "
2750          SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
2751          fc->size(), n, word_sz);
2752   return fc;
2753 }
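     // Worked example of the remainder handling above (sizes in words; the
     // MinChunkSize value is an assumption for the sketch): for word_sz == 10
     // and n == 100 we ask the dictionary for a chunk of at least 1000 words
     // and it returns fc with fc->size() == 1003.  Then nn == 100, n stays
     // 100 and rem == 3.  If MinChunkSize were 4, rem would be too small to
     // stand alone, so we allocate one fewer block: n == 99, rem == 13.  fc is
     // trimmed to the 990-word prefix and the 13-word remainder, being smaller
     // than IndexSetSize, is returned to _indexedFreeList[13] rather than to
     // the dictionary.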
2754 
2755 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2756 
2757   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2758 
2759   if (fc == NULL) {
2760     return;
2761   }
2762 
2763   size_t n = fc->size() / word_sz;
2764 
2765   assert((ssize_t)n > 0, "Consistency");
2766   // Now do the splitting up.
2767   // Must do this in reverse order, so that anybody attempting to
2768   // access the main chunk sees it as a single free block until we
2769   // change it.
2770   size_t fc_size = n * word_sz;
2771   // All but first chunk in this loop
2772   for (ssize_t i = n-1; i > 0; i--) {
2773     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2774     ffc->set_size(word_sz);
2775     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2776     ffc->link_next(NULL);
2777     // Above must occur before BOT is updated below.
2778     OrderAccess::storestore();
2779     // splitting from the right; after the decrement below, fc_size == i * word_sz
2780     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2781     fc_size -= word_sz;
2782     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2783     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2784     _bt.verify_single_block((HeapWord*)fc, fc_size);
2785     // Push this on "fl".
2786     fl->return_chunk_at_head(ffc);
2787   }
2788   // First chunk
2789   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2790   // The blocks above should show their new sizes before the first block below
2791   fc->set_size(word_sz);
2792   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2793   fc->link_next(NULL);
2794   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2795   _bt.verify_single_block((HeapWord*)fc, fc->size());
2796   fl->return_chunk_at_head(fc);
2797 
2798   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2799   {
2800     // Update the stats for this block size.
2801     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2802                     Mutex::_no_safepoint_check_flag);
2803     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2804     _indexedFreeList[word_sz].set_split_births(births);
2805     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2806     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2807   }
2808 
2809   // TRAP
2810   assert(fl->tail()->next() == NULL, "List invariant.");
2811 }
2812 
2813 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2814   assert(fl->count() == 0, "Precondition.");
2815   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2816          "Precondition");
2817 
2818   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2819     // Got it
2820     return;
2821   }
2822 
2823   // Otherwise, we'll split a block from the dictionary.
2824   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2825 }
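     // Putting the two paths together, a parallel worker's PLAB refill reaches
     // this method roughly as in the following sketch (hypothetical caller;
     // the real refill code earlier in this file passes its per-size local
     // free list instead of a fresh one):
     //
     //   AdaptiveFreeList<FreeChunk> fl;
     //   fl.set_size(word_sz);                               // word_sz < IndexSetSize
     //   cfls->par_get_chunk_of_blocks(word_sz, n_blks, &fl);
     //   // fl now holds roughly n_blks free chunks of exactly word_sz words,
     //   // taken from the indexed lists if possible, else split off a
     //   // dictionary chunk.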
2826 
2827 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
2828   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTableModRefBS::card_size_in_words * BitsPerWord);
2829   return ergo_max;
2830 }
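     // Magnitude check with illustrative 64-bit numbers (not a statement about
     // any particular build): a 1 GB reserved old gen is 2^27 words; with
     // card_size_in_words == 64 and BitsPerWord == 64,
     //   ergo_max = 2^27 / (64 * 64) = 32768.
     // If a task covers flag * card_size_in_words words, capping the flag at
     // ergo_max keeps the number of tasks at or above BitsPerWord, which is
     // presumably the intent of this ergonomic bound.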
2831 
2832 // Set up the space's par_seq_tasks structure for work claiming
2833 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2834 // XXX Need to suitably abstract and generalize this and the next
2835 // method into one.
2836 void
2837 CompactibleFreeListSpace::
2838 initialize_sequential_subtasks_for_rescan(int n_threads) {
2839   // The "size" of each task is fixed according to rescan_task_size.
2840   assert(n_threads > 0, "Unexpected n_threads argument");
2841   const size_t task_size = rescan_task_size();
2842   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2843   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2844   assert(n_tasks == 0 ||
2845          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2846           (used_region().start() + n_tasks*task_size >= used_region().end())),
2847          "n_tasks calculation incorrect");
2848   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2849   assert(!pst->valid(), "Clobbering existing data?");
2850   // Sets the condition for completion of the subtask (how many threads
2851   // need to finish in order to be done).
2852   pst->set_n_threads(n_threads);
2853   pst->set_n_tasks((int)n_tasks);
2854 }
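     // Worked example of the task count above (illustrative values): for a
     // used region of 1,000,000 words and a rescan task size of 131,072 words,
     //   n_tasks = (1,000,000 + 131,072 - 1) / 131,072 = 8.
     // Seven tasks would fall short (7 * 131,072 = 917,504) while eight cover
     // the region (8 * 131,072 = 1,048,576), which is exactly the property the
     // asserts above verify.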
2855 
2856 // Set up the space's par_seq_tasks structure for work claiming
2857 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2858 void
2859 CompactibleFreeListSpace::
2860 initialize_sequential_subtasks_for_marking(int n_threads,
2861                                            HeapWord* low) {
2862   // The "size" of each task is fixed according to rescan_task_size.
2863   assert(n_threads > 0, "Unexpected n_threads argument");
2864   const size_t task_size = marking_task_size();
2865   assert(task_size > CardTableModRefBS::card_size_in_words &&
2866          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2867          "Otherwise arithmetic below would be incorrect");
2868   MemRegion span = _old_gen->reserved();
2869   if (low != NULL) {
2870     if (span.contains(low)) {
2871       // Align low down to a card boundary so that
2872       // we can use block_offset_careful() on span boundaries.
2873       HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
2874       // Clip span prefix at aligned_low
2875       span = span.intersection(MemRegion(aligned_low, span.end()));
2876     } else if (low > span.end()) {
2877       span = MemRegion(low, low);  // Null region
2878     } // else use entire span
2879   }
2880   assert(span.is_empty() ||
2881          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2882         "span should start at a card boundary");
2883   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2884   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2885   assert(n_tasks == 0 ||
2886          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2887           (span.start() + n_tasks*task_size >= span.end())),
2888          "n_tasks calculation incorrect");
2889   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2890   assert(!pst->valid(), "Clobbering existing data?");
2891   // Sets the condition for completion of the subtask (how many threads
2892   // need to finish in order to be done).
2893   pst->set_n_threads(n_threads);
2894   pst->set_n_tasks((int)n_tasks);
2895 }
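     // Illustrative example of the clipping and sizing above (card size and
     // counts are assumptions for the sketch): if low falls part-way into a
     // 512-byte card, aligned_low is rounded down to that card's start, so the
     // clipped span begins on a card boundary and block_offset_careful() can
     // be used safely at its edges.  The task count is then the same ceiling
     // division as for rescan: a 3,000,000-word span with a 131,072-word
     // marking task size gives n_tasks == 23, since
     //   22 * 131,072 = 2,883,584 < 3,000,000 <= 23 * 131,072 = 3,014,656.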