1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/space.inline.hpp"
  34 #include "gc/shared/spaceDecorator.hpp"
  35 #include "logging/logStream.inline.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/align.hpp"
  47 #include "utilities/copy.hpp"
  48 
  49 /////////////////////////////////////////////////////////////////////////
  50 //// CompactibleFreeListSpace
  51 /////////////////////////////////////////////////////////////////////////
  52 
  53 // highest ranked  free list lock rank
  54 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  55 
  56 // Defaults are 0 so things will break badly if incorrectly initialized.
  57 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  58 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  59 
  60 size_t MinChunkSize = 0;
  61 
  62 void CompactibleFreeListSpace::set_cms_values() {
  63   // Set CMS global values
  64   assert(MinChunkSize == 0, "already set");
  65 
  66   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  67   // for chunks to contain a FreeChunk.
  68   size_t min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  69   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  70 
  71   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  72   IndexSetStart  = MinChunkSize;
  73   IndexSetStride = MinObjAlignment;
  74 }
  75 
  76 // Constructor
  77 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  78   _bt(bs, mr),
  79   // free list locks are in the range of values taken by _lockRank
  80   // This range currently is [_leaf+2, _leaf+3]
  81   // Note: this requires that CFLspace c'tors
  82   // are called serially in the order in which the locks are
  83   // are acquired in the program text. This is true today.
  84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  85                 Monitor::_safepoint_check_sometimes),
  86   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  87                           "CompactibleFreeListSpace._dict_par_lock", true,
  88                           Monitor::_safepoint_check_never),
  89   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSRescanMultiple),
  91   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  92                     CMSConcMarkMultiple),
  93   _collector(NULL),
  94   _preconsumptionDirtyCardClosure(NULL)
  95 {
  96   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  97          "FreeChunk is larger than expected");
  98   _bt.set_space(this);
  99   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 100 
 101   _dictionary = new AFLBinaryTreeDictionary(mr);
 102 
 103   assert(_dictionary != NULL, "CMS dictionary initialization");
 104   // The indexed free lists are initially all empty and are lazily
 105   // filled in on demand. Initialize the array elements to NULL.
 106   initializeIndexedFreeListArray();
 107 
 108   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 109                              SmallForLinearAlloc);
 110 
 111   // CMSIndexedFreeListReplenish should be at least 1
 112   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 113   _promoInfo.setSpace(this);
 114   if (UseCMSBestFit) {
 115     _fitStrategy = FreeBlockBestFitFirst;
 116   } else {
 117     _fitStrategy = FreeBlockStrategyNone;
 118   }
 119   check_free_list_consistency();
 120 
 121   // Initialize locks for parallel case.
 122   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 123     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 124                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 125     DEBUG_ONLY(
 126       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 127     )
 128   }
 129   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 130 }
 131 
 132 // Like CompactibleSpace forward() but always calls cross_threshold() to
 133 // update the block offset table.  Removed initialize_threshold call because
 134 // CFLS does not use a block offset array for contiguous spaces.
 135 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 136                                     CompactPoint* cp, HeapWord* compact_top) {
 137   // q is alive
 138   // First check if we should switch compaction space
 139   assert(this == cp->space, "'this' should be current compaction space.");
 140   size_t compaction_max_size = pointer_delta(end(), compact_top);
 141   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 142     "virtual adjustObjectSize_v() method is not correct");
 143   size_t adjusted_size = adjustObjectSize(size);
 144   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 145          "no small fragments allowed");
 146   assert(minimum_free_block_size() == MinChunkSize,
 147          "for de-virtualized reference below");
 148   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
 149   if (adjusted_size + MinChunkSize > compaction_max_size &&
 150       adjusted_size != compaction_max_size) {
 151     do {
 152       // switch to next compaction space
 153       cp->space->set_compaction_top(compact_top);
 154       cp->space = cp->space->next_compaction_space();
 155       if (cp->space == NULL) {
 156         cp->gen = GenCollectedHeap::heap()->young_gen();
 157         assert(cp->gen != NULL, "compaction must succeed");
 158         cp->space = cp->gen->first_compaction_space();
 159         assert(cp->space != NULL, "generation must have a first compaction space");
 160       }
 161       compact_top = cp->space->bottom();
 162       cp->space->set_compaction_top(compact_top);
 163       // The correct adjusted_size may not be the same as that for this method
 164       // (i.e., cp->space may no longer be "this" so adjust the size again.
 165       // Use the virtual method which is not used above to save the virtual
 166       // dispatch.
 167       adjusted_size = cp->space->adjust_object_size_v(size);
 168       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 169       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 170     } while (adjusted_size > compaction_max_size);
 171   }
 172 
 173   // store the forwarding pointer into the mark word
 174   if ((HeapWord*)q != compact_top) {
 175     q->forward_to(oop(compact_top));
 176     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 177   } else {
 178     // if the object isn't moving we can just set the mark to the default
 179     // mark and handle it specially later on.
 180     q->init_mark();
 181     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 182   }
 183 
 184   compact_top += adjusted_size;
 185 
 186   // we need to update the offset table so that the beginnings of objects can be
 187   // found during scavenge.  Note that we are updating the offset table based on
 188   // where the object will be once the compaction phase finishes.
 189 
 190   // Always call cross_threshold().  A contiguous space can only call it when
 191   // the compaction_top exceeds the current threshold but not for an
 192   // non-contiguous space.
 193   cp->threshold =
 194     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 195   return compact_top;
 196 }
 197 
 198 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 199 // and use of single_block instead of alloc_block.  The name here is not really
 200 // appropriate - maybe a more general name could be invented for both the
 201 // contiguous and noncontiguous spaces.
 202 
 203 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 204   _bt.single_block(start, the_end);
 205   return end();
 206 }
 207 
 208 // Initialize them to NULL.
 209 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 210   for (size_t i = 0; i < IndexSetSize; i++) {
 211     // Note that on platforms where objects are double word aligned,
 212     // the odd array elements are not used.  It is convenient, however,
 213     // to map directly from the object size to the array element.
 214     _indexedFreeList[i].reset(IndexSetSize);
 215     _indexedFreeList[i].set_size(i);
 216     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 217     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 219     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 220   }
 221 }
 222 
 223 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 224   return adjustObjectSize(oop(addr)->size());
 225 }
 226 
 227 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 228   for (size_t i = 1; i < IndexSetSize; i++) {
 229     assert(_indexedFreeList[i].size() == (size_t) i,
 230       "Indexed free list sizes are incorrect");
 231     _indexedFreeList[i].reset(IndexSetSize);
 232     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 233     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 234     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 235     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 236   }
 237 }
 238 
 239 void CompactibleFreeListSpace::reset(MemRegion mr) {
 240   resetIndexedFreeListArray();
 241   dictionary()->reset();
 242   if (BlockOffsetArrayUseUnallocatedBlock) {
 243     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 244     // Everything's allocated until proven otherwise.
 245     _bt.set_unallocated_block(end());
 246   }
 247   if (!mr.is_empty()) {
 248     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 249     _bt.single_block(mr.start(), mr.word_size());
 250     FreeChunk* fc = (FreeChunk*) mr.start();
 251     fc->set_size(mr.word_size());
 252     if (mr.word_size() >= IndexSetSize ) {
 253       returnChunkToDictionary(fc);
 254     } else {
 255       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 256       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 257     }
 258     coalBirth(mr.word_size());
 259   }
 260   _promoInfo.reset();
 261   _smallLinearAllocBlock._ptr = NULL;
 262   _smallLinearAllocBlock._word_size = 0;
 263 }
 264 
 265 void CompactibleFreeListSpace::reset_after_compaction() {
 266   // Reset the space to the new reality - one free chunk.
 267   MemRegion mr(compaction_top(), end());
 268   reset(mr);
 269   // Now refill the linear allocation block(s) if possible.
 270   refillLinearAllocBlocksIfNeeded();
 271 }
 272 
 273 // Walks the entire dictionary, returning a coterminal
 274 // chunk, if it exists. Use with caution since it involves
 275 // a potentially complete walk of a potentially large tree.
 276 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 277 
 278   assert_lock_strong(&_freelistLock);
 279 
 280   return dictionary()->find_chunk_ends_at(end());
 281 }
 282 
 283 
 284 #ifndef PRODUCT
 285 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 286   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 287     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 288   }
 289 }
 290 
 291 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 292   size_t sum = 0;
 293   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 294     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 295   }
 296   return sum;
 297 }
 298 
 299 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 300   size_t count = 0;
 301   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 302     debug_only(
 303       ssize_t total_list_count = 0;
 304       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 305          fc = fc->next()) {
 306         total_list_count++;
 307       }
 308       assert(total_list_count ==  _indexedFreeList[i].count(),
 309         "Count in list is incorrect");
 310     )
 311     count += _indexedFreeList[i].count();
 312   }
 313   return count;
 314 }
 315 
 316 size_t CompactibleFreeListSpace::totalCount() {
 317   size_t num = totalCountInIndexedFreeLists();
 318   num +=  dictionary()->total_count();
 319   if (_smallLinearAllocBlock._word_size != 0) {
 320     num++;
 321   }
 322   return num;
 323 }
 324 #endif
 325 
 326 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 327   FreeChunk* fc = (FreeChunk*) p;
 328   return fc->is_free();
 329 }
 330 
 331 size_t CompactibleFreeListSpace::used() const {
 332   return capacity() - free();
 333 }
 334 
 335 size_t CompactibleFreeListSpace::free() const {
 336   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 337   // if you do this while the structures are in flux you
 338   // may get an approximate answer only; for instance
 339   // because there is concurrent allocation either
 340   // directly by mutators or for promotion during a GC.
 341   // It's "MT-safe", however, in the sense that you are guaranteed
 342   // not to crash and burn, for instance, because of walking
 343   // pointers that could disappear as you were walking them.
 344   // The approximation is because the various components
 345   // that are read below are not read atomically (and
 346   // further the computation of totalSizeInIndexedFreeLists()
 347   // is itself a non-atomic computation. The normal use of
 348   // this is during a resize operation at the end of GC
 349   // and at that time you are guaranteed to get the
 350   // correct actual value. However, for instance, this is
 351   // also read completely asynchronously by the "perf-sampler"
 352   // that supports jvmstat, and you are apt to see the values
 353   // flicker in such cases.
 354   assert(_dictionary != NULL, "No _dictionary?");
 355   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 356           totalSizeInIndexedFreeLists() +
 357           _smallLinearAllocBlock._word_size) * HeapWordSize;
 358 }
 359 
 360 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 361   assert(_dictionary != NULL, "No _dictionary?");
 362   assert_locked();
 363   size_t res = _dictionary->max_chunk_size();
 364   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 365                        (size_t) SmallForLinearAlloc - 1));
 366   // XXX the following could potentially be pretty slow;
 367   // should one, pessimistically for the rare cases when res
 368   // calculated above is less than IndexSetSize,
 369   // just return res calculated above? My reasoning was that
 370   // those cases will be so rare that the extra time spent doesn't
 371   // really matter....
 372   // Note: do not change the loop test i >= res + IndexSetStride
 373   // to i > res below, because i is unsigned and res may be zero.
 374   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 375        i -= IndexSetStride) {
 376     if (_indexedFreeList[i].head() != NULL) {
 377       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 378       return i;
 379     }
 380   }
 381   return res;
 382 }
 383 
 384 void LinearAllocBlock::print_on(outputStream* st) const {
 385   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 386             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 387             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 388 }
 389 
 390 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 391   st->print_cr("COMPACTIBLE FREELIST SPACE");
 392   st->print_cr(" Space:");
 393   Space::print_on(st);
 394 
 395   st->print_cr("promoInfo:");
 396   _promoInfo.print_on(st);
 397 
 398   st->print_cr("_smallLinearAllocBlock");
 399   _smallLinearAllocBlock.print_on(st);
 400 
 401   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 402 
 403   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 404 }
 405 
 406 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 407 const {
 408   reportIndexedFreeListStatistics(st);
 409   st->print_cr("Layout of Indexed Freelists");
 410   st->print_cr("---------------------------");
 411   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 412   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 413     _indexedFreeList[i].print_on(st);
 414     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 415       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 416                    p2i(fc), p2i((HeapWord*)fc + i),
 417                    fc->cantCoalesce() ? "\t CC" : "");
 418     }
 419   }
 420 }
 421 
 422 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 423 const {
 424   _promoInfo.print_on(st);
 425 }
 426 
 427 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 428 const {
 429   _dictionary->report_statistics(st);
 430   st->print_cr("Layout of Freelists in Tree");
 431   st->print_cr("---------------------------");
 432   _dictionary->print_free_lists(st);
 433 }
 434 
 435 class BlkPrintingClosure: public BlkClosure {
 436   const CMSCollector*             _collector;
 437   const CompactibleFreeListSpace* _sp;
 438   const CMSBitMap*                _live_bit_map;
 439   const bool                      _post_remark;
 440   outputStream*                   _st;
 441 public:
 442   BlkPrintingClosure(const CMSCollector* collector,
 443                      const CompactibleFreeListSpace* sp,
 444                      const CMSBitMap* live_bit_map,
 445                      outputStream* st):
 446     _collector(collector),
 447     _sp(sp),
 448     _live_bit_map(live_bit_map),
 449     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 450     _st(st) { }
 451   size_t do_blk(HeapWord* addr);
 452 };
 453 
 454 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 455   size_t sz = _sp->block_size_no_stall(addr, _collector);
 456   assert(sz != 0, "Should always be able to compute a size");
 457   if (_sp->block_is_obj(addr)) {
 458     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 459     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 460       p2i(addr),
 461       dead ? "dead" : "live",
 462       sz,
 463       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 464     if (CMSPrintObjectsInDump && !dead) {
 465       oop(addr)->print_on(_st);
 466       _st->print_cr("--------------------------------------");
 467     }
 468   } else { // free block
 469     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 470       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 471     if (CMSPrintChunksInDump) {
 472       ((FreeChunk*)addr)->print_on(_st);
 473       _st->print_cr("--------------------------------------");
 474     }
 475   }
 476   return sz;
 477 }
 478 
 479 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 480   st->print_cr("=========================");
 481   st->print_cr("Block layout in CMS Heap:");
 482   st->print_cr("=========================");
 483   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 484   blk_iterate(&bpcl);
 485 
 486   st->print_cr("=======================================");
 487   st->print_cr("Order & Layout of Promotion Info Blocks");
 488   st->print_cr("=======================================");
 489   print_promo_info_blocks(st);
 490 
 491   st->print_cr("===========================");
 492   st->print_cr("Order of Indexed Free Lists");
 493   st->print_cr("=========================");
 494   print_indexed_free_lists(st);
 495 
 496   st->print_cr("=================================");
 497   st->print_cr("Order of Free Lists in Dictionary");
 498   st->print_cr("=================================");
 499   print_dictionary_free_lists(st);
 500 }
 501 
 502 
 503 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 504   assert_lock_strong(&_freelistLock);
 505   Log(gc, freelist, stats) log;
 506   if (!log.is_debug()) {
 507     return;
 508   }
 509   log.debug("%s", title);
 510 
 511   LogStream out(log.debug());
 512   _dictionary->report_statistics(&out);
 513 
 514   if (log.is_trace()) {
 515     LogStream trace_out(log.trace());
 516     reportIndexedFreeListStatistics(&trace_out);
 517     size_t total_size = totalSizeInIndexedFreeLists() +
 518                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 519     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 520   }
 521 }
 522 
 523 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 524   assert_lock_strong(&_freelistLock);
 525   st->print_cr("Statistics for IndexedFreeLists:");
 526   st->print_cr("--------------------------------");
 527   size_t total_size = totalSizeInIndexedFreeLists();
 528   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 529   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 530   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 531   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 532   if (free_blocks != 0) {
 533     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 534   }
 535 }
 536 
 537 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 538   size_t res = 0;
 539   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 540     debug_only(
 541       ssize_t recount = 0;
 542       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 543          fc = fc->next()) {
 544         recount += 1;
 545       }
 546       assert(recount == _indexedFreeList[i].count(),
 547         "Incorrect count in list");
 548     )
 549     res += _indexedFreeList[i].count();
 550   }
 551   return res;
 552 }
 553 
 554 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 555   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 556     if (_indexedFreeList[i].head() != NULL) {
 557       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 558       return (size_t)i;
 559     }
 560   }
 561   return 0;
 562 }
 563 
 564 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 565   HeapWord* prevEnd = end();
 566   assert(prevEnd != value, "unnecessary set_end call");
 567   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 568         "New end is below unallocated block");
 569   _end = value;
 570   if (prevEnd != NULL) {
 571     // Resize the underlying block offset table.
 572     _bt.resize(pointer_delta(value, bottom()));
 573     if (value <= prevEnd) {
 574       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 575              "New end is below unallocated block");
 576     } else {
 577       // Now, take this new chunk and add it to the free blocks.
 578       // Note that the BOT has not yet been updated for this block.
 579       size_t newFcSize = pointer_delta(value, prevEnd);
 580       // Add the block to the free lists, if possible coalescing it
 581       // with the last free block, and update the BOT and census data.
 582       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 583     }
 584   }
 585 }
 586 
 587 class FreeListSpaceDCTOC : public FilteringDCTOC {
 588   CompactibleFreeListSpace* _cfls;
 589   CMSCollector* _collector;
 590   bool _parallel;
 591 protected:
 592   // Override.
 593 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 594   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 595                                        HeapWord* bottom, HeapWord* top, \
 596                                        ClosureType* cl);                \
 597       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 598                                        HeapWord* bottom, HeapWord* top, \
 599                                        ClosureType* cl);                \
 600     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 601                                        HeapWord* bottom, HeapWord* top, \
 602                                        ClosureType* cl)
 603   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 604   walk_mem_region_with_cl_DECL(FilteringClosure);
 605 
 606 public:
 607   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 608                      CMSCollector* collector,
 609                      ExtendedOopClosure* cl,
 610                      CardTableModRefBS::PrecisionStyle precision,
 611                      HeapWord* boundary,
 612                      bool parallel) :
 613     FilteringDCTOC(sp, cl, precision, boundary),
 614     _cfls(sp), _collector(collector), _parallel(parallel) {}
 615 };
 616 
 617 // We de-virtualize the block-related calls below, since we know that our
 618 // space is a CompactibleFreeListSpace.
 619 
 620 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 621 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 622                                                  HeapWord* bottom,              \
 623                                                  HeapWord* top,                 \
 624                                                  ClosureType* cl) {             \
 625    if (_parallel) {                                                             \
 626      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 627    } else {                                                                     \
 628      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 629    }                                                                            \
 630 }                                                                               \
 631 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 632                                                      HeapWord* bottom,          \
 633                                                      HeapWord* top,             \
 634                                                      ClosureType* cl) {         \
 635   /* Skip parts that are before "mr", in case "block_start" sent us             \
 636      back too far. */                                                           \
 637   HeapWord* mr_start = mr.start();                                              \
 638   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 639   HeapWord* next = bottom + bot_size;                                           \
 640   while (next < mr_start) {                                                     \
 641     bottom = next;                                                              \
 642     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 643     next = bottom + bot_size;                                                   \
 644   }                                                                             \
 645                                                                                 \
 646   while (bottom < top) {                                                        \
 647     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 648         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 649                     oop(bottom)) &&                                             \
 650         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 651       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 652       bottom += _cfls->adjustObjectSize(word_sz);                               \
 653     } else {                                                                    \
 654       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 655     }                                                                           \
 656   }                                                                             \
 657 }                                                                               \
 658 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 659                                                        HeapWord* bottom,        \
 660                                                        HeapWord* top,           \
 661                                                        ClosureType* cl) {       \
 662   /* Skip parts that are before "mr", in case "block_start" sent us             \
 663      back too far. */                                                           \
 664   HeapWord* mr_start = mr.start();                                              \
 665   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 666   HeapWord* next = bottom + bot_size;                                           \
 667   while (next < mr_start) {                                                     \
 668     bottom = next;                                                              \
 669     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 670     next = bottom + bot_size;                                                   \
 671   }                                                                             \
 672                                                                                 \
 673   while (bottom < top) {                                                        \
 674     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 675         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 676                     oop(bottom)) &&                                             \
 677         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 678       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 679       bottom += _cfls->adjustObjectSize(word_sz);                               \
 680     } else {                                                                    \
 681       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 682     }                                                                           \
 683   }                                                                             \
 684 }
 685 
 686 // (There are only two of these, rather than N, because the split is due
 687 // only to the introduction of the FilteringClosure, a local part of the
 688 // impl of this abstraction.)
 689 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 690 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 691 
 692 DirtyCardToOopClosure*
 693 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 694                                       CardTableModRefBS::PrecisionStyle precision,
 695                                       HeapWord* boundary,
 696                                       bool parallel) {
 697   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 698 }
 699 
 700 
 701 // Note on locking for the space iteration functions:
 702 // since the collector's iteration activities are concurrent with
 703 // allocation activities by mutators, absent a suitable mutual exclusion
 704 // mechanism the iterators may go awry. For instance a block being iterated
 705 // may suddenly be allocated or divided up and part of it allocated and
 706 // so on.
 707 
 708 // Apply the given closure to each block in the space.
 709 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 710   assert_lock_strong(freelistLock());
 711   HeapWord *cur, *limit;
 712   for (cur = bottom(), limit = end(); cur < limit;
 713        cur += cl->do_blk_careful(cur));
 714 }
 715 
 716 // Apply the given closure to each block in the space.
 717 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 718   assert_lock_strong(freelistLock());
 719   HeapWord *cur, *limit;
 720   for (cur = bottom(), limit = end(); cur < limit;
 721        cur += cl->do_blk(cur));
 722 }
 723 
 724 // Apply the given closure to each oop in the space.
 725 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 726   assert_lock_strong(freelistLock());
 727   HeapWord *cur, *limit;
 728   size_t curSize;
 729   for (cur = bottom(), limit = end(); cur < limit;
 730        cur += curSize) {
 731     curSize = block_size(cur);
 732     if (block_is_obj(cur)) {
 733       oop(cur)->oop_iterate(cl);
 734     }
 735   }
 736 }
 737 
 738 // NOTE: In the following methods, in order to safely be able to
 739 // apply the closure to an object, we need to be sure that the
 740 // object has been initialized. We are guaranteed that an object
 741 // is initialized if we are holding the Heap_lock with the
 742 // world stopped.
 743 void CompactibleFreeListSpace::verify_objects_initialized() const {
 744   if (is_init_completed()) {
 745     assert_locked_or_safepoint(Heap_lock);
 746     if (Universe::is_fully_initialized()) {
 747       guarantee(SafepointSynchronize::is_at_safepoint(),
 748                 "Required for objects to be initialized");
 749     }
 750   } // else make a concession at vm start-up
 751 }
 752 
 753 // Apply the given closure to each object in the space
 754 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 755   assert_lock_strong(freelistLock());
 756   NOT_PRODUCT(verify_objects_initialized());
 757   HeapWord *cur, *limit;
 758   size_t curSize;
 759   for (cur = bottom(), limit = end(); cur < limit;
 760        cur += curSize) {
 761     curSize = block_size(cur);
 762     if (block_is_obj(cur)) {
 763       blk->do_object(oop(cur));
 764     }
 765   }
 766 }
 767 
 768 // Apply the given closure to each live object in the space
 769 //   The usage of CompactibleFreeListSpace
 770 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
 771 // objects in the space with references to objects that are no longer
 772 // valid.  For example, an object may reference another object
 773 // that has already been sweep up (collected).  This method uses
 774 // obj_is_alive() to determine whether it is safe to apply the closure to
 775 // an object.  See obj_is_alive() for details on how liveness of an
 776 // object is decided.
 777 
 778 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 779   assert_lock_strong(freelistLock());
 780   NOT_PRODUCT(verify_objects_initialized());
 781   HeapWord *cur, *limit;
 782   size_t curSize;
 783   for (cur = bottom(), limit = end(); cur < limit;
 784        cur += curSize) {
 785     curSize = block_size(cur);
 786     if (block_is_obj(cur) && obj_is_alive(cur)) {
 787       blk->do_object(oop(cur));
 788     }
 789   }
 790 }
 791 
 792 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 793                                                   UpwardsObjectClosure* cl) {
 794   assert_locked(freelistLock());
 795   NOT_PRODUCT(verify_objects_initialized());
 796   assert(!mr.is_empty(), "Should be non-empty");
 797   // We use MemRegion(bottom(), end()) rather than used_region() below
 798   // because the two are not necessarily equal for some kinds of
 799   // spaces, in particular, certain kinds of free list spaces.
 800   // We could use the more complicated but more precise:
 801   // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
 802   // but the slight imprecision seems acceptable in the assertion check.
 803   assert(MemRegion(bottom(), end()).contains(mr),
 804          "Should be within used space");
 805   HeapWord* prev = cl->previous();   // max address from last time
 806   if (prev >= mr.end()) { // nothing to do
 807     return;
 808   }
 809   // This assert will not work when we go from cms space to perm
 810   // space, and use same closure. Easy fix deferred for later. XXX YSR
 811   // assert(prev == NULL || contains(prev), "Should be within space");
 812 
 813   bool last_was_obj_array = false;
 814   HeapWord *blk_start_addr, *region_start_addr;
 815   if (prev > mr.start()) {
 816     region_start_addr = prev;
 817     blk_start_addr    = prev;
 818     // The previous invocation may have pushed "prev" beyond the
 819     // last allocated block yet there may be still be blocks
 820     // in this region due to a particular coalescing policy.
 821     // Relax the assertion so that the case where the unallocated
 822     // block is maintained and "prev" is beyond the unallocated
 823     // block does not cause the assertion to fire.
 824     assert((BlockOffsetArrayUseUnallocatedBlock &&
 825             (!is_in(prev))) ||
 826            (blk_start_addr == block_start(region_start_addr)), "invariant");
 827   } else {
 828     region_start_addr = mr.start();
 829     blk_start_addr    = block_start(region_start_addr);
 830   }
 831   HeapWord* region_end_addr = mr.end();
 832   MemRegion derived_mr(region_start_addr, region_end_addr);
 833   while (blk_start_addr < region_end_addr) {
 834     const size_t size = block_size(blk_start_addr);
 835     if (block_is_obj(blk_start_addr)) {
 836       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 837     } else {
 838       last_was_obj_array = false;
 839     }
 840     blk_start_addr += size;
 841   }
 842   if (!last_was_obj_array) {
 843     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 844            "Should be within (closed) used space");
 845     assert(blk_start_addr > prev, "Invariant");
 846     cl->set_previous(blk_start_addr); // min address for next time
 847   }
 848 }
 849 
 850 // Callers of this iterator beware: The closure application should
 851 // be robust in the face of uninitialized objects and should (always)
 852 // return a correct size so that the next addr + size below gives us a
 853 // valid block boundary. [See for instance,
 854 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 855 // in ConcurrentMarkSweepGeneration.cpp.]
 856 HeapWord*
 857 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 858   ObjectClosureCareful* cl) {
 859   assert_lock_strong(freelistLock());
 860   // Can't use used_region() below because it may not necessarily
 861   // be the same as [bottom(),end()); although we could
 862   // use [used_region().start(),align_up(used_region().end(),CardSize)),
 863   // that appears too cumbersome, so we just do the simpler check
 864   // in the assertion below.
 865   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 866          "mr should be non-empty and within used space");
 867   HeapWord *addr, *end;
 868   size_t size;
 869   for (addr = block_start_careful(mr.start()), end  = mr.end();
 870        addr < end; addr += size) {
 871     FreeChunk* fc = (FreeChunk*)addr;
 872     if (fc->is_free()) {
 873       // Since we hold the free list lock, which protects direct
 874       // allocation in this generation by mutators, a free object
 875       // will remain free throughout this iteration code.
 876       size = fc->size();
 877     } else {
 878       // Note that the object need not necessarily be initialized,
 879       // because (for instance) the free list lock does NOT protect
 880       // object initialization. The closure application below must
 881       // therefore be correct in the face of uninitialized objects.
 882       size = cl->do_object_careful_m(oop(addr), mr);
 883       if (size == 0) {
 884         // An unparsable object found. Signal early termination.
 885         return addr;
 886       }
 887     }
 888   }
 889   return NULL;
 890 }
 891 
 892 
 893 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 894   NOT_PRODUCT(verify_objects_initialized());
 895   return _bt.block_start(p);
 896 }
 897 
 898 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 899   return _bt.block_start_careful(p);
 900 }
 901 
 902 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 903   NOT_PRODUCT(verify_objects_initialized());
 904   // This must be volatile, or else there is a danger that the compiler
 905   // will compile the code below into a sometimes-infinite loop, by keeping
 906   // the value read the first time in a register.
 907   while (true) {
 908     // We must do this until we get a consistent view of the object.
 909     if (FreeChunk::indicatesFreeChunk(p)) {
 910       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 911       size_t res = fc->size();
 912 
 913       // Bugfix for systems with weak memory model (PPC64/IA64). The
 914       // block's free bit was set and we have read the size of the
 915       // block. Acquire and check the free bit again. If the block is
 916       // still free, the read size is correct.
 917       OrderAccess::acquire();
 918 
 919       // If the object is still a free chunk, return the size, else it
 920       // has been allocated so try again.
 921       if (FreeChunk::indicatesFreeChunk(p)) {
 922         assert(res != 0, "Block size should not be 0");
 923         return res;
 924       }
 925     } else {
 926       // Ensure klass read before size.
 927       Klass* k = oop(p)->klass_or_null_acquire();
 928       if (k != NULL) {
 929         assert(k->is_klass(), "Should really be klass oop.");
 930         oop o = (oop)p;
 931         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 932 
 933         size_t res = o->size_given_klass(k);
 934         res = adjustObjectSize(res);
 935         assert(res != 0, "Block size should not be 0");
 936         return res;
 937       }
 938     }
 939   }
 940 }
 941 
 942 // TODO: Now that is_parsable is gone, we should combine these two functions.
 943 // A variant of the above that uses the Printezis bits for
 944 // unparsable but allocated objects. This avoids any possible
 945 // stalls waiting for mutators to initialize objects, and is
 946 // thus potentially faster than the variant above. However,
 947 // this variant may return a zero size for a block that is
 948 // under mutation and for which a consistent size cannot be
 949 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 950 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 951                                                      const CMSCollector* c)
 952 const {
 953   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 954   // This must be volatile, or else there is a danger that the compiler
 955   // will compile the code below into a sometimes-infinite loop, by keeping
 956   // the value read the first time in a register.
 957   DEBUG_ONLY(uint loops = 0;)
 958   while (true) {
 959     // We must do this until we get a consistent view of the object.
 960     if (FreeChunk::indicatesFreeChunk(p)) {
 961       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 962       size_t res = fc->size();
 963 
 964       // Bugfix for systems with weak memory model (PPC64/IA64). The
 965       // free bit of the block was set and we have read the size of
 966       // the block. Acquire and check the free bit again. If the
 967       // block is still free, the read size is correct.
 968       OrderAccess::acquire();
 969 
 970       if (FreeChunk::indicatesFreeChunk(p)) {
 971         assert(res != 0, "Block size should not be 0");
 972         assert(loops == 0, "Should be 0");
 973         return res;
 974       }
 975     } else {
 976       // Ensure klass read before size.
 977       Klass* k = oop(p)->klass_or_null_acquire();
 978       if (k != NULL) {
 979         assert(k->is_klass(), "Should really be klass oop.");
 980         oop o = (oop)p;
 981         assert(o->is_oop(), "Should be an oop");
 982 
 983         size_t res = o->size_given_klass(k);
 984         res = adjustObjectSize(res);
 985         assert(res != 0, "Block size should not be 0");
 986         return res;
 987       } else {
 988         // May return 0 if P-bits not present.
 989         return c->block_size_if_printezis_bits(p);
 990       }
 991     }
 992     assert(loops == 0, "Can loop at most once");
 993     DEBUG_ONLY(loops++;)
 994   }
 995 }
 996 
 997 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
 998   NOT_PRODUCT(verify_objects_initialized());
 999   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1000   FreeChunk* fc = (FreeChunk*)p;
1001   if (fc->is_free()) {
1002     return fc->size();
1003   } else {
1004     // Ignore mark word because this may be a recently promoted
1005     // object whose mark word is used to chain together grey
1006     // objects (the last one would have a null value).
1007     assert(oop(p)->is_oop(true), "Should be an oop");
1008     return adjustObjectSize(oop(p)->size());
1009   }
1010 }
1011 
1012 // This implementation assumes that the property of "being an object" is
1013 // stable.  But being a free chunk may not be (because of parallel
1014 // promotion.)
1015 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1016   FreeChunk* fc = (FreeChunk*)p;
1017   assert(is_in_reserved(p), "Should be in space");
1018   if (FreeChunk::indicatesFreeChunk(p)) return false;
1019   Klass* k = oop(p)->klass_or_null_acquire();
1020   if (k != NULL) {
1021     // Ignore mark word because it may have been used to
1022     // chain together promoted objects (the last one
1023     // would have a null value).
1024     assert(oop(p)->is_oop(true), "Should be an oop");
1025     return true;
1026   } else {
1027     return false;  // Was not an object at the start of collection.
1028   }
1029 }
1030 
1031 // Check if the object is alive. This fact is checked either by consulting
1032 // the main marking bitmap in the sweeping phase or, if it's a permanent
1033 // generation and we're not in the sweeping phase, by checking the
1034 // perm_gen_verify_bit_map where we store the "deadness" information if
1035 // we did not sweep the perm gen in the most recent previous GC cycle.
1036 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1037   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1038          "Else races are possible");
1039   assert(block_is_obj(p), "The address should point to an object");
1040 
1041   // If we're sweeping, we use object liveness information from the main bit map
1042   // for both perm gen and old gen.
1043   // We don't need to lock the bitmap (live_map or dead_map below), because
1044   // EITHER we are in the middle of the sweeping phase, and the
1045   // main marking bit map (live_map below) is locked,
1046   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1047   // is stable, because it's mutated only in the sweeping phase.
1048   // NOTE: This method is also used by jmap where, if class unloading is
1049   // off, the results can return "false" for legitimate perm objects,
1050   // when we are not in the midst of a sweeping phase, which can result
1051   // in jmap not reporting certain perm gen objects. This will be moot
1052   // if/when the perm gen goes away in the future.
1053   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1054     CMSBitMap* live_map = _collector->markBitMap();
1055     return live_map->par_isMarked((HeapWord*) p);
1056   }
1057   return true;
1058 }
1059 
1060 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1061   FreeChunk* fc = (FreeChunk*)p;
1062   assert(is_in_reserved(p), "Should be in space");
1063   assert(_bt.block_start(p) == p, "Should be a block boundary");
1064   if (!fc->is_free()) {
1065     // Ignore mark word because it may have been used to
1066     // chain together promoted objects (the last one
1067     // would have a null value).
1068     assert(oop(p)->is_oop(true), "Should be an oop");
1069     return true;
1070   }
1071   return false;
1072 }
1073 
1074 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1075 // approximate answer if you don't hold the freelistlock when you call this.
1076 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1077   size_t size = 0;
1078   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1079     debug_only(
1080       // We may be calling here without the lock in which case we
1081       // won't do this modest sanity check.
1082       if (freelistLock()->owned_by_self()) {
1083         size_t total_list_size = 0;
1084         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1085           fc = fc->next()) {
1086           total_list_size += i;
1087         }
1088         assert(total_list_size == i * _indexedFreeList[i].count(),
1089                "Count in list is incorrect");
1090       }
1091     )
1092     size += i * _indexedFreeList[i].count();
1093   }
1094   return size;
1095 }
1096 
1097 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1098   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1099   return allocate(size);
1100 }
1101 
1102 HeapWord*
1103 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1104   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1105 }
1106 
1107 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1108   assert_lock_strong(freelistLock());
1109   HeapWord* res = NULL;
1110   assert(size == adjustObjectSize(size),
1111          "use adjustObjectSize() before calling into allocate()");
1112 
1113   res = allocate_adaptive_freelists(size);
1114 
1115   if (res != NULL) {
1116     // check that res does lie in this space!
1117     assert(is_in_reserved(res), "Not in this space!");
1118     assert(is_aligned((void*)res), "alignment check");
1119 
1120     FreeChunk* fc = (FreeChunk*)res;
1121     fc->markNotFree();
1122     assert(!fc->is_free(), "shouldn't be marked free");
1123     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1124     // Verify that the block offset table shows this to
1125     // be a single block, but not one which is unallocated.
1126     _bt.verify_single_block(res, size);
1127     _bt.verify_not_unallocated(res, size);
1128     // mangle a just allocated object with a distinct pattern.
1129     debug_only(fc->mangleAllocated(size));
1130   }
1131 
1132   return res;
1133 }
1134 
1135 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1136   assert_lock_strong(freelistLock());
1137   HeapWord* res = NULL;
1138   assert(size == adjustObjectSize(size),
1139          "use adjustObjectSize() before calling into allocate()");
1140 
1141   // Strategy
1142   //   if small
1143   //     exact size from small object indexed list if small
1144   //     small or large linear allocation block (linAB) as appropriate
1145   //     take from lists of greater sized chunks
1146   //   else
1147   //     dictionary
1148   //     small or large linear allocation block if it has the space
1149   // Try allocating exact size from indexTable first
1150   if (size < IndexSetSize) {
1151     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1152     if(res != NULL) {
1153       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1154         "Not removed from free list");
1155       // no block offset table adjustment is necessary on blocks in
1156       // the indexed lists.
1157 
1158     // Try allocating from the small LinAB
1159     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1160         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1161         // if successful, the above also adjusts block offset table
1162         // Note that this call will refill the LinAB to
1163         // satisfy the request.  This is different that
1164         // evm.
1165         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1166     } else {
1167       // Raid the exact free lists larger than size, even if they are not
1168       // overpopulated.
1169       res = (HeapWord*) getChunkFromGreater(size);
1170     }
1171   } else {
1172     // Big objects get allocated directly from the dictionary.
1173     res = (HeapWord*) getChunkFromDictionaryExact(size);
1174     if (res == NULL) {
1175       // Try hard not to fail since an allocation failure will likely
1176       // trigger a synchronous GC.  Try to get the space from the
1177       // allocation blocks.
1178       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1179     }
1180   }
1181 
1182   return res;
1183 }
1184 
1185 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1186 // when promoting obj.
1187 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1188   // Depending on the object size, expansion may require refilling either a
1189   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1190   // is added because the dictionary may over-allocate to avoid fragmentation.
1191   size_t space = obj_size;
1192   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1193   return space;
1194 }
1195 
1196 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1197   FreeChunk* ret;
1198 
1199   assert(numWords >= MinChunkSize, "Size is less than minimum");
1200   assert(linearAllocationWouldFail() || bestFitFirst(),
1201     "Should not be here");
1202 
1203   size_t i;
1204   size_t currSize = numWords + MinChunkSize;
1205   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1206   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1207     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1208     if (fl->head()) {
1209       ret = getFromListGreater(fl, numWords);
1210       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1211       return ret;
1212     }
1213   }
1214 
1215   currSize = MAX2((size_t)SmallForDictionary,
1216                   (size_t)(numWords + MinChunkSize));
1217 
1218   /* Try to get a chunk that satisfies request, while avoiding
1219      fragmentation that can't be handled. */
1220   {
1221     ret =  dictionary()->get_chunk(currSize);
1222     if (ret != NULL) {
1223       assert(ret->size() - numWords >= MinChunkSize,
1224              "Chunk is too small");
1225       _bt.allocated((HeapWord*)ret, ret->size());
1226       /* Carve returned chunk. */
1227       (void) splitChunkAndReturnRemainder(ret, numWords);
1228       /* Label this as no longer a free chunk. */
1229       assert(ret->is_free(), "This chunk should be free");
1230       ret->link_prev(NULL);
1231     }
1232     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1233     return ret;
1234   }
1235   ShouldNotReachHere();
1236 }
1237 
1238 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1239   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1240   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1241 }
1242 
1243 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1244   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1245          (_smallLinearAllocBlock._word_size == fc->size()),
1246          "Linear allocation block shows incorrect size");
1247   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1248           (_smallLinearAllocBlock._word_size == fc->size()));
1249 }
1250 
1251 // Check if the purported free chunk is present either as a linear
1252 // allocation block, the size-indexed table of (smaller) free blocks,
1253 // or the larger free blocks kept in the binary tree dictionary.
1254 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1255   if (verify_chunk_is_linear_alloc_block(fc)) {
1256     return true;
1257   } else if (fc->size() < IndexSetSize) {
1258     return verifyChunkInIndexedFreeLists(fc);
1259   } else {
1260     return dictionary()->verify_chunk_in_free_list(fc);
1261   }
1262 }
1263 
1264 #ifndef PRODUCT
1265 void CompactibleFreeListSpace::assert_locked() const {
1266   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1267 }
1268 
1269 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1270   CMSLockVerifier::assert_locked(lock);
1271 }
1272 #endif
1273 
1274 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1275   // In the parallel case, the main thread holds the free list lock
1276   // on behalf the parallel threads.
1277   FreeChunk* fc;
1278   {
1279     // If GC is parallel, this might be called by several threads.
1280     // This should be rare enough that the locking overhead won't affect
1281     // the sequential code.
1282     MutexLockerEx x(parDictionaryAllocLock(),
1283                     Mutex::_no_safepoint_check_flag);
1284     fc = getChunkFromDictionary(size);
1285   }
1286   if (fc != NULL) {
1287     fc->dontCoalesce();
1288     assert(fc->is_free(), "Should be free, but not coalescable");
1289     // Verify that the block offset table shows this to
1290     // be a single block, but not one which is unallocated.
1291     _bt.verify_single_block((HeapWord*)fc, fc->size());
1292     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1293   }
1294   return fc;
1295 }
1296 
1297 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1298   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1299   assert_locked();
1300 
1301   // if we are tracking promotions, then first ensure space for
1302   // promotion (including spooling space for saving header if necessary).
1303   // then allocate and copy, then track promoted info if needed.
1304   // When tracking (see PromotionInfo::track()), the mark word may
1305   // be displaced and in this case restoration of the mark word
1306   // occurs in the (oop_since_save_marks_)iterate phase.
1307   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1308     return NULL;
1309   }
1310   // Call the allocate(size_t, bool) form directly to avoid the
1311   // additional call through the allocate(size_t) form.  Having
1312   // the compile inline the call is problematic because allocate(size_t)
1313   // is a virtual method.
1314   HeapWord* res = allocate(adjustObjectSize(obj_size));
1315   if (res != NULL) {
1316     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1317     // if we should be tracking promotions, do so.
1318     if (_promoInfo.tracking()) {
1319         _promoInfo.track((PromotedObject*)res);
1320     }
1321   }
1322   return oop(res);
1323 }
1324 
1325 HeapWord*
1326 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1327   assert_locked();
1328   assert(size >= MinChunkSize, "minimum chunk size");
1329   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1330     "maximum from smallLinearAllocBlock");
1331   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1332 }
1333 
1334 HeapWord*
1335 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1336                                                        size_t size) {
1337   assert_locked();
1338   assert(size >= MinChunkSize, "too small");
1339   HeapWord* res = NULL;
1340   // Try to do linear allocation from blk, making sure that
1341   if (blk->_word_size == 0) {
1342     // We have probably been unable to fill this either in the prologue or
1343     // when it was exhausted at the last linear allocation. Bail out until
1344     // next time.
1345     assert(blk->_ptr == NULL, "consistency check");
1346     return NULL;
1347   }
1348   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1349   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1350   if (res != NULL) return res;
1351 
1352   // about to exhaust this linear allocation block
1353   if (blk->_word_size == size) { // exactly satisfied
1354     res = blk->_ptr;
1355     _bt.allocated(res, blk->_word_size);
1356   } else if (size + MinChunkSize <= blk->_refillSize) {
1357     size_t sz = blk->_word_size;
1358     // Update _unallocated_block if the size is such that chunk would be
1359     // returned to the indexed free list.  All other chunks in the indexed
1360     // free lists are allocated from the dictionary so that _unallocated_block
1361     // has already been adjusted for them.  Do it here so that the cost
1362     // for all chunks added back to the indexed free lists.
1363     if (sz < SmallForDictionary) {
1364       _bt.allocated(blk->_ptr, sz);
1365     }
1366     // Return the chunk that isn't big enough, and then refill below.
1367     addChunkToFreeLists(blk->_ptr, sz);
1368     split_birth(sz);
1369     // Don't keep statistics on adding back chunk from a LinAB.
1370   } else {
1371     // A refilled block would not satisfy the request.
1372     return NULL;
1373   }
1374 
1375   blk->_ptr = NULL; blk->_word_size = 0;
1376   refillLinearAllocBlock(blk);
1377   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1378          "block was replenished");
1379   if (res != NULL) {
1380     split_birth(size);
1381     repairLinearAllocBlock(blk);
1382   } else if (blk->_ptr != NULL) {
1383     res = blk->_ptr;
1384     size_t blk_size = blk->_word_size;
1385     blk->_word_size -= size;
1386     blk->_ptr  += size;
1387     split_birth(size);
1388     repairLinearAllocBlock(blk);
1389     // Update BOT last so that other (parallel) GC threads see a consistent
1390     // view of the BOT and free blocks.
1391     // Above must occur before BOT is updated below.
1392     OrderAccess::storestore();
1393     _bt.split_block(res, blk_size, size);  // adjust block offset table
1394   }
1395   return res;
1396 }
1397 
1398 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1399                                         LinearAllocBlock* blk,
1400                                         size_t size) {
1401   assert_locked();
1402   assert(size >= MinChunkSize, "too small");
1403 
1404   HeapWord* res = NULL;
1405   // This is the common case.  Keep it simple.
1406   if (blk->_word_size >= size + MinChunkSize) {
1407     assert(blk->_ptr != NULL, "consistency check");
1408     res = blk->_ptr;
1409     // Note that the BOT is up-to-date for the linAB before allocation.  It
1410     // indicates the start of the linAB.  The split_block() updates the
1411     // BOT for the linAB after the allocation (indicates the start of the
1412     // next chunk to be allocated).
1413     size_t blk_size = blk->_word_size;
1414     blk->_word_size -= size;
1415     blk->_ptr  += size;
1416     split_birth(size);
1417     repairLinearAllocBlock(blk);
1418     // Update BOT last so that other (parallel) GC threads see a consistent
1419     // view of the BOT and free blocks.
1420     // Above must occur before BOT is updated below.
1421     OrderAccess::storestore();
1422     _bt.split_block(res, blk_size, size);  // adjust block offset table
1423     _bt.allocated(res, size);
1424   }
1425   return res;
1426 }
1427 
1428 FreeChunk*
1429 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1430   assert_locked();
1431   assert(size < SmallForDictionary, "just checking");
1432   FreeChunk* res;
1433   res = _indexedFreeList[size].get_chunk_at_head();
1434   if (res == NULL) {
1435     res = getChunkFromIndexedFreeListHelper(size);
1436   }
1437   _bt.verify_not_unallocated((HeapWord*) res, size);
1438   assert(res == NULL || res->size() == size, "Incorrect block size");
1439   return res;
1440 }
1441 
1442 FreeChunk*
1443 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1444   bool replenish) {
1445   assert_locked();
1446   FreeChunk* fc = NULL;
1447   if (size < SmallForDictionary) {
1448     assert(_indexedFreeList[size].head() == NULL ||
1449       _indexedFreeList[size].surplus() <= 0,
1450       "List for this size should be empty or under populated");
1451     // Try best fit in exact lists before replenishing the list
1452     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1453       // Replenish list.
1454       //
1455       // Things tried that failed.
1456       //   Tried allocating out of the two LinAB's first before
1457       // replenishing lists.
1458       //   Tried small linAB of size 256 (size in indexed list)
1459       // and replenishing indexed lists from the small linAB.
1460       //
1461       FreeChunk* newFc = NULL;
1462       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1463       if (replenish_size < SmallForDictionary) {
1464         // Do not replenish from an underpopulated size.
1465         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1466             _indexedFreeList[replenish_size].head() != NULL) {
1467           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1468         } else if (bestFitFirst()) {
1469           newFc = bestFitSmall(replenish_size);
1470         }
1471       }
1472       if (newFc == NULL && replenish_size > size) {
1473         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1474         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1475       }
1476       // Note: The stats update re split-death of block obtained above
1477       // will be recorded below precisely when we know we are going to
1478       // be actually splitting it into more than one pieces below.
1479       if (newFc != NULL) {
1480         if  (replenish || CMSReplenishIntermediate) {
1481           // Replenish this list and return one block to caller.
1482           size_t i;
1483           FreeChunk *curFc, *nextFc;
1484           size_t num_blk = newFc->size() / size;
1485           assert(num_blk >= 1, "Smaller than requested?");
1486           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1487           if (num_blk > 1) {
1488             // we are sure we will be splitting the block just obtained
1489             // into multiple pieces; record the split-death of the original
1490             splitDeath(replenish_size);
1491           }
1492           // carve up and link blocks 0, ..., num_blk - 2
1493           // The last chunk is not added to the lists but is returned as the
1494           // free chunk.
1495           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1496                i = 0;
1497                i < (num_blk - 1);
1498                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1499                i++) {
1500             curFc->set_size(size);
1501             // Don't record this as a return in order to try and
1502             // determine the "returns" from a GC.
1503             _bt.verify_not_unallocated((HeapWord*) fc, size);
1504             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1505             _bt.mark_block((HeapWord*)curFc, size);
1506             split_birth(size);
1507             // Don't record the initial population of the indexed list
1508             // as a split birth.
1509           }
1510 
1511           // check that the arithmetic was OK above
1512           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1513             "inconsistency in carving newFc");
1514           curFc->set_size(size);
1515           _bt.mark_block((HeapWord*)curFc, size);
1516           split_birth(size);
1517           fc = curFc;
1518         } else {
1519           // Return entire block to caller
1520           fc = newFc;
1521         }
1522       }
1523     }
1524   } else {
1525     // Get a free chunk from the free chunk dictionary to be returned to
1526     // replenish the indexed free list.
1527     fc = getChunkFromDictionaryExact(size);
1528   }
1529   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1530   return fc;
1531 }
1532 
1533 FreeChunk*
1534 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1535   assert_locked();
1536   FreeChunk* fc = _dictionary->get_chunk(size,
1537                                          FreeBlockDictionary<FreeChunk>::atLeast);
1538   if (fc == NULL) {
1539     return NULL;
1540   }
1541   _bt.allocated((HeapWord*)fc, fc->size());
1542   if (fc->size() >= size + MinChunkSize) {
1543     fc = splitChunkAndReturnRemainder(fc, size);
1544   }
1545   assert(fc->size() >= size, "chunk too small");
1546   assert(fc->size() < size + MinChunkSize, "chunk too big");
1547   _bt.verify_single_block((HeapWord*)fc, fc->size());
1548   return fc;
1549 }
1550 
1551 FreeChunk*
1552 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1553   assert_locked();
1554   FreeChunk* fc = _dictionary->get_chunk(size,
1555                                          FreeBlockDictionary<FreeChunk>::atLeast);
1556   if (fc == NULL) {
1557     return fc;
1558   }
1559   _bt.allocated((HeapWord*)fc, fc->size());
1560   if (fc->size() == size) {
1561     _bt.verify_single_block((HeapWord*)fc, size);
1562     return fc;
1563   }
1564   assert(fc->size() > size, "get_chunk() guarantee");
1565   if (fc->size() < size + MinChunkSize) {
1566     // Return the chunk to the dictionary and go get a bigger one.
1567     returnChunkToDictionary(fc);
1568     fc = _dictionary->get_chunk(size + MinChunkSize,
1569                                 FreeBlockDictionary<FreeChunk>::atLeast);
1570     if (fc == NULL) {
1571       return NULL;
1572     }
1573     _bt.allocated((HeapWord*)fc, fc->size());
1574   }
1575   assert(fc->size() >= size + MinChunkSize, "tautology");
1576   fc = splitChunkAndReturnRemainder(fc, size);
1577   assert(fc->size() == size, "chunk is wrong size");
1578   _bt.verify_single_block((HeapWord*)fc, size);
1579   return fc;
1580 }
1581 
1582 void
1583 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1584   assert_locked();
1585 
1586   size_t size = chunk->size();
1587   _bt.verify_single_block((HeapWord*)chunk, size);
1588   // adjust _unallocated_block downward, as necessary
1589   _bt.freed((HeapWord*)chunk, size);
1590   _dictionary->return_chunk(chunk);
1591 #ifndef PRODUCT
1592   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1593     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1594     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1595     tl->verify_stats();
1596   }
1597 #endif // PRODUCT
1598 }
1599 
1600 void
1601 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1602   assert_locked();
1603   size_t size = fc->size();
1604   _bt.verify_single_block((HeapWord*) fc, size);
1605   _bt.verify_not_unallocated((HeapWord*) fc, size);
1606   _indexedFreeList[size].return_chunk_at_tail(fc);
1607 #ifndef PRODUCT
1608   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1609      _indexedFreeList[size].verify_stats();
1610   }
1611 #endif // PRODUCT
1612 }
1613 
1614 // Add chunk to end of last block -- if it's the largest
1615 // block -- and update BOT and census data. We would
1616 // of course have preferred to coalesce it with the
1617 // last block, but it's currently less expensive to find the
1618 // largest block than it is to find the last.
1619 void
1620 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1621   HeapWord* chunk, size_t     size) {
1622   // check that the chunk does lie in this space!
1623   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1624   // One of the parallel gc task threads may be here
1625   // whilst others are allocating.
1626   Mutex* lock = &_parDictionaryAllocLock;
1627   FreeChunk* ec;
1628   {
1629     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1630     ec = dictionary()->find_largest_dict();  // get largest block
1631     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1632       // It's a coterminal block - we can coalesce.
1633       size_t old_size = ec->size();
1634       coalDeath(old_size);
1635       removeChunkFromDictionary(ec);
1636       size += old_size;
1637     } else {
1638       ec = (FreeChunk*)chunk;
1639     }
1640   }
1641   ec->set_size(size);
1642   debug_only(ec->mangleFreed(size));
1643   if (size < SmallForDictionary) {
1644     lock = _indexedFreeListParLocks[size];
1645   }
1646   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1647   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1648   // record the birth under the lock since the recording involves
1649   // manipulation of the list on which the chunk lives and
1650   // if the chunk is allocated and is the last on the list,
1651   // the list can go away.
1652   coalBirth(size);
1653 }
1654 
1655 void
1656 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1657                                               size_t     size) {
1658   // check that the chunk does lie in this space!
1659   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1660   assert_locked();
1661   _bt.verify_single_block(chunk, size);
1662 
1663   FreeChunk* fc = (FreeChunk*) chunk;
1664   fc->set_size(size);
1665   debug_only(fc->mangleFreed(size));
1666   if (size < SmallForDictionary) {
1667     returnChunkToFreeList(fc);
1668   } else {
1669     returnChunkToDictionary(fc);
1670   }
1671 }
1672 
1673 void
1674 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1675   size_t size, bool coalesced) {
1676   assert_locked();
1677   assert(chunk != NULL, "null chunk");
1678   if (coalesced) {
1679     // repair BOT
1680     _bt.single_block(chunk, size);
1681   }
1682   addChunkToFreeLists(chunk, size);
1683 }
1684 
1685 // We _must_ find the purported chunk on our free lists;
1686 // we assert if we don't.
1687 void
1688 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1689   size_t size = fc->size();
1690   assert_locked();
1691   debug_only(verifyFreeLists());
1692   if (size < SmallForDictionary) {
1693     removeChunkFromIndexedFreeList(fc);
1694   } else {
1695     removeChunkFromDictionary(fc);
1696   }
1697   _bt.verify_single_block((HeapWord*)fc, size);
1698   debug_only(verifyFreeLists());
1699 }
1700 
1701 void
1702 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1703   size_t size = fc->size();
1704   assert_locked();
1705   assert(fc != NULL, "null chunk");
1706   _bt.verify_single_block((HeapWord*)fc, size);
1707   _dictionary->remove_chunk(fc);
1708   // adjust _unallocated_block upward, as necessary
1709   _bt.allocated((HeapWord*)fc, size);
1710 }
1711 
1712 void
1713 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1714   assert_locked();
1715   size_t size = fc->size();
1716   _bt.verify_single_block((HeapWord*)fc, size);
1717   NOT_PRODUCT(
1718     if (FLSVerifyIndexTable) {
1719       verifyIndexedFreeList(size);
1720     }
1721   )
1722   _indexedFreeList[size].remove_chunk(fc);
1723   NOT_PRODUCT(
1724     if (FLSVerifyIndexTable) {
1725       verifyIndexedFreeList(size);
1726     }
1727   )
1728 }
1729 
1730 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1731   /* A hint is the next larger size that has a surplus.
1732      Start search at a size large enough to guarantee that
1733      the excess is >= MIN_CHUNK. */
1734   size_t start = align_object_size(numWords + MinChunkSize);
1735   if (start < IndexSetSize) {
1736     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1737     size_t    hint = _indexedFreeList[start].hint();
1738     while (hint < IndexSetSize) {
1739       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1740       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1741       if (fl->surplus() > 0 && fl->head() != NULL) {
1742         // Found a list with surplus, reset original hint
1743         // and split out a free chunk which is returned.
1744         _indexedFreeList[start].set_hint(hint);
1745         FreeChunk* res = getFromListGreater(fl, numWords);
1746         assert(res == NULL || res->is_free(),
1747           "Should be returning a free chunk");
1748         return res;
1749       }
1750       hint = fl->hint(); /* keep looking */
1751     }
1752     /* None found. */
1753     it[start].set_hint(IndexSetSize);
1754   }
1755   return NULL;
1756 }
1757 
1758 /* Requires fl->size >= numWords + MinChunkSize */
1759 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1760   size_t numWords) {
1761   FreeChunk *curr = fl->head();
1762   size_t oldNumWords = curr->size();
1763   assert(numWords >= MinChunkSize, "Word size is too small");
1764   assert(curr != NULL, "List is empty");
1765   assert(oldNumWords >= numWords + MinChunkSize,
1766         "Size of chunks in the list is too small");
1767 
1768   fl->remove_chunk(curr);
1769   // recorded indirectly by splitChunkAndReturnRemainder -
1770   // smallSplit(oldNumWords, numWords);
1771   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1772   // Does anything have to be done for the remainder in terms of
1773   // fixing the card table?
1774   assert(new_chunk == NULL || new_chunk->is_free(),
1775     "Should be returning a free chunk");
1776   return new_chunk;
1777 }
1778 
1779 FreeChunk*
1780 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1781   size_t new_size) {
1782   assert_locked();
1783   size_t size = chunk->size();
1784   assert(size > new_size, "Split from a smaller block?");
1785   assert(is_aligned(chunk), "alignment problem");
1786   assert(size == adjustObjectSize(size), "alignment problem");
1787   size_t rem_sz = size - new_size;
1788   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1789   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1790   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1791   assert(is_aligned(ffc), "alignment problem");
1792   ffc->set_size(rem_sz);
1793   ffc->link_next(NULL);
1794   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1795   // Above must occur before BOT is updated below.
1796   // adjust block offset table
1797   OrderAccess::storestore();
1798   assert(chunk->is_free() && ffc->is_free(), "Error");
1799   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1800   if (rem_sz < SmallForDictionary) {
1801     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1802     bool is_par = Thread::current()->is_GC_task_thread();
1803     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1804     returnChunkToFreeList(ffc);
1805     split(size, rem_sz);
1806     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1807   } else {
1808     returnChunkToDictionary(ffc);
1809     split(size, rem_sz);
1810   }
1811   chunk->set_size(new_size);
1812   return chunk;
1813 }
1814 
1815 void
1816 CompactibleFreeListSpace::sweep_completed() {
1817   // Now that space is probably plentiful, refill linear
1818   // allocation blocks as needed.
1819   refillLinearAllocBlocksIfNeeded();
1820 }
1821 
1822 void
1823 CompactibleFreeListSpace::gc_prologue() {
1824   assert_locked();
1825   reportFreeListStatistics("Before GC:");
1826   refillLinearAllocBlocksIfNeeded();
1827 }
1828 
1829 void
1830 CompactibleFreeListSpace::gc_epilogue() {
1831   assert_locked();
1832   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1833   _promoInfo.stopTrackingPromotions();
1834   repairLinearAllocationBlocks();
1835   reportFreeListStatistics("After GC:");
1836 }
1837 
1838 // Iteration support, mostly delegated from a CMS generation
1839 
1840 void CompactibleFreeListSpace::save_marks() {
1841   assert(Thread::current()->is_VM_thread(),
1842          "Global variable should only be set when single-threaded");
1843   // Mark the "end" of the used space at the time of this call;
1844   // note, however, that promoted objects from this point
1845   // on are tracked in the _promoInfo below.
1846   set_saved_mark_word(unallocated_block());
1847 #ifdef ASSERT
1848   // Check the sanity of save_marks() etc.
1849   MemRegion ur    = used_region();
1850   MemRegion urasm = used_region_at_save_marks();
1851   assert(ur.contains(urasm),
1852          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1853          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1854          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1855 #endif
1856   // inform allocator that promotions should be tracked.
1857   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1858   _promoInfo.startTrackingPromotions();
1859 }
1860 
1861 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1862   assert(_promoInfo.tracking(), "No preceding save_marks?");
1863   return _promoInfo.noPromotions();
1864 }
1865 
1866 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1867                                                                             \
1868 void CompactibleFreeListSpace::                                             \
1869 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1870   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1871   /*                                                                        \
1872    * This also restores any displaced headers and removes the elements from \
1873    * the iteration set as they are processed, so that we have a clean slate \
1874    * at the end of the iteration. Note, thus, that if new objects are       \
1875    * promoted as a result of the iteration they are iterated over as well.  \
1876    */                                                                       \
1877   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1878 }
1879 
1880 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1881 
1882 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1883   return _smallLinearAllocBlock._word_size == 0;
1884 }
1885 
1886 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1887   // Fix up linear allocation blocks to look like free blocks
1888   repairLinearAllocBlock(&_smallLinearAllocBlock);
1889 }
1890 
1891 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1892   assert_locked();
1893   if (blk->_ptr != NULL) {
1894     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1895            "Minimum block size requirement");
1896     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1897     fc->set_size(blk->_word_size);
1898     fc->link_prev(NULL);   // mark as free
1899     fc->dontCoalesce();
1900     assert(fc->is_free(), "just marked it free");
1901     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1902   }
1903 }
1904 
1905 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1906   assert_locked();
1907   if (_smallLinearAllocBlock._ptr == NULL) {
1908     assert(_smallLinearAllocBlock._word_size == 0,
1909       "Size of linAB should be zero if the ptr is NULL");
1910     // Reset the linAB refill and allocation size limit.
1911     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1912   }
1913   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1914 }
1915 
1916 void
1917 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1918   assert_locked();
1919   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1920          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1921          "blk invariant");
1922   if (blk->_ptr == NULL) {
1923     refillLinearAllocBlock(blk);
1924   }
1925 }
1926 
1927 void
1928 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1929   assert_locked();
1930   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1931          "linear allocation block should be empty");
1932   FreeChunk* fc;
1933   if (blk->_refillSize < SmallForDictionary &&
1934       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1935     // A linAB's strategy might be to use small sizes to reduce
1936     // fragmentation but still get the benefits of allocation from a
1937     // linAB.
1938   } else {
1939     fc = getChunkFromDictionary(blk->_refillSize);
1940   }
1941   if (fc != NULL) {
1942     blk->_ptr  = (HeapWord*)fc;
1943     blk->_word_size = fc->size();
1944     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1945   }
1946 }
1947 
1948 // Support for compaction
1949 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1950   scan_and_forward(this, cp);
1951   // Prepare_for_compaction() uses the space between live objects
1952   // so that later phase can skip dead space quickly.  So verification
1953   // of the free lists doesn't work after.
1954 }
1955 
1956 void CompactibleFreeListSpace::adjust_pointers() {
1957   // In other versions of adjust_pointers(), a bail out
1958   // based on the amount of live data in the generation
1959   // (i.e., if 0, bail out) may be used.
1960   // Cannot test used() == 0 here because the free lists have already
1961   // been mangled by the compaction.
1962 
1963   scan_and_adjust_pointers(this);
1964   // See note about verification in prepare_for_compaction().
1965 }
1966 
1967 void CompactibleFreeListSpace::compact() {
1968   scan_and_compact(this);
1969 }
1970 
1971 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1972 // where fbs is free block sizes
1973 double CompactibleFreeListSpace::flsFrag() const {
1974   size_t itabFree = totalSizeInIndexedFreeLists();
1975   double frag = 0.0;
1976   size_t i;
1977 
1978   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1979     double sz  = i;
1980     frag      += _indexedFreeList[i].count() * (sz * sz);
1981   }
1982 
1983   double totFree = itabFree +
1984                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1985   if (totFree > 0) {
1986     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1987             (totFree * totFree));
1988     frag = (double)1.0  - frag;
1989   } else {
1990     assert(frag == 0.0, "Follows from totFree == 0");
1991   }
1992   return frag;
1993 }
1994 
1995 void CompactibleFreeListSpace::beginSweepFLCensus(
1996   float inter_sweep_current,
1997   float inter_sweep_estimate,
1998   float intra_sweep_estimate) {
1999   assert_locked();
2000   size_t i;
2001   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2002     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2003     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2004     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2005     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2006     fl->set_before_sweep(fl->count());
2007     fl->set_bfr_surp(fl->surplus());
2008   }
2009   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2010                                     inter_sweep_current,
2011                                     inter_sweep_estimate,
2012                                     intra_sweep_estimate);
2013 }
2014 
2015 void CompactibleFreeListSpace::setFLSurplus() {
2016   assert_locked();
2017   size_t i;
2018   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2019     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2020     fl->set_surplus(fl->count() -
2021                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2022   }
2023 }
2024 
2025 void CompactibleFreeListSpace::setFLHints() {
2026   assert_locked();
2027   size_t i;
2028   size_t h = IndexSetSize;
2029   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2030     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2031     fl->set_hint(h);
2032     if (fl->surplus() > 0) {
2033       h = i;
2034     }
2035   }
2036 }
2037 
2038 void CompactibleFreeListSpace::clearFLCensus() {
2039   assert_locked();
2040   size_t i;
2041   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2042     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2043     fl->set_prev_sweep(fl->count());
2044     fl->set_coal_births(0);
2045     fl->set_coal_deaths(0);
2046     fl->set_split_births(0);
2047     fl->set_split_deaths(0);
2048   }
2049 }
2050 
2051 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2052   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2053   setFLSurplus();
2054   setFLHints();
2055   printFLCensus(sweep_count);
2056   clearFLCensus();
2057   assert_locked();
2058   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2059 }
2060 
2061 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2062   if (size < SmallForDictionary) {
2063     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2064     return (fl->coal_desired() < 0) ||
2065            ((int)fl->count() > fl->coal_desired());
2066   } else {
2067     return dictionary()->coal_dict_over_populated(size);
2068   }
2069 }
2070 
2071 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2072   assert(size < SmallForDictionary, "Size too large for indexed list");
2073   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2074   fl->increment_coal_births();
2075   fl->increment_surplus();
2076 }
2077 
2078 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2079   assert(size < SmallForDictionary, "Size too large for indexed list");
2080   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2081   fl->increment_coal_deaths();
2082   fl->decrement_surplus();
2083 }
2084 
2085 void CompactibleFreeListSpace::coalBirth(size_t size) {
2086   if (size  < SmallForDictionary) {
2087     smallCoalBirth(size);
2088   } else {
2089     dictionary()->dict_census_update(size,
2090                                    false /* split */,
2091                                    true /* birth */);
2092   }
2093 }
2094 
2095 void CompactibleFreeListSpace::coalDeath(size_t size) {
2096   if(size  < SmallForDictionary) {
2097     smallCoalDeath(size);
2098   } else {
2099     dictionary()->dict_census_update(size,
2100                                    false /* split */,
2101                                    false /* birth */);
2102   }
2103 }
2104 
2105 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2106   assert(size < SmallForDictionary, "Size too large for indexed list");
2107   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2108   fl->increment_split_births();
2109   fl->increment_surplus();
2110 }
2111 
2112 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2113   assert(size < SmallForDictionary, "Size too large for indexed list");
2114   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2115   fl->increment_split_deaths();
2116   fl->decrement_surplus();
2117 }
2118 
2119 void CompactibleFreeListSpace::split_birth(size_t size) {
2120   if (size  < SmallForDictionary) {
2121     smallSplitBirth(size);
2122   } else {
2123     dictionary()->dict_census_update(size,
2124                                    true /* split */,
2125                                    true /* birth */);
2126   }
2127 }
2128 
2129 void CompactibleFreeListSpace::splitDeath(size_t size) {
2130   if (size  < SmallForDictionary) {
2131     smallSplitDeath(size);
2132   } else {
2133     dictionary()->dict_census_update(size,
2134                                    true /* split */,
2135                                    false /* birth */);
2136   }
2137 }
2138 
2139 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2140   size_t to2 = from - to1;
2141   splitDeath(from);
2142   split_birth(to1);
2143   split_birth(to2);
2144 }
2145 
2146 void CompactibleFreeListSpace::print() const {
2147   print_on(tty);
2148 }
2149 
2150 void CompactibleFreeListSpace::prepare_for_verify() {
2151   assert_locked();
2152   repairLinearAllocationBlocks();
2153   // Verify that the SpoolBlocks look like free blocks of
2154   // appropriate sizes... To be done ...
2155 }
2156 
2157 class VerifyAllBlksClosure: public BlkClosure {
2158  private:
2159   const CompactibleFreeListSpace* _sp;
2160   const MemRegion                 _span;
2161   HeapWord*                       _last_addr;
2162   size_t                          _last_size;
2163   bool                            _last_was_obj;
2164   bool                            _last_was_live;
2165 
2166  public:
2167   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2168     MemRegion span) :  _sp(sp), _span(span),
2169                        _last_addr(NULL), _last_size(0),
2170                        _last_was_obj(false), _last_was_live(false) { }
2171 
2172   virtual size_t do_blk(HeapWord* addr) {
2173     size_t res;
2174     bool   was_obj  = false;
2175     bool   was_live = false;
2176     if (_sp->block_is_obj(addr)) {
2177       was_obj = true;
2178       oop p = oop(addr);
2179       guarantee(p->is_oop(), "Should be an oop");
2180       res = _sp->adjustObjectSize(p->size());
2181       if (_sp->obj_is_alive(addr)) {
2182         was_live = true;
2183         p->verify();
2184       }
2185     } else {
2186       FreeChunk* fc = (FreeChunk*)addr;
2187       res = fc->size();
2188       if (FLSVerifyLists && !fc->cantCoalesce()) {
2189         guarantee(_sp->verify_chunk_in_free_list(fc),
2190                   "Chunk should be on a free list");
2191       }
2192     }
2193     if (res == 0) {
2194       Log(gc, verify) log;
2195       log.error("Livelock: no rank reduction!");
2196       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2197                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2198         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2199         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2200       ResourceMark rm;
2201       _sp->print_on(log.error_stream());
2202       guarantee(false, "Verification failed.");
2203     }
2204     _last_addr = addr;
2205     _last_size = res;
2206     _last_was_obj  = was_obj;
2207     _last_was_live = was_live;
2208     return res;
2209   }
2210 };
2211 
2212 class VerifyAllOopsClosure: public OopClosure {
2213  private:
2214   const CMSCollector*             _collector;
2215   const CompactibleFreeListSpace* _sp;
2216   const MemRegion                 _span;
2217   const bool                      _past_remark;
2218   const CMSBitMap*                _bit_map;
2219 
2220  protected:
2221   void do_oop(void* p, oop obj) {
2222     if (_span.contains(obj)) { // the interior oop points into CMS heap
2223       if (!_span.contains(p)) { // reference from outside CMS heap
2224         // Should be a valid object; the first disjunct below allows
2225         // us to sidestep an assertion in block_is_obj() that insists
2226         // that p be in _sp. Note that several generations (and spaces)
2227         // are spanned by _span (CMS heap) above.
2228         guarantee(!_sp->is_in_reserved(obj) ||
2229                   _sp->block_is_obj((HeapWord*)obj),
2230                   "Should be an object");
2231         guarantee(obj->is_oop(), "Should be an oop");
2232         obj->verify();
2233         if (_past_remark) {
2234           // Remark has been completed, the object should be marked
2235           _bit_map->isMarked((HeapWord*)obj);
2236         }
2237       } else { // reference within CMS heap
2238         if (_past_remark) {
2239           // Remark has been completed -- so the referent should have
2240           // been marked, if referring object is.
2241           if (_bit_map->isMarked(_collector->block_start(p))) {
2242             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2243           }
2244         }
2245       }
2246     } else if (_sp->is_in_reserved(p)) {
2247       // the reference is from FLS, and points out of FLS
2248       guarantee(obj->is_oop(), "Should be an oop");
2249       obj->verify();
2250     }
2251   }
2252 
2253   template <class T> void do_oop_work(T* p) {
2254     T heap_oop = oopDesc::load_heap_oop(p);
2255     if (!oopDesc::is_null(heap_oop)) {
2256       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2257       do_oop(p, obj);
2258     }
2259   }
2260 
2261  public:
2262   VerifyAllOopsClosure(const CMSCollector* collector,
2263     const CompactibleFreeListSpace* sp, MemRegion span,
2264     bool past_remark, CMSBitMap* bit_map) :
2265     _collector(collector), _sp(sp), _span(span),
2266     _past_remark(past_remark), _bit_map(bit_map) { }
2267 
2268   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2269   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2270 };
2271 
2272 void CompactibleFreeListSpace::verify() const {
2273   assert_lock_strong(&_freelistLock);
2274   verify_objects_initialized();
2275   MemRegion span = _collector->_span;
2276   bool past_remark = (_collector->abstract_state() ==
2277                       CMSCollector::Sweeping);
2278 
2279   ResourceMark rm;
2280   HandleMark  hm;
2281 
2282   // Check integrity of CFL data structures
2283   _promoInfo.verify();
2284   _dictionary->verify();
2285   if (FLSVerifyIndexTable) {
2286     verifyIndexedFreeLists();
2287   }
2288   // Check integrity of all objects and free blocks in space
2289   {
2290     VerifyAllBlksClosure cl(this, span);
2291     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2292   }
2293   // Check that all references in the heap to FLS
2294   // are to valid objects in FLS or that references in
2295   // FLS are to valid objects elsewhere in the heap
2296   if (FLSVerifyAllHeapReferences)
2297   {
2298     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2299       _collector->markBitMap());
2300 
2301     // Iterate over all oops in the heap. Uses the _no_header version
2302     // since we are not interested in following the klass pointers.
2303     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2304   }
2305 
2306   if (VerifyObjectStartArray) {
2307     // Verify the block offset table
2308     _bt.verify();
2309   }
2310 }
2311 
2312 #ifndef PRODUCT
2313 void CompactibleFreeListSpace::verifyFreeLists() const {
2314   if (FLSVerifyLists) {
2315     _dictionary->verify();
2316     verifyIndexedFreeLists();
2317   } else {
2318     if (FLSVerifyDictionary) {
2319       _dictionary->verify();
2320     }
2321     if (FLSVerifyIndexTable) {
2322       verifyIndexedFreeLists();
2323     }
2324   }
2325 }
2326 #endif
2327 
2328 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2329   size_t i = 0;
2330   for (; i < IndexSetStart; i++) {
2331     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2332   }
2333   for (; i < IndexSetSize; i++) {
2334     verifyIndexedFreeList(i);
2335   }
2336 }
2337 
2338 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2339   FreeChunk* fc   =  _indexedFreeList[size].head();
2340   FreeChunk* tail =  _indexedFreeList[size].tail();
2341   size_t    num = _indexedFreeList[size].count();
2342   size_t      n = 0;
2343   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2344             "Slot should have been empty");
2345   for (; fc != NULL; fc = fc->next(), n++) {
2346     guarantee(fc->size() == size, "Size inconsistency");
2347     guarantee(fc->is_free(), "!free?");
2348     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2349     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2350   }
2351   guarantee(n == num, "Incorrect count");
2352 }
2353 
2354 #ifndef PRODUCT
2355 void CompactibleFreeListSpace::check_free_list_consistency() const {
2356   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2357     "Some sizes can't be allocated without recourse to"
2358     " linear allocation buffers");
2359   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2360     "else MIN_TREE_CHUNK_SIZE is wrong");
2361   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2362   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2363 }
2364 #endif
2365 
2366 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2367   assert_lock_strong(&_freelistLock);
2368   LogTarget(Debug, gc, freelist, census) log;
2369   if (!log.is_enabled()) {
2370     return;
2371   }
2372   AdaptiveFreeList<FreeChunk> total;
2373   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2374   ResourceMark rm;
2375   outputStream* out = log.stream();
2376   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2377   size_t total_free = 0;
2378   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2379     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2380     total_free += fl->count() * fl->size();
2381     if (i % (40*IndexSetStride) == 0) {
2382       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2383     }
2384     fl->print_on(out);
2385     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2386     total.set_surplus(    total.surplus()     + fl->surplus()    );
2387     total.set_desired(    total.desired()     + fl->desired()    );
2388     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2389     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2390     total.set_count(      total.count()       + fl->count()      );
2391     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2392     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2393     total.set_split_births(total.split_births() + fl->split_births());
2394     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2395   }
2396   total.print_on(out, "TOTAL");
2397   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2398   log.print("growth: %8.5f  deficit: %8.5f",
2399             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2400                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2401             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2402   _dictionary->print_dict_census(out);
2403 }
2404 
2405 ///////////////////////////////////////////////////////////////////////////
2406 // CompactibleFreeListSpaceLAB
2407 ///////////////////////////////////////////////////////////////////////////
2408 
2409 #define VECTOR_257(x)                                                                                  \
2410   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2411   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2412      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2413      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2414      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2415      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2416      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2417      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2418      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2419      x }
2420 
2421 // Initialize with default setting for CMS, _not_
2422 // generic OldPLABSize, whose static default is different; if overridden at the
2423 // command-line, this will get reinitialized via a call to
2424 // modify_initialization() below.
2425 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2426   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2427 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2428 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2429 
2430 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2431   _cfls(cfls)
2432 {
2433   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2434   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2435        i < CompactibleFreeListSpace::IndexSetSize;
2436        i += CompactibleFreeListSpace::IndexSetStride) {
2437     _indexedFreeList[i].set_size(i);
2438     _num_blocks[i] = 0;
2439   }
2440 }
2441 
2442 static bool _CFLS_LAB_modified = false;
2443 
2444 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2445   assert(!_CFLS_LAB_modified, "Call only once");
2446   _CFLS_LAB_modified = true;
2447   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2448        i < CompactibleFreeListSpace::IndexSetSize;
2449        i += CompactibleFreeListSpace::IndexSetStride) {
2450     _blocks_to_claim[i].modify(n, wt, true /* force */);
2451   }
2452 }
2453 
2454 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2455   FreeChunk* res;
2456   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2457   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2458     // This locking manages sync with other large object allocations.
2459     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2460                     Mutex::_no_safepoint_check_flag);
2461     res = _cfls->getChunkFromDictionaryExact(word_sz);
2462     if (res == NULL) return NULL;
2463   } else {
2464     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2465     if (fl->count() == 0) {
2466       // Attempt to refill this local free list.
2467       get_from_global_pool(word_sz, fl);
2468       // If it didn't work, give up.
2469       if (fl->count() == 0) return NULL;
2470     }
2471     res = fl->get_chunk_at_head();
2472     assert(res != NULL, "Why was count non-zero?");
2473   }
2474   res->markNotFree();
2475   assert(!res->is_free(), "shouldn't be marked free");
2476   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2477   // mangle a just allocated object with a distinct pattern.
2478   debug_only(res->mangleAllocated(word_sz));
2479   return (HeapWord*)res;
2480 }
2481 
2482 // Get a chunk of blocks of the right size and update related
2483 // book-keeping stats
2484 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2485   // Get the #blocks we want to claim
2486   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2487   assert(n_blks > 0, "Error");
2488   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2489   // In some cases, when the application has a phase change,
2490   // there may be a sudden and sharp shift in the object survival
2491   // profile, and updating the counts at the end of a scavenge
2492   // may not be quick enough, giving rise to large scavenge pauses
2493   // during these phase changes. It is beneficial to detect such
2494   // changes on-the-fly during a scavenge and avoid such a phase-change
2495   // pothole. The following code is a heuristic attempt to do that.
2496   // It is protected by a product flag until we have gained
2497   // enough experience with this heuristic and fine-tuned its behavior.
2498   // WARNING: This might increase fragmentation if we overreact to
2499   // small spikes, so some kind of historical smoothing based on
2500   // previous experience with the greater reactivity might be useful.
2501   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2502   // default.
2503   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2504     //
2505     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2506     // which is why there is a cast to double.
2507     //
2508     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2509     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2510     n_blks = MIN2(n_blks, CMSOldPLABMax);
2511   }
2512   assert(n_blks > 0, "Error");
2513   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2514   // Update stats table entry for this block size
2515   _num_blocks[word_sz] += fl->count();
2516 }
2517 
2518 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2519   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2520        i < CompactibleFreeListSpace::IndexSetSize;
2521        i += CompactibleFreeListSpace::IndexSetStride) {
2522     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2523            "Counter inconsistency");
2524     if (_global_num_workers[i] > 0) {
2525       // Need to smooth wrt historical average
2526       if (ResizeOldPLAB) {
2527         _blocks_to_claim[i].sample(
2528           MAX2(CMSOldPLABMin,
2529           MIN2(CMSOldPLABMax,
2530                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2531       }
2532       // Reset counters for next round
2533       _global_num_workers[i] = 0;
2534       _global_num_blocks[i] = 0;
2535       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2536     }
2537   }
2538 }
2539 
2540 // If this is changed in the future to allow parallel
2541 // access, one would need to take the FL locks and,
2542 // depending on how it is used, stagger access from
2543 // parallel threads to reduce contention.
2544 void CompactibleFreeListSpaceLAB::retire(int tid) {
2545   // We run this single threaded with the world stopped;
2546   // so no need for locks and such.
2547   NOT_PRODUCT(Thread* t = Thread::current();)
2548   assert(Thread::current()->is_VM_thread(), "Error");
2549   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2550        i < CompactibleFreeListSpace::IndexSetSize;
2551        i += CompactibleFreeListSpace::IndexSetStride) {
2552     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2553            "Can't retire more than what we obtained");
2554     if (_num_blocks[i] > 0) {
2555       size_t num_retire =  _indexedFreeList[i].count();
2556       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2557       {
2558         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2559         //                Mutex::_no_safepoint_check_flag);
2560 
2561         // Update globals stats for num_blocks used
2562         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2563         _global_num_workers[i]++;
2564         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2565         if (num_retire > 0) {
2566           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2567           // Reset this list.
2568           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2569           _indexedFreeList[i].set_size(i);
2570         }
2571       }
2572       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2573                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2574       // Reset stats for next round
2575       _num_blocks[i]         = 0;
2576     }
2577   }
2578 }
2579 
2580 // Used by par_get_chunk_of_blocks() for the chunks from the
2581 // indexed_free_lists.  Looks for a chunk with size that is a multiple
2582 // of "word_sz" and if found, splits it into "word_sz" chunks and add
2583 // to the free list "fl".  "n" is the maximum number of chunks to
2584 // be added to "fl".
2585 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2586 
2587   // We'll try all multiples of word_sz in the indexed set, starting with
2588   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2589   // then try getting a big chunk and splitting it.
2590   {
2591     bool found;
2592     int  k;
2593     size_t cur_sz;
2594     for (k = 1, cur_sz = k * word_sz, found = false;
2595          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2596          (CMSSplitIndexedFreeListBlocks || k <= 1);
2597          k++, cur_sz = k * word_sz) {
2598       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2599       fl_for_cur_sz.set_size(cur_sz);
2600       {
2601         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2602                         Mutex::_no_safepoint_check_flag);
2603         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2604         if (gfl->count() != 0) {
2605           // nn is the number of chunks of size cur_sz that
2606           // we'd need to split k-ways each, in order to create
2607           // "n" chunks of size word_sz each.
2608           const size_t nn = MAX2(n/k, (size_t)1);
2609           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2610           found = true;
2611           if (k > 1) {
2612             // Update split death stats for the cur_sz-size blocks list:
2613             // we increment the split death count by the number of blocks
2614             // we just took from the cur_sz-size blocks list and which
2615             // we will be splitting below.
2616             ssize_t deaths = gfl->split_deaths() +
2617                              fl_for_cur_sz.count();
2618             gfl->set_split_deaths(deaths);
2619           }
2620         }
2621       }
2622       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2623       if (found) {
2624         if (k == 1) {
2625           fl->prepend(&fl_for_cur_sz);
2626         } else {
2627           // Divide each block on fl_for_cur_sz up k ways.
2628           FreeChunk* fc;
2629           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2630             // Must do this in reverse order, so that anybody attempting to
2631             // access the main chunk sees it as a single free block until we
2632             // change it.
2633             size_t fc_size = fc->size();
2634             assert(fc->is_free(), "Error");
2635             for (int i = k-1; i >= 0; i--) {
2636               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2637               assert((i != 0) ||
2638                         ((fc == ffc) && ffc->is_free() &&
2639                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2640                         "Counting error");
2641               ffc->set_size(word_sz);
2642               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2643               ffc->link_next(NULL);
2644               // Above must occur before BOT is updated below.
2645               OrderAccess::storestore();
2646               // splitting from the right, fc_size == i * word_sz
2647               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2648               fc_size -= word_sz;
2649               assert(fc_size == i*word_sz, "Error");
2650               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2651               _bt.verify_single_block((HeapWord*)fc, fc_size);
2652               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2653               // Push this on "fl".
2654               fl->return_chunk_at_head(ffc);
2655             }
2656             // TRAP
2657             assert(fl->tail()->next() == NULL, "List invariant.");
2658           }
2659         }
2660         // Update birth stats for this block size.
2661         size_t num = fl->count();
2662         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2663                         Mutex::_no_safepoint_check_flag);
2664         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2665         _indexedFreeList[word_sz].set_split_births(births);
2666         return true;
2667       }
2668     }
2669     return found;
2670   }
2671 }
2672 
2673 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2674 
2675   FreeChunk* fc = NULL;
2676   FreeChunk* rem_fc = NULL;
2677   size_t rem;
2678   {
2679     MutexLockerEx x(parDictionaryAllocLock(),
2680                     Mutex::_no_safepoint_check_flag);
2681     while (n > 0) {
2682       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2683                                   FreeBlockDictionary<FreeChunk>::atLeast);
2684       if (fc != NULL) {
2685         break;
2686       } else {
2687         n--;
2688       }
2689     }
2690     if (fc == NULL) return NULL;
2691     // Otherwise, split up that block.
2692     assert((ssize_t)n >= 1, "Control point invariant");
2693     assert(fc->is_free(), "Error: should be a free block");
2694     _bt.verify_single_block((HeapWord*)fc, fc->size());
2695     const size_t nn = fc->size() / word_sz;
2696     n = MIN2(nn, n);
2697     assert((ssize_t)n >= 1, "Control point invariant");
2698     rem = fc->size() - n * word_sz;
2699     // If there is a remainder, and it's too small, allocate one fewer.
2700     if (rem > 0 && rem < MinChunkSize) {
2701       n--; rem += word_sz;
2702     }
2703     // Note that at this point we may have n == 0.
2704     assert((ssize_t)n >= 0, "Control point invariant");
2705 
2706     // If n is 0, the chunk fc that was found is not large
2707     // enough to leave a viable remainder.  We are unable to
2708     // allocate even one block.  Return fc to the
2709     // dictionary and return, leaving "fl" empty.
2710     if (n == 0) {
2711       returnChunkToDictionary(fc);
2712       return NULL;
2713     }
2714 
2715     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2716     dictionary()->dict_census_update(fc->size(),
2717                                      true /*split*/,
2718                                      false /*birth*/);
2719 
2720     // First return the remainder, if any.
2721     // Note that we hold the lock until we decide if we're going to give
2722     // back the remainder to the dictionary, since a concurrent allocation
2723     // may otherwise see the heap as empty.  (We're willing to take that
2724     // hit if the block is a small block.)
2725     if (rem > 0) {
2726       size_t prefix_size = n * word_sz;
2727       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2728       rem_fc->set_size(rem);
2729       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2730       rem_fc->link_next(NULL);
2731       // Above must occur before BOT is updated below.
2732       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2733       OrderAccess::storestore();
2734       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2735       assert(fc->is_free(), "Error");
2736       fc->set_size(prefix_size);
2737       if (rem >= IndexSetSize) {
2738         returnChunkToDictionary(rem_fc);
2739         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2740         rem_fc = NULL;
2741       }
2742       // Otherwise, return it to the small list below.
2743     }
2744   }
2745   if (rem_fc != NULL) {
2746     MutexLockerEx x(_indexedFreeListParLocks[rem],
2747                     Mutex::_no_safepoint_check_flag);
2748     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2749     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2750     smallSplitBirth(rem);
2751   }
2752   assert(n * word_sz == fc->size(),
2753          "Chunk size " SIZE_FORMAT " is not exactly splittable by "
2754          SIZE_FORMAT " sized chunks of size " SIZE_FORMAT,
2755          fc->size(), n, word_sz);
2756   return fc;
2757 }
2758 
2759 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2760 
2761   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2762 
2763   if (fc == NULL) {
2764     return;
2765   }
2766 
2767   size_t n = fc->size() / word_sz;
2768 
2769   assert((ssize_t)n > 0, "Consistency");
2770   // Now do the splitting up.
2771   // Must do this in reverse order, so that anybody attempting to
2772   // access the main chunk sees it as a single free block until we
2773   // change it.
2774   size_t fc_size = n * word_sz;
2775   // All but first chunk in this loop
2776   for (ssize_t i = n-1; i > 0; i--) {
2777     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2778     ffc->set_size(word_sz);
2779     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2780     ffc->link_next(NULL);
2781     // Above must occur before BOT is updated below.
2782     OrderAccess::storestore();
2783     // splitting from the right, fc_size == (n - i + 1) * wordsize
2784     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2785     fc_size -= word_sz;
2786     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2787     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2788     _bt.verify_single_block((HeapWord*)fc, fc_size);
2789     // Push this on "fl".
2790     fl->return_chunk_at_head(ffc);
2791   }
2792   // First chunk
2793   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2794   // The blocks above should show their new sizes before the first block below
2795   fc->set_size(word_sz);
2796   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2797   fc->link_next(NULL);
2798   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2799   _bt.verify_single_block((HeapWord*)fc, fc->size());
2800   fl->return_chunk_at_head(fc);
2801 
2802   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2803   {
2804     // Update the stats for this block size.
2805     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2806                     Mutex::_no_safepoint_check_flag);
2807     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2808     _indexedFreeList[word_sz].set_split_births(births);
2809     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2810     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2811   }
2812 
2813   // TRAP
2814   assert(fl->tail()->next() == NULL, "List invariant.");
2815 }
2816 
2817 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2818   assert(fl->count() == 0, "Precondition.");
2819   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2820          "Precondition");
2821 
2822   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2823     // Got it
2824     return;
2825   }
2826 
2827   // Otherwise, we'll split a block from the dictionary.
2828   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2829 }
2830 
2831 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
2832   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTableModRefBS::card_size_in_words * BitsPerWord);
2833   return ergo_max;
2834 }
2835 
2836 // Set up the space's par_seq_tasks structure for work claiming
2837 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2838 // XXX Need to suitably abstract and generalize this and the next
2839 // method into one.
2840 void
2841 CompactibleFreeListSpace::
2842 initialize_sequential_subtasks_for_rescan(int n_threads) {
2843   // The "size" of each task is fixed according to rescan_task_size.
2844   assert(n_threads > 0, "Unexpected n_threads argument");
2845   const size_t task_size = rescan_task_size();
2846   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2847   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2848   assert(n_tasks == 0 ||
2849          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2850           (used_region().start() + n_tasks*task_size >= used_region().end())),
2851          "n_tasks calculation incorrect");
2852   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2853   assert(!pst->valid(), "Clobbering existing data?");
2854   // Sets the condition for completion of the subtask (how many threads
2855   // need to finish in order to be done).
2856   pst->set_n_threads(n_threads);
2857   pst->set_n_tasks((int)n_tasks);
2858 }
2859 
2860 // Set up the space's par_seq_tasks structure for work claiming
2861 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2862 void
2863 CompactibleFreeListSpace::
2864 initialize_sequential_subtasks_for_marking(int n_threads,
2865                                            HeapWord* low) {
2866   // The "size" of each task is fixed according to rescan_task_size.
2867   assert(n_threads > 0, "Unexpected n_threads argument");
2868   const size_t task_size = marking_task_size();
2869   assert(task_size > CardTableModRefBS::card_size_in_words &&
2870          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2871          "Otherwise arithmetic below would be incorrect");
2872   MemRegion span = _old_gen->reserved();
2873   if (low != NULL) {
2874     if (span.contains(low)) {
2875       // Align low down to  a card boundary so that
2876       // we can use block_offset_careful() on span boundaries.
2877       HeapWord* aligned_low = align_down(low, CardTableModRefBS::card_size);
2878       // Clip span prefix at aligned_low
2879       span = span.intersection(MemRegion(aligned_low, span.end()));
2880     } else if (low > span.end()) {
2881       span = MemRegion(low, low);  // Null region
2882     } // else use entire span
2883   }
2884   assert(span.is_empty() ||
2885          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2886         "span should start at a card boundary");
2887   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2888   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2889   assert(n_tasks == 0 ||
2890          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2891           (span.start() + n_tasks*task_size >= span.end())),
2892          "n_tasks calculation incorrect");
2893   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2894   assert(!pst->valid(), "Clobbering existing data?");
2895   // Sets the condition for completion of the subtask (how many threads
2896   // need to finish in order to be done).
2897   pst->set_n_threads(n_threads);
2898   pst->set_n_tasks((int)n_tasks);
2899 }