1 /*
   2  * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/space.inline.hpp"
  37 #include "memory/universe.inline.hpp"
  38 #include "oops/oop.inline.hpp"
  39 #include "runtime/globals.hpp"
  40 #include "runtime/handles.inline.hpp"
  41 #include "runtime/init.hpp"
  42 #include "runtime/java.hpp"
  43 #include "runtime/orderAccess.inline.hpp"
  44 #include "runtime/vmThread.hpp"
  45 #include "utilities/copy.hpp"
  46 
  47 /////////////////////////////////////////////////////////////////////////
  48 //// CompactibleFreeListSpace
  49 /////////////////////////////////////////////////////////////////////////
  50 
  51 // highest ranked  free list lock rank
  52 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  53 
  54 // Defaults are 0 so things will break badly if incorrectly initialized.
  55 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  56 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  57 
  58 size_t MinChunkSize = 0;
  59 
  60 void CompactibleFreeListSpace::set_cms_values() {
  61   // Set CMS global values
  62   assert(MinChunkSize == 0, "already set");
  63 
  64   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  65   // for chunks to contain a FreeChunk.
  66   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  67   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  68 
  69   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  70   IndexSetStart  = MinChunkSize;
  71   IndexSetStride = MinObjAlignment;
  72 }
  73 
  74 // Constructor
  75 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  76   MemRegion mr, bool use_adaptive_freelists,
  77   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  78   _dictionaryChoice(dictionaryChoice),
  79   _adaptive_freelists(use_adaptive_freelists),
  80   _bt(bs, mr),
  81   // free list locks are in the range of values taken by _lockRank
  82   // This range currently is [_leaf+2, _leaf+3]
  83   // Note: this requires that CFLspace c'tors
  84   // are called serially in the order in which the locks are
  85   // are acquired in the program text. This is true today.
  86   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  87   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  88                           "CompactibleFreeListSpace._dict_par_lock", true),
  89   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSRescanMultiple),
  91   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  92                     CMSConcMarkMultiple),
  93   _collector(NULL)
  94 {
  95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  96          "FreeChunk is larger than expected");
  97   _bt.set_space(this);
  98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  99   // We have all of "mr", all of which we place in the dictionary
 100   // as one big chunk. We'll need to decide here which of several
 101   // possible alternative dictionary implementations to use. For
 102   // now the choice is easy, since we have only one working
 103   // implementation, namely, the simple binary tree (splaying
 104   // temporarily disabled).
 105   switch (dictionaryChoice) {
 106     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 107       _dictionary = new AFLBinaryTreeDictionary(mr);
 108       break;
 109     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 110     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 111     default:
 112       warning("dictionaryChoice: selected option not understood; using"
 113               " default BinaryTreeDictionary implementation instead.");
 114   }
 115   assert(_dictionary != NULL, "CMS dictionary initialization");
 116   // The indexed free lists are initially all empty and are lazily
 117   // filled in on demand. Initialize the array elements to NULL.
 118   initializeIndexedFreeListArray();
 119 
 120   // Not using adaptive free lists assumes that allocation is first
 121   // from the linAB's.  Also a cms perm gen which can be compacted
 122   // has to have the klass's klassKlass allocated at a lower
 123   // address in the heap than the klass so that the klassKlass is
 124   // moved to its new location before the klass is moved.
 125   // Set the _refillSize for the linear allocation blocks
 126   if (!use_adaptive_freelists) {
 127     FreeChunk* fc = _dictionary->get_chunk(mr.word_size(),
 128                                            FreeBlockDictionary<FreeChunk>::atLeast);
 129     // The small linAB initially has all the space and will allocate
 130     // a chunk of any size.
 131     HeapWord* addr = (HeapWord*) fc;
 132     _smallLinearAllocBlock.set(addr, fc->size() ,
 133       1024*SmallForLinearAlloc, fc->size());
 134     // Note that _unallocated_block is not updated here.
 135     // Allocations from the linear allocation block should
 136     // update it.
 137   } else {
 138     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 139                                SmallForLinearAlloc);
 140   }
 141   // CMSIndexedFreeListReplenish should be at least 1
 142   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 143   _promoInfo.setSpace(this);
 144   if (UseCMSBestFit) {
 145     _fitStrategy = FreeBlockBestFitFirst;
 146   } else {
 147     _fitStrategy = FreeBlockStrategyNone;
 148   }
 149   check_free_list_consistency();
 150 
 151   // Initialize locks for parallel case.
 152   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 153     _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 154                                             "a freelist par lock",
 155                                             true);
 156     DEBUG_ONLY(
 157       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 158     )
 159   }
 160   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 161 }
 162 
 163 // Like CompactibleSpace forward() but always calls cross_threshold() to
 164 // update the block offset table.  Removed initialize_threshold call because
 165 // CFLS does not use a block offset array for contiguous spaces.
 166 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 167                                     CompactPoint* cp, HeapWord* compact_top) {
 168   // q is alive
 169   // First check if we should switch compaction space
 170   assert(this == cp->space, "'this' should be current compaction space.");
 171   size_t compaction_max_size = pointer_delta(end(), compact_top);
 172   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 173     "virtual adjustObjectSize_v() method is not correct");
 174   size_t adjusted_size = adjustObjectSize(size);
 175   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 176          "no small fragments allowed");
 177   assert(minimum_free_block_size() == MinChunkSize,
 178          "for de-virtualized reference below");
 179   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
 180   if (adjusted_size + MinChunkSize > compaction_max_size &&
 181       adjusted_size != compaction_max_size) {
 182     do {
 183       // switch to next compaction space
 184       cp->space->set_compaction_top(compact_top);
 185       cp->space = cp->space->next_compaction_space();
 186       if (cp->space == NULL) {
 187         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 188         assert(cp->gen != NULL, "compaction must succeed");
 189         cp->space = cp->gen->first_compaction_space();
 190         assert(cp->space != NULL, "generation must have a first compaction space");
 191       }
 192       compact_top = cp->space->bottom();
 193       cp->space->set_compaction_top(compact_top);
 194       // The correct adjusted_size may not be the same as that for this method
 195       // (i.e., cp->space may no longer be "this" so adjust the size again.
 196       // Use the virtual method which is not used above to save the virtual
 197       // dispatch.
 198       adjusted_size = cp->space->adjust_object_size_v(size);
 199       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 200       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 201     } while (adjusted_size > compaction_max_size);
 202   }
 203 
 204   // store the forwarding pointer into the mark word
 205   if ((HeapWord*)q != compact_top) {
 206     q->forward_to(oop(compact_top));
 207     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 208   } else {
 209     // if the object isn't moving we can just set the mark to the default
 210     // mark and handle it specially later on.
 211     q->init_mark();
 212     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 213   }
 214 
 215   compact_top += adjusted_size;
 216 
 217   // we need to update the offset table so that the beginnings of objects can be
 218   // found during scavenge.  Note that we are updating the offset table based on
 219   // where the object will be once the compaction phase finishes.
 220 
 221   // Always call cross_threshold().  A contiguous space can only call it when
 222   // the compaction_top exceeds the current threshold but not for an
 223   // non-contiguous space.
 224   cp->threshold =
 225     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 226   return compact_top;
 227 }
 228 
 229 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 230 // and use of single_block instead of alloc_block.  The name here is not really
 231 // appropriate - maybe a more general name could be invented for both the
 232 // contiguous and noncontiguous spaces.
 233 
 234 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 235   _bt.single_block(start, the_end);
 236   return end();
 237 }
 238 
 239 // Initialize them to NULL.
 240 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 241   for (size_t i = 0; i < IndexSetSize; i++) {
 242     // Note that on platforms where objects are double word aligned,
 243     // the odd array elements are not used.  It is convenient, however,
 244     // to map directly from the object size to the array element.
 245     _indexedFreeList[i].reset(IndexSetSize);
 246     _indexedFreeList[i].set_size(i);
 247     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 248     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 249     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 250     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 251   }
 252 }
 253 
 254 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 255   for (size_t i = 1; i < IndexSetSize; i++) {
 256     assert(_indexedFreeList[i].size() == (size_t) i,
 257       "Indexed free list sizes are incorrect");
 258     _indexedFreeList[i].reset(IndexSetSize);
 259     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 260     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 261     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 262     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 263   }
 264 }
 265 
 266 void CompactibleFreeListSpace::reset(MemRegion mr) {
 267   resetIndexedFreeListArray();
 268   dictionary()->reset();
 269   if (BlockOffsetArrayUseUnallocatedBlock) {
 270     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 271     // Everything's allocated until proven otherwise.
 272     _bt.set_unallocated_block(end());
 273   }
 274   if (!mr.is_empty()) {
 275     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 276     _bt.single_block(mr.start(), mr.word_size());
 277     FreeChunk* fc = (FreeChunk*) mr.start();
 278     fc->set_size(mr.word_size());
 279     if (mr.word_size() >= IndexSetSize ) {
 280       returnChunkToDictionary(fc);
 281     } else {
 282       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 283       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 284     }
 285     coalBirth(mr.word_size());
 286   }
 287   _promoInfo.reset();
 288   _smallLinearAllocBlock._ptr = NULL;
 289   _smallLinearAllocBlock._word_size = 0;
 290 }
 291 
 292 void CompactibleFreeListSpace::reset_after_compaction() {
 293   // Reset the space to the new reality - one free chunk.
 294   MemRegion mr(compaction_top(), end());
 295   reset(mr);
 296   // Now refill the linear allocation block(s) if possible.
 297   if (_adaptive_freelists) {
 298     refillLinearAllocBlocksIfNeeded();
 299   } else {
 300     // Place as much of mr in the linAB as we can get,
 301     // provided it was big enough to go into the dictionary.
 302     FreeChunk* fc = dictionary()->find_largest_dict();
 303     if (fc != NULL) {
 304       assert(fc->size() == mr.word_size(),
 305              "Why was the chunk broken up?");
 306       removeChunkFromDictionary(fc);
 307       HeapWord* addr = (HeapWord*) fc;
 308       _smallLinearAllocBlock.set(addr, fc->size() ,
 309         1024*SmallForLinearAlloc, fc->size());
 310       // Note that _unallocated_block is not updated here.
 311     }
 312   }
 313 }
 314 
 315 // Walks the entire dictionary, returning a coterminal
 316 // chunk, if it exists. Use with caution since it involves
 317 // a potentially complete walk of a potentially large tree.
 318 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 319 
 320   assert_lock_strong(&_freelistLock);
 321 
 322   return dictionary()->find_chunk_ends_at(end());
 323 }
 324 
 325 
 326 #ifndef PRODUCT
 327 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 328   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 329     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 330   }
 331 }
 332 
 333 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 334   size_t sum = 0;
 335   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 336     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 337   }
 338   return sum;
 339 }
 340 
 341 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 342   size_t count = 0;
 343   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 344     debug_only(
 345       ssize_t total_list_count = 0;
 346       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 347          fc = fc->next()) {
 348         total_list_count++;
 349       }
 350       assert(total_list_count ==  _indexedFreeList[i].count(),
 351         "Count in list is incorrect");
 352     )
 353     count += _indexedFreeList[i].count();
 354   }
 355   return count;
 356 }
 357 
 358 size_t CompactibleFreeListSpace::totalCount() {
 359   size_t num = totalCountInIndexedFreeLists();
 360   num +=  dictionary()->total_count();
 361   if (_smallLinearAllocBlock._word_size != 0) {
 362     num++;
 363   }
 364   return num;
 365 }
 366 #endif
 367 
 368 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 369   FreeChunk* fc = (FreeChunk*) p;
 370   return fc->is_free();
 371 }
 372 
 373 size_t CompactibleFreeListSpace::used() const {
 374   return capacity() - free();
 375 }
 376 
 377 size_t CompactibleFreeListSpace::free() const {
 378   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 379   // if you do this while the structures are in flux you
 380   // may get an approximate answer only; for instance
 381   // because there is concurrent allocation either
 382   // directly by mutators or for promotion during a GC.
 383   // It's "MT-safe", however, in the sense that you are guaranteed
 384   // not to crash and burn, for instance, because of walking
 385   // pointers that could disappear as you were walking them.
 386   // The approximation is because the various components
 387   // that are read below are not read atomically (and
 388   // further the computation of totalSizeInIndexedFreeLists()
 389   // is itself a non-atomic computation. The normal use of
 390   // this is during a resize operation at the end of GC
 391   // and at that time you are guaranteed to get the
 392   // correct actual value. However, for instance, this is
 393   // also read completely asynchronously by the "perf-sampler"
 394   // that supports jvmstat, and you are apt to see the values
 395   // flicker in such cases.
 396   assert(_dictionary != NULL, "No _dictionary?");
 397   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 398           totalSizeInIndexedFreeLists() +
 399           _smallLinearAllocBlock._word_size) * HeapWordSize;
 400 }
 401 
 402 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 403   assert(_dictionary != NULL, "No _dictionary?");
 404   assert_locked();
 405   size_t res = _dictionary->max_chunk_size();
 406   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 407                        (size_t) SmallForLinearAlloc - 1));
 408   // XXX the following could potentially be pretty slow;
 409   // should one, pessimistically for the rare cases when res
 410   // calculated above is less than IndexSetSize,
 411   // just return res calculated above? My reasoning was that
 412   // those cases will be so rare that the extra time spent doesn't
 413   // really matter....
 414   // Note: do not change the loop test i >= res + IndexSetStride
 415   // to i > res below, because i is unsigned and res may be zero.
 416   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 417        i -= IndexSetStride) {
 418     if (_indexedFreeList[i].head() != NULL) {
 419       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 420       return i;
 421     }
 422   }
 423   return res;
 424 }
 425 
 426 void LinearAllocBlock::print_on(outputStream* st) const {
 427   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 428             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 429             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 430 }
 431 
 432 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 433   st->print_cr("COMPACTIBLE FREELIST SPACE");
 434   st->print_cr(" Space:");
 435   Space::print_on(st);
 436 
 437   st->print_cr("promoInfo:");
 438   _promoInfo.print_on(st);
 439 
 440   st->print_cr("_smallLinearAllocBlock");
 441   _smallLinearAllocBlock.print_on(st);
 442 
 443   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 444 
 445   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 446                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 447 }
 448 
 449 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 450 const {
 451   reportIndexedFreeListStatistics();
 452   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 453   gclog_or_tty->print_cr("---------------------------");
 454   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 455   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 456     _indexedFreeList[i].print_on(gclog_or_tty);
 457     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 458          fc = fc->next()) {
 459       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 460                           p2i(fc), p2i((HeapWord*)fc + i),
 461                           fc->cantCoalesce() ? "\t CC" : "");
 462     }
 463   }
 464 }
 465 
 466 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 467 const {
 468   _promoInfo.print_on(st);
 469 }
 470 
 471 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 472 const {
 473   _dictionary->report_statistics();
 474   st->print_cr("Layout of Freelists in Tree");
 475   st->print_cr("---------------------------");
 476   _dictionary->print_free_lists(st);
 477 }
 478 
 479 class BlkPrintingClosure: public BlkClosure {
 480   const CMSCollector*             _collector;
 481   const CompactibleFreeListSpace* _sp;
 482   const CMSBitMap*                _live_bit_map;
 483   const bool                      _post_remark;
 484   outputStream*                   _st;
 485 public:
 486   BlkPrintingClosure(const CMSCollector* collector,
 487                      const CompactibleFreeListSpace* sp,
 488                      const CMSBitMap* live_bit_map,
 489                      outputStream* st):
 490     _collector(collector),
 491     _sp(sp),
 492     _live_bit_map(live_bit_map),
 493     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 494     _st(st) { }
 495   size_t do_blk(HeapWord* addr);
 496 };
 497 
 498 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 499   size_t sz = _sp->block_size_no_stall(addr, _collector);
 500   assert(sz != 0, "Should always be able to compute a size");
 501   if (_sp->block_is_obj(addr)) {
 502     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 503     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 504       p2i(addr),
 505       dead ? "dead" : "live",
 506       sz,
 507       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 508     if (CMSPrintObjectsInDump && !dead) {
 509       oop(addr)->print_on(_st);
 510       _st->print_cr("--------------------------------------");
 511     }
 512   } else { // free block
 513     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 514       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 515     if (CMSPrintChunksInDump) {
 516       ((FreeChunk*)addr)->print_on(_st);
 517       _st->print_cr("--------------------------------------");
 518     }
 519   }
 520   return sz;
 521 }
 522 
 523 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 524   outputStream* st) {
 525   st->print_cr("\n=========================");
 526   st->print_cr("Block layout in CMS Heap:");
 527   st->print_cr("=========================");
 528   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 529   blk_iterate(&bpcl);
 530 
 531   st->print_cr("\n=======================================");
 532   st->print_cr("Order & Layout of Promotion Info Blocks");
 533   st->print_cr("=======================================");
 534   print_promo_info_blocks(st);
 535 
 536   st->print_cr("\n===========================");
 537   st->print_cr("Order of Indexed Free Lists");
 538   st->print_cr("=========================");
 539   print_indexed_free_lists(st);
 540 
 541   st->print_cr("\n=================================");
 542   st->print_cr("Order of Free Lists in Dictionary");
 543   st->print_cr("=================================");
 544   print_dictionary_free_lists(st);
 545 }
 546 
 547 
 548 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 549   assert_lock_strong(&_freelistLock);
 550   assert(PrintFLSStatistics != 0, "Reporting error");
 551   _dictionary->report_statistics();
 552   if (PrintFLSStatistics > 1) {
 553     reportIndexedFreeListStatistics();
 554     size_t total_size = totalSizeInIndexedFreeLists() +
 555                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 556     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 557   }
 558 }
 559 
 560 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 561   assert_lock_strong(&_freelistLock);
 562   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 563                       "--------------------------------\n");
 564   size_t total_size = totalSizeInIndexedFreeLists();
 565   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
 566   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
 567   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 568   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
 569   if (free_blocks != 0) {
 570     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 571   }
 572 }
 573 
 574 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 575   size_t res = 0;
 576   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 577     debug_only(
 578       ssize_t recount = 0;
 579       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 580          fc = fc->next()) {
 581         recount += 1;
 582       }
 583       assert(recount == _indexedFreeList[i].count(),
 584         "Incorrect count in list");
 585     )
 586     res += _indexedFreeList[i].count();
 587   }
 588   return res;
 589 }
 590 
 591 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 592   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 593     if (_indexedFreeList[i].head() != NULL) {
 594       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 595       return (size_t)i;
 596     }
 597   }
 598   return 0;
 599 }
 600 
 601 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 602   HeapWord* prevEnd = end();
 603   assert(prevEnd != value, "unnecessary set_end call");
 604   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 605         "New end is below unallocated block");
 606   _end = value;
 607   if (prevEnd != NULL) {
 608     // Resize the underlying block offset table.
 609     _bt.resize(pointer_delta(value, bottom()));
 610     if (value <= prevEnd) {
 611       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 612              "New end is below unallocated block");
 613     } else {
 614       // Now, take this new chunk and add it to the free blocks.
 615       // Note that the BOT has not yet been updated for this block.
 616       size_t newFcSize = pointer_delta(value, prevEnd);
 617       // XXX This is REALLY UGLY and should be fixed up. XXX
 618       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 619         // Mark the boundary of the new block in BOT
 620         _bt.mark_block(prevEnd, value);
 621         // put it all in the linAB
 622         MutexLockerEx x(parDictionaryAllocLock(),
 623                         Mutex::_no_safepoint_check_flag);
 624         _smallLinearAllocBlock._ptr = prevEnd;
 625         _smallLinearAllocBlock._word_size = newFcSize;
 626         repairLinearAllocBlock(&_smallLinearAllocBlock);
 627         // Births of chunks put into a LinAB are not recorded.  Births
 628         // of chunks as they are allocated out of a LinAB are.
 629       } else {
 630         // Add the block to the free lists, if possible coalescing it
 631         // with the last free block, and update the BOT and census data.
 632         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 633       }
 634     }
 635   }
 636 }
 637 
 638 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 639   CompactibleFreeListSpace* _cfls;
 640   CMSCollector* _collector;
 641 protected:
 642   // Override.
 643 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 644   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 645                                        HeapWord* bottom, HeapWord* top, \
 646                                        ClosureType* cl);                \
 647       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 648                                        HeapWord* bottom, HeapWord* top, \
 649                                        ClosureType* cl);                \
 650     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 651                                        HeapWord* bottom, HeapWord* top, \
 652                                        ClosureType* cl)
 653   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 654   walk_mem_region_with_cl_DECL(FilteringClosure);
 655 
 656 public:
 657   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 658                       CMSCollector* collector,
 659                       ExtendedOopClosure* cl,
 660                       CardTableModRefBS::PrecisionStyle precision,
 661                       HeapWord* boundary) :
 662     Filtering_DCTOC(sp, cl, precision, boundary),
 663     _cfls(sp), _collector(collector) {}
 664 };
 665 
 666 // We de-virtualize the block-related calls below, since we know that our
 667 // space is a CompactibleFreeListSpace.
 668 
 669 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 670 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 671                                                  HeapWord* bottom,              \
 672                                                  HeapWord* top,                 \
 673                                                  ClosureType* cl) {             \
 674    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 675    if (is_par) {                                                                \
 676      assert(SharedHeap::heap()->n_par_threads() ==                              \
 677             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 678      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 679    } else {                                                                     \
 680      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 681    }                                                                            \
 682 }                                                                               \
 683 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 684                                                       HeapWord* bottom,         \
 685                                                       HeapWord* top,            \
 686                                                       ClosureType* cl) {        \
 687   /* Skip parts that are before "mr", in case "block_start" sent us             \
 688      back too far. */                                                           \
 689   HeapWord* mr_start = mr.start();                                              \
 690   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 691   HeapWord* next = bottom + bot_size;                                           \
 692   while (next < mr_start) {                                                     \
 693     bottom = next;                                                              \
 694     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 695     next = bottom + bot_size;                                                   \
 696   }                                                                             \
 697                                                                                 \
 698   while (bottom < top) {                                                        \
 699     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 700         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 701                     oop(bottom)) &&                                             \
 702         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 703       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 704       bottom += _cfls->adjustObjectSize(word_sz);                               \
 705     } else {                                                                    \
 706       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 707     }                                                                           \
 708   }                                                                             \
 709 }                                                                               \
 710 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 711                                                         HeapWord* bottom,       \
 712                                                         HeapWord* top,          \
 713                                                         ClosureType* cl) {      \
 714   /* Skip parts that are before "mr", in case "block_start" sent us             \
 715      back too far. */                                                           \
 716   HeapWord* mr_start = mr.start();                                              \
 717   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 718   HeapWord* next = bottom + bot_size;                                           \
 719   while (next < mr_start) {                                                     \
 720     bottom = next;                                                              \
 721     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 722     next = bottom + bot_size;                                                   \
 723   }                                                                             \
 724                                                                                 \
 725   while (bottom < top) {                                                        \
 726     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 727         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 728                     oop(bottom)) &&                                             \
 729         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 730       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 731       bottom += _cfls->adjustObjectSize(word_sz);                               \
 732     } else {                                                                    \
 733       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 734     }                                                                           \
 735   }                                                                             \
 736 }
 737 
 738 // (There are only two of these, rather than N, because the split is due
 739 // only to the introduction of the FilteringClosure, a local part of the
 740 // impl of this abstraction.)
 741 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 742 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 743 
 744 DirtyCardToOopClosure*
 745 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 746                                       CardTableModRefBS::PrecisionStyle precision,
 747                                       HeapWord* boundary) {
 748   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 749 }
 750 
 751 
 752 // Note on locking for the space iteration functions:
 753 // since the collector's iteration activities are concurrent with
 754 // allocation activities by mutators, absent a suitable mutual exclusion
 755 // mechanism the iterators may go awry. For instance a block being iterated
 756 // may suddenly be allocated or divided up and part of it allocated and
 757 // so on.
 758 
 759 // Apply the given closure to each block in the space.
 760 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 761   assert_lock_strong(freelistLock());
 762   HeapWord *cur, *limit;
 763   for (cur = bottom(), limit = end(); cur < limit;
 764        cur += cl->do_blk_careful(cur));
 765 }
 766 
 767 // Apply the given closure to each block in the space.
 768 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 769   assert_lock_strong(freelistLock());
 770   HeapWord *cur, *limit;
 771   for (cur = bottom(), limit = end(); cur < limit;
 772        cur += cl->do_blk(cur));
 773 }
 774 
 775 // Apply the given closure to each oop in the space.
 776 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 777   assert_lock_strong(freelistLock());
 778   HeapWord *cur, *limit;
 779   size_t curSize;
 780   for (cur = bottom(), limit = end(); cur < limit;
 781        cur += curSize) {
 782     curSize = block_size(cur);
 783     if (block_is_obj(cur)) {
 784       oop(cur)->oop_iterate(cl);
 785     }
 786   }
 787 }
 788 
 789 // NOTE: In the following methods, in order to safely be able to
 790 // apply the closure to an object, we need to be sure that the
 791 // object has been initialized. We are guaranteed that an object
 792 // is initialized if we are holding the Heap_lock with the
 793 // world stopped.
 794 void CompactibleFreeListSpace::verify_objects_initialized() const {
 795   if (is_init_completed()) {
 796     assert_locked_or_safepoint(Heap_lock);
 797     if (Universe::is_fully_initialized()) {
 798       guarantee(SafepointSynchronize::is_at_safepoint(),
 799                 "Required for objects to be initialized");
 800     }
 801   } // else make a concession at vm start-up
 802 }
 803 
 804 // Apply the given closure to each object in the space
 805 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 806   assert_lock_strong(freelistLock());
 807   NOT_PRODUCT(verify_objects_initialized());
 808   HeapWord *cur, *limit;
 809   size_t curSize;
 810   for (cur = bottom(), limit = end(); cur < limit;
 811        cur += curSize) {
 812     curSize = block_size(cur);
 813     if (block_is_obj(cur)) {
 814       blk->do_object(oop(cur));
 815     }
 816   }
 817 }
 818 
 819 // Apply the given closure to each live object in the space
 820 //   The usage of CompactibleFreeListSpace
 821 // by the ConcurrentMarkSweepGeneration for concurrent GC's allows
 822 // objects in the space with references to objects that are no longer
 823 // valid.  For example, an object may reference another object
 824 // that has already been sweep up (collected).  This method uses
 825 // obj_is_alive() to determine whether it is safe to apply the closure to
 826 // an object.  See obj_is_alive() for details on how liveness of an
 827 // object is decided.
 828 
 829 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 830   assert_lock_strong(freelistLock());
 831   NOT_PRODUCT(verify_objects_initialized());
 832   HeapWord *cur, *limit;
 833   size_t curSize;
 834   for (cur = bottom(), limit = end(); cur < limit;
 835        cur += curSize) {
 836     curSize = block_size(cur);
 837     if (block_is_obj(cur) && obj_is_alive(cur)) {
 838       blk->do_object(oop(cur));
 839     }
 840   }
 841 }
 842 
 843 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 844                                                   UpwardsObjectClosure* cl) {
 845   assert_locked(freelistLock());
 846   NOT_PRODUCT(verify_objects_initialized());
 847   assert(!mr.is_empty(), "Should be non-empty");
 848   // We use MemRegion(bottom(), end()) rather than used_region() below
 849   // because the two are not necessarily equal for some kinds of
 850   // spaces, in particular, certain kinds of free list spaces.
 851   // We could use the more complicated but more precise:
 852   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 853   // but the slight imprecision seems acceptable in the assertion check.
 854   assert(MemRegion(bottom(), end()).contains(mr),
 855          "Should be within used space");
 856   HeapWord* prev = cl->previous();   // max address from last time
 857   if (prev >= mr.end()) { // nothing to do
 858     return;
 859   }
 860   // This assert will not work when we go from cms space to perm
 861   // space, and use same closure. Easy fix deferred for later. XXX YSR
 862   // assert(prev == NULL || contains(prev), "Should be within space");
 863 
 864   bool last_was_obj_array = false;
 865   HeapWord *blk_start_addr, *region_start_addr;
 866   if (prev > mr.start()) {
 867     region_start_addr = prev;
 868     blk_start_addr    = prev;
 869     // The previous invocation may have pushed "prev" beyond the
 870     // last allocated block yet there may be still be blocks
 871     // in this region due to a particular coalescing policy.
 872     // Relax the assertion so that the case where the unallocated
 873     // block is maintained and "prev" is beyond the unallocated
 874     // block does not cause the assertion to fire.
 875     assert((BlockOffsetArrayUseUnallocatedBlock &&
 876             (!is_in(prev))) ||
 877            (blk_start_addr == block_start(region_start_addr)), "invariant");
 878   } else {
 879     region_start_addr = mr.start();
 880     blk_start_addr    = block_start(region_start_addr);
 881   }
 882   HeapWord* region_end_addr = mr.end();
 883   MemRegion derived_mr(region_start_addr, region_end_addr);
 884   while (blk_start_addr < region_end_addr) {
 885     const size_t size = block_size(blk_start_addr);
 886     if (block_is_obj(blk_start_addr)) {
 887       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 888     } else {
 889       last_was_obj_array = false;
 890     }
 891     blk_start_addr += size;
 892   }
 893   if (!last_was_obj_array) {
 894     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 895            "Should be within (closed) used space");
 896     assert(blk_start_addr > prev, "Invariant");
 897     cl->set_previous(blk_start_addr); // min address for next time
 898   }
 899 }
 900 
 901 
 902 // Callers of this iterator beware: The closure application should
 903 // be robust in the face of uninitialized objects and should (always)
 904 // return a correct size so that the next addr + size below gives us a
 905 // valid block boundary. [See for instance,
 906 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 907 // in ConcurrentMarkSweepGeneration.cpp.]
 908 HeapWord*
 909 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 910   ObjectClosureCareful* cl) {
 911   assert_lock_strong(freelistLock());
 912   // Can't use used_region() below because it may not necessarily
 913   // be the same as [bottom(),end()); although we could
 914   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 915   // that appears too cumbersome, so we just do the simpler check
 916   // in the assertion below.
 917   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 918          "mr should be non-empty and within used space");
 919   HeapWord *addr, *end;
 920   size_t size;
 921   for (addr = block_start_careful(mr.start()), end  = mr.end();
 922        addr < end; addr += size) {
 923     FreeChunk* fc = (FreeChunk*)addr;
 924     if (fc->is_free()) {
 925       // Since we hold the free list lock, which protects direct
 926       // allocation in this generation by mutators, a free object
 927       // will remain free throughout this iteration code.
 928       size = fc->size();
 929     } else {
 930       // Note that the object need not necessarily be initialized,
 931       // because (for instance) the free list lock does NOT protect
 932       // object initialization. The closure application below must
 933       // therefore be correct in the face of uninitialized objects.
 934       size = cl->do_object_careful_m(oop(addr), mr);
 935       if (size == 0) {
 936         // An unparsable object found. Signal early termination.
 937         return addr;
 938       }
 939     }
 940   }
 941   return NULL;
 942 }
 943 
 944 
 945 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 946   NOT_PRODUCT(verify_objects_initialized());
 947   return _bt.block_start(p);
 948 }
 949 
 950 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 951   return _bt.block_start_careful(p);
 952 }
 953 
 954 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 955   NOT_PRODUCT(verify_objects_initialized());
 956   // This must be volatile, or else there is a danger that the compiler
 957   // will compile the code below into a sometimes-infinite loop, by keeping
 958   // the value read the first time in a register.
 959   while (true) {
 960     // We must do this until we get a consistent view of the object.
 961     if (FreeChunk::indicatesFreeChunk(p)) {
 962       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 963       size_t res = fc->size();
 964 
 965       // Bugfix for systems with weak memory model (PPC64/IA64). The
 966       // block's free bit was set and we have read the size of the
 967       // block. Acquire and check the free bit again. If the block is
 968       // still free, the read size is correct.
 969       OrderAccess::acquire();
 970 
 971       // If the object is still a free chunk, return the size, else it
 972       // has been allocated so try again.
 973       if (FreeChunk::indicatesFreeChunk(p)) {
 974         assert(res != 0, "Block size should not be 0");
 975         return res;
 976       }
 977     } else {
 978       // must read from what 'p' points to in each loop.
 979       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
 980       if (k != NULL) {
 981         assert(k->is_klass(), "Should really be klass oop.");
 982         oop o = (oop)p;
 983         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 984 
 985         // Bugfix for systems with weak memory model (PPC64/IA64).
 986         // The object o may be an array. Acquire to make sure that the array
 987         // size (third word) is consistent.
 988         OrderAccess::acquire();
 989 
 990         size_t res = o->size_given_klass(k);
 991         res = adjustObjectSize(res);
 992         assert(res != 0, "Block size should not be 0");
 993         return res;
 994       }
 995     }
 996   }
 997 }
 998 
 999 // TODO: Now that is_parsable is gone, we should combine these two functions.
1000 // A variant of the above that uses the Printezis bits for
1001 // unparsable but allocated objects. This avoids any possible
1002 // stalls waiting for mutators to initialize objects, and is
1003 // thus potentially faster than the variant above. However,
1004 // this variant may return a zero size for a block that is
1005 // under mutation and for which a consistent size cannot be
1006 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1007 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1008                                                      const CMSCollector* c)
1009 const {
1010   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1011   // This must be volatile, or else there is a danger that the compiler
1012   // will compile the code below into a sometimes-infinite loop, by keeping
1013   // the value read the first time in a register.
1014   DEBUG_ONLY(uint loops = 0;)
1015   while (true) {
1016     // We must do this until we get a consistent view of the object.
1017     if (FreeChunk::indicatesFreeChunk(p)) {
1018       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1019       size_t res = fc->size();
1020 
1021       // Bugfix for systems with weak memory model (PPC64/IA64). The
1022       // free bit of the block was set and we have read the size of
1023       // the block. Acquire and check the free bit again. If the
1024       // block is still free, the read size is correct.
1025       OrderAccess::acquire();
1026 
1027       if (FreeChunk::indicatesFreeChunk(p)) {
1028         assert(res != 0, "Block size should not be 0");
1029         assert(loops == 0, "Should be 0");
1030         return res;
1031       }
1032     } else {
1033       // must read from what 'p' points to in each loop.
1034       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1035       // We trust the size of any object that has a non-NULL
1036       // klass and (for those in the perm gen) is parsable
1037       // -- irrespective of its conc_safe-ty.
1038       if (k != NULL) {
1039         assert(k->is_klass(), "Should really be klass oop.");
1040         oop o = (oop)p;
1041         assert(o->is_oop(), "Should be an oop");
1042 
1043         // Bugfix for systems with weak memory model (PPC64/IA64).
1044         // The object o may be an array. Acquire to make sure that the array
1045         // size (third word) is consistent.
1046         OrderAccess::acquire();
1047 
1048         size_t res = o->size_given_klass(k);
1049         res = adjustObjectSize(res);
1050         assert(res != 0, "Block size should not be 0");
1051         return res;
1052       } else {
1053         // May return 0 if P-bits not present.
1054         return c->block_size_if_printezis_bits(p);
1055       }
1056     }
1057     assert(loops == 0, "Can loop at most once");
1058     DEBUG_ONLY(loops++;)
1059   }
1060 }
1061 
1062 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1063   NOT_PRODUCT(verify_objects_initialized());
1064   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1065   FreeChunk* fc = (FreeChunk*)p;
1066   if (fc->is_free()) {
1067     return fc->size();
1068   } else {
1069     // Ignore mark word because this may be a recently promoted
1070     // object whose mark word is used to chain together grey
1071     // objects (the last one would have a null value).
1072     assert(oop(p)->is_oop(true), "Should be an oop");
1073     return adjustObjectSize(oop(p)->size());
1074   }
1075 }
1076 
1077 // This implementation assumes that the property of "being an object" is
1078 // stable.  But being a free chunk may not be (because of parallel
1079 // promotion.)
1080 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1081   FreeChunk* fc = (FreeChunk*)p;
1082   assert(is_in_reserved(p), "Should be in space");
1083   // When doing a mark-sweep-compact of the CMS generation, this
1084   // assertion may fail because prepare_for_compaction() uses
1085   // space that is garbage to maintain information on ranges of
1086   // live objects so that these live ranges can be moved as a whole.
1087   // Comment out this assertion until that problem can be solved
1088   // (i.e., that the block start calculation may look at objects
1089   // at address below "p" in finding the object that contains "p"
1090   // and those objects (if garbage) may have been modified to hold
1091   // live range information.
1092   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1093   //        "Should be a block boundary");
1094   if (FreeChunk::indicatesFreeChunk(p)) return false;
1095   Klass* k = oop(p)->klass_or_null();
1096   if (k != NULL) {
1097     // Ignore mark word because it may have been used to
1098     // chain together promoted objects (the last one
1099     // would have a null value).
1100     assert(oop(p)->is_oop(true), "Should be an oop");
1101     return true;
1102   } else {
1103     return false;  // Was not an object at the start of collection.
1104   }
1105 }
1106 
1107 // Check if the object is alive. This fact is checked either by consulting
1108 // the main marking bitmap in the sweeping phase or, if it's a permanent
1109 // generation and we're not in the sweeping phase, by checking the
1110 // perm_gen_verify_bit_map where we store the "deadness" information if
1111 // we did not sweep the perm gen in the most recent previous GC cycle.
1112 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1113   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1114          "Else races are possible");
1115   assert(block_is_obj(p), "The address should point to an object");
1116 
1117   // If we're sweeping, we use object liveness information from the main bit map
1118   // for both perm gen and old gen.
1119   // We don't need to lock the bitmap (live_map or dead_map below), because
1120   // EITHER we are in the middle of the sweeping phase, and the
1121   // main marking bit map (live_map below) is locked,
1122   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1123   // is stable, because it's mutated only in the sweeping phase.
1124   // NOTE: This method is also used by jmap where, if class unloading is
1125   // off, the results can return "false" for legitimate perm objects,
1126   // when we are not in the midst of a sweeping phase, which can result
1127   // in jmap not reporting certain perm gen objects. This will be moot
1128   // if/when the perm gen goes away in the future.
1129   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1130     CMSBitMap* live_map = _collector->markBitMap();
1131     return live_map->par_isMarked((HeapWord*) p);
1132   }
1133   return true;
1134 }
1135 
1136 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1137   FreeChunk* fc = (FreeChunk*)p;
1138   assert(is_in_reserved(p), "Should be in space");
1139   assert(_bt.block_start(p) == p, "Should be a block boundary");
1140   if (!fc->is_free()) {
1141     // Ignore mark word because it may have been used to
1142     // chain together promoted objects (the last one
1143     // would have a null value).
1144     assert(oop(p)->is_oop(true), "Should be an oop");
1145     return true;
1146   }
1147   return false;
1148 }
1149 
1150 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
1151 // approximate answer if you don't hold the freelistlock when you call this.
1152 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1153   size_t size = 0;
1154   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1155     debug_only(
1156       // We may be calling here without the lock in which case we
1157       // won't do this modest sanity check.
1158       if (freelistLock()->owned_by_self()) {
1159         size_t total_list_size = 0;
1160         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1161           fc = fc->next()) {
1162           total_list_size += i;
1163         }
1164         assert(total_list_size == i * _indexedFreeList[i].count(),
1165                "Count in list is incorrect");
1166       }
1167     )
1168     size += i * _indexedFreeList[i].count();
1169   }
1170   return size;
1171 }
1172 
1173 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1174   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1175   return allocate(size);
1176 }
1177 
1178 HeapWord*
1179 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1180   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1181 }
1182 
1183 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1184   assert_lock_strong(freelistLock());
1185   HeapWord* res = NULL;
1186   assert(size == adjustObjectSize(size),
1187          "use adjustObjectSize() before calling into allocate()");
1188 
1189   if (_adaptive_freelists) {
1190     res = allocate_adaptive_freelists(size);
1191   } else {  // non-adaptive free lists
1192     res = allocate_non_adaptive_freelists(size);
1193   }
1194 
1195   if (res != NULL) {
1196     // check that res does lie in this space!
1197     assert(is_in_reserved(res), "Not in this space!");
1198     assert(is_aligned((void*)res), "alignment check");
1199 
1200     FreeChunk* fc = (FreeChunk*)res;
1201     fc->markNotFree();
1202     assert(!fc->is_free(), "shouldn't be marked free");
1203     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1204     // Verify that the block offset table shows this to
1205     // be a single block, but not one which is unallocated.
1206     _bt.verify_single_block(res, size);
1207     _bt.verify_not_unallocated(res, size);
1208     // mangle a just allocated object with a distinct pattern.
1209     debug_only(fc->mangleAllocated(size));
1210   }
1211 
1212   return res;
1213 }
1214 
1215 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1216   HeapWord* res = NULL;
1217   // try and use linear allocation for smaller blocks
1218   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1219     // if successful, the following also adjusts block offset table
1220     res = getChunkFromSmallLinearAllocBlock(size);
1221   }
1222   // Else triage: indexed free lists for smaller sizes, dictionary for larger ones
1223   if (res == NULL) {
1224     if (size < SmallForDictionary) {
1225       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1226     } else {
1227       // else get it from the big dictionary; if even this doesn't
1228       // work we are out of luck.
1229       res = (HeapWord*)getChunkFromDictionaryExact(size);
1230     }
1231   }
1232 
1233   return res;
1234 }
1235 
1236 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1237   assert_lock_strong(freelistLock());
1238   HeapWord* res = NULL;
1239   assert(size == adjustObjectSize(size),
1240          "use adjustObjectSize() before calling into allocate()");
1241 
1242   // Strategy
1243   //   if small
1244   //     exact size from small object indexed list if small
1245   //     small or large linear allocation block (linAB) as appropriate
1246   //     take from lists of greater sized chunks
1247   //   else
1248   //     dictionary
1249   //     small or large linear allocation block if it has the space
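  //
  // Illustrative walk-through (sizes in heap words; IndexSetSize is 257):
  // a request for, say, 24 words first tries _indexedFreeList[24]; if that
  // list is empty, the small linAB is tried next; failing that,
  // getChunkFromGreater() raids the larger indexed lists and, ultimately,
  // the dictionary.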
1250   // Try allocating exact size from indexTable first
1251   if (size < IndexSetSize) {
1252     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1253     if (res != NULL) {
1254       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1255         "Not removed from free list");
1256       // no block offset table adjustment is necessary on blocks in
1257       // the indexed lists.
1258 
1259     // Try allocating from the small LinAB
1260     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1261         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1262         // if successful, the above also adjusts block offset table
1263         // Note that this call will refill the LinAB to
1264         // satisfy the request.  This is different than the
1265         // EVM.
1266         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1267     } else {
1268       // Raid the exact free lists larger than size, even if they are not
1269       // overpopulated.
1270       res = (HeapWord*) getChunkFromGreater(size);
1271     }
1272   } else {
1273     // Big objects get allocated directly from the dictionary.
1274     res = (HeapWord*) getChunkFromDictionaryExact(size);
1275     if (res == NULL) {
1276       // Try hard not to fail since an allocation failure will likely
1277       // trigger a synchronous GC.  Try to get the space from the linear
1278       // allocation blocks.
1279       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1280     }
1281   }
1282 
1283   return res;
1284 }
1285 
1286 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1287 // when promoting obj.
1288 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1289   // Depending on the object size, expansion may require refilling either a
1290   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2*MinChunkSize
1291   // is added because the dictionary may over-allocate to avoid fragmentation.
1292   size_t space = obj_size;
1293   if (!_adaptive_freelists) {
1294     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1295   }
1296   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1297   return space;
1298 }
1299 
1300 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1301   FreeChunk* ret;
1302 
1303   assert(numWords >= MinChunkSize, "Size is less than minimum");
1304   assert(linearAllocationWouldFail() || bestFitFirst(),
1305     "Should not be here");
1306 
1307   size_t i;
1308   size_t currSize = numWords + MinChunkSize;
1309   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
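  // Start the search at numWords + MinChunkSize so that any chunk found can
  // be carved into the requested numWords plus a remainder of at least
  // MinChunkSize, which can then stand alone as a free chunk (see
  // splitChunkAndReturnRemainder()).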
1310   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1311     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1312     if (fl->head()) {
1313       ret = getFromListGreater(fl, numWords);
1314       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1315       return ret;
1316     }
1317   }
1318 
1319   currSize = MAX2((size_t)SmallForDictionary,
1320                   (size_t)(numWords + MinChunkSize));
1321 
1322   /* Try to get a chunk that satisfies request, while avoiding
1323      fragmentation that can't be handled. */
1324   {
1325     ret =  dictionary()->get_chunk(currSize);
1326     if (ret != NULL) {
1327       assert(ret->size() - numWords >= MinChunkSize,
1328              "Chunk is too small");
1329       _bt.allocated((HeapWord*)ret, ret->size());
1330       /* Carve returned chunk. */
1331       (void) splitChunkAndReturnRemainder(ret, numWords);
1332       /* Label this as no longer a free chunk. */
1333       assert(ret->is_free(), "This chunk should be free");
1334       ret->link_prev(NULL);
1335     }
1336     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1337     return ret;
1338   }
1339   ShouldNotReachHere();
1340 }
1341 
1342 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1343   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1344   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1345 }
1346 
1347 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1348   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1349          (_smallLinearAllocBlock._word_size == fc->size()),
1350          "Linear allocation block shows incorrect size");
1351   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1352           (_smallLinearAllocBlock._word_size == fc->size()));
1353 }
1354 
1355 // Check if the purported free chunk is present either as a linear
1356 // allocation block, the size-indexed table of (smaller) free blocks,
1357 // or the larger free blocks kept in the binary tree dictionary.
1358 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1359   if (verify_chunk_is_linear_alloc_block(fc)) {
1360     return true;
1361   } else if (fc->size() < IndexSetSize) {
1362     return verifyChunkInIndexedFreeLists(fc);
1363   } else {
1364     return dictionary()->verify_chunk_in_free_list(fc);
1365   }
1366 }
1367 
1368 #ifndef PRODUCT
1369 void CompactibleFreeListSpace::assert_locked() const {
1370   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1371 }
1372 
1373 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1374   CMSLockVerifier::assert_locked(lock);
1375 }
1376 #endif
1377 
1378 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1379   // In the parallel case, the main thread holds the free list lock
1380   // on behalf of the parallel threads.
1381   FreeChunk* fc;
1382   {
1383     // If GC is parallel, this might be called by several threads.
1384     // This should be rare enough that the locking overhead won't affect
1385     // the sequential code.
1386     MutexLockerEx x(parDictionaryAllocLock(),
1387                     Mutex::_no_safepoint_check_flag);
1388     fc = getChunkFromDictionary(size);
1389   }
1390   if (fc != NULL) {
1391     fc->dontCoalesce();
1392     assert(fc->is_free(), "Should be free, but not coalescable");
1393     // Verify that the block offset table shows this to
1394     // be a single block, but not one which is unallocated.
1395     _bt.verify_single_block((HeapWord*)fc, fc->size());
1396     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1397   }
1398   return fc;
1399 }
1400 
1401 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1402   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1403   assert_locked();
1404 
1405   // if we are tracking promotions, then first ensure space for
1406   // promotion (including spooling space for saving header if necessary).
1407   // then allocate and copy, then track promoted info if needed.
1408   // When tracking (see PromotionInfo::track()), the mark word may
1409   // be displaced and in this case restoration of the mark word
1410   // occurs in the (oop_since_save_marks_)iterate phase.
1411   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1412     return NULL;
1413   }
1414   // Call the allocate(size_t, bool) form directly to avoid the
1415   // additional call through the allocate(size_t) form.  Having
1416   // the compiler inline the call is problematic because allocate(size_t)
1417   // is a virtual method.
1418   HeapWord* res = allocate(adjustObjectSize(obj_size));
1419   if (res != NULL) {
1420     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1421     // if we should be tracking promotions, do so.
1422     if (_promoInfo.tracking()) {
1423         _promoInfo.track((PromotedObject*)res);
1424     }
1425   }
1426   return oop(res);
1427 }
1428 
1429 HeapWord*
1430 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1431   assert_locked();
1432   assert(size >= MinChunkSize, "minimum chunk size");
1433   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1434     "maximum from smallLinearAllocBlock");
1435   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1436 }
1437 
1438 HeapWord*
1439 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1440                                                        size_t size) {
1441   assert_locked();
1442   assert(size >= MinChunkSize, "too small");
1443   HeapWord* res = NULL;
1444   // Try to do linear allocation from blk, making sure the block has not been exhausted.
1445   if (blk->_word_size == 0) {
1446     // We have probably been unable to fill this either in the prologue or
1447     // when it was exhausted at the last linear allocation. Bail out until
1448     // next time.
1449     assert(blk->_ptr == NULL, "consistency check");
1450     return NULL;
1451   }
1452   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1453   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1454   if (res != NULL) return res;
1455 
1456   // about to exhaust this linear allocation block
1457   if (blk->_word_size == size) { // exactly satisfied
1458     res = blk->_ptr;
1459     _bt.allocated(res, blk->_word_size);
1460   } else if (size + MinChunkSize <= blk->_refillSize) {
1461     size_t sz = blk->_word_size;
1462     // Update _unallocated_block if the size is such that chunk would be
1463     // returned to the indexed free list.  All other chunks in the indexed
1464     // free lists are allocated from the dictionary so that _unallocated_block
1465     // has already been adjusted for them.  Do it here so that the cost is
1466     // incurred only for chunks added back to the indexed free lists.
1467     if (sz < SmallForDictionary) {
1468       _bt.allocated(blk->_ptr, sz);
1469     }
1470     // Return the chunk that isn't big enough, and then refill below.
1471     addChunkToFreeLists(blk->_ptr, sz);
1472     split_birth(sz);
1473     // Don't keep statistics on adding back chunk from a LinAB.
1474   } else {
1475     // A refilled block would not satisfy the request.
1476     return NULL;
1477   }
1478 
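  // At this point the linAB is either exactly consumed (res is set) or its
  // too-small remainder has been returned to the free lists above.  Reset
  // the block and refill it; if the request is still unsatisfied, carve it
  // out of the freshly refilled block below.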
1479   blk->_ptr = NULL; blk->_word_size = 0;
1480   refillLinearAllocBlock(blk);
1481   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1482          "block was replenished");
1483   if (res != NULL) {
1484     split_birth(size);
1485     repairLinearAllocBlock(blk);
1486   } else if (blk->_ptr != NULL) {
1487     res = blk->_ptr;
1488     size_t blk_size = blk->_word_size;
1489     blk->_word_size -= size;
1490     blk->_ptr  += size;
1491     split_birth(size);
1492     repairLinearAllocBlock(blk);
1493     // Update BOT last so that other (parallel) GC threads see a consistent
1494     // view of the BOT and free blocks.
1495     // Above must occur before BOT is updated below.
1496     OrderAccess::storestore();
1497     _bt.split_block(res, blk_size, size);  // adjust block offset table
1498   }
1499   return res;
1500 }
1501 
1502 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1503                                         LinearAllocBlock* blk,
1504                                         size_t size) {
1505   assert_locked();
1506   assert(size >= MinChunkSize, "too small");
1507 
1508   HeapWord* res = NULL;
1509   // This is the common case.  Keep it simple.
1510   if (blk->_word_size >= size + MinChunkSize) {
1511     assert(blk->_ptr != NULL, "consistency check");
1512     res = blk->_ptr;
1513     // Note that the BOT is up-to-date for the linAB before allocation.  It
1514     // indicates the start of the linAB.  The split_block() updates the
1515     // BOT for the linAB after the allocation (indicates the start of the
1516     // next chunk to be allocated).
1517     size_t blk_size = blk->_word_size;
1518     blk->_word_size -= size;
1519     blk->_ptr  += size;
1520     split_birth(size);
1521     repairLinearAllocBlock(blk);
1522     // Update BOT last so that other (parallel) GC threads see a consistent
1523     // view of the BOT and free blocks.
1524     // Above must occur before BOT is updated below.
1525     OrderAccess::storestore();
1526     _bt.split_block(res, blk_size, size);  // adjust block offset table
1527     _bt.allocated(res, size);
1528   }
1529   return res;
1530 }
1531 
1532 FreeChunk*
1533 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1534   assert_locked();
1535   assert(size < SmallForDictionary, "just checking");
1536   FreeChunk* res;
1537   res = _indexedFreeList[size].get_chunk_at_head();
1538   if (res == NULL) {
1539     res = getChunkFromIndexedFreeListHelper(size);
1540   }
1541   _bt.verify_not_unallocated((HeapWord*) res, size);
1542   assert(res == NULL || res->size() == size, "Incorrect block size");
1543   return res;
1544 }
1545 
1546 FreeChunk*
1547 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1548   bool replenish) {
1549   assert_locked();
1550   FreeChunk* fc = NULL;
1551   if (size < SmallForDictionary) {
1552     assert(_indexedFreeList[size].head() == NULL ||
1553       _indexedFreeList[size].surplus() <= 0,
1554       "List for this size should be empty or under populated");
1555     // Try best fit in exact lists before replenishing the list
1556     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1557       // Replenish list.
1558       //
1559       // Things tried that failed.
1560       //   Tried allocating out of the two LinAB's first before
1561       // replenishing lists.
1562       //   Tried small linAB of size 256 (size in indexed list)
1563       // and replenishing indexed lists from the small linAB.
1564       //
1565       FreeChunk* newFc = NULL;
1566       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
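      // Illustrative example: if CMSIndexedFreeListReplenish were 4, a request
      // for an 8-word chunk would look for a 32-word chunk here and carve it
      // into four 8-word blocks below, returning one to the caller and
      // queueing the other three on _indexedFreeList[8].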
1567       if (replenish_size < SmallForDictionary) {
1568         // Do not replenish from an underpopulated size.
1569         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1570             _indexedFreeList[replenish_size].head() != NULL) {
1571           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1572         } else if (bestFitFirst()) {
1573           newFc = bestFitSmall(replenish_size);
1574         }
1575       }
1576       if (newFc == NULL && replenish_size > size) {
1577         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1578         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1579       }
1580       // Note: The stats update re split-death of block obtained above
1581       // will be recorded below precisely when we know we are going to
1582       // actually be splitting it into more than one piece below.
1583       if (newFc != NULL) {
1584         if  (replenish || CMSReplenishIntermediate) {
1585           // Replenish this list and return one block to caller.
1586           size_t i;
1587           FreeChunk *curFc, *nextFc;
1588           size_t num_blk = newFc->size() / size;
1589           assert(num_blk >= 1, "Smaller than requested?");
1590           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1591           if (num_blk > 1) {
1592             // we are sure we will be splitting the block just obtained
1593             // into multiple pieces; record the split-death of the original
1594             splitDeath(replenish_size);
1595           }
1596           // carve up and link blocks 0, ..., num_blk - 2
1597           // The last chunk is not added to the lists but is returned as the
1598           // free chunk.
1599           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1600                i = 0;
1601                i < (num_blk - 1);
1602                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1603                i++) {
1604             curFc->set_size(size);
1605             // Don't record this as a return in order to try and
1606             // determine the "returns" from a GC.
1607             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1608             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1609             _bt.mark_block((HeapWord*)curFc, size);
1610             split_birth(size);
1611             // Don't record the initial population of the indexed list
1612             // as a split birth.
1613           }
1614 
1615           // check that the arithmetic was OK above
1616           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1617             "inconsistency in carving newFc");
1618           curFc->set_size(size);
1619           _bt.mark_block((HeapWord*)curFc, size);
1620           split_birth(size);
1621           fc = curFc;
1622         } else {
1623           // Return entire block to caller
1624           fc = newFc;
1625         }
1626       }
1627     }
1628   } else {
1629     // Get a free chunk from the free chunk dictionary to be returned to
1630     // replenish the indexed free list.
1631     fc = getChunkFromDictionaryExact(size);
1632   }
1633   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1634   return fc;
1635 }
1636 
1637 FreeChunk*
1638 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1639   assert_locked();
1640   FreeChunk* fc = _dictionary->get_chunk(size,
1641                                          FreeBlockDictionary<FreeChunk>::atLeast);
1642   if (fc == NULL) {
1643     return NULL;
1644   }
1645   _bt.allocated((HeapWord*)fc, fc->size());
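  // The dictionary may hand back a chunk larger than requested.  If it is
  // large enough that carving off 'size' words still leaves at least
  // MinChunkSize, trim it below and return the tail to the free data
  // structures; otherwise keep the whole (slightly oversized) chunk.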
1646   if (fc->size() >= size + MinChunkSize) {
1647     fc = splitChunkAndReturnRemainder(fc, size);
1648   }
1649   assert(fc->size() >= size, "chunk too small");
1650   assert(fc->size() < size + MinChunkSize, "chunk too big");
1651   _bt.verify_single_block((HeapWord*)fc, fc->size());
1652   return fc;
1653 }
1654 
1655 FreeChunk*
1656 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1657   assert_locked();
1658   FreeChunk* fc = _dictionary->get_chunk(size,
1659                                          FreeBlockDictionary<FreeChunk>::atLeast);
1660   if (fc == NULL) {
1661     return fc;
1662   }
1663   _bt.allocated((HeapWord*)fc, fc->size());
1664   if (fc->size() == size) {
1665     _bt.verify_single_block((HeapWord*)fc, size);
1666     return fc;
1667   }
1668   assert(fc->size() > size, "get_chunk() guarantee");
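  // The chunk is larger than requested, but if the excess is less than
  // MinChunkSize it cannot be split (the remainder would be too small to
  // stand alone as a free chunk).  Give it back and ask the dictionary for
  // one that is guaranteed to leave a splittable remainder.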
1669   if (fc->size() < size + MinChunkSize) {
1670     // Return the chunk to the dictionary and go get a bigger one.
1671     returnChunkToDictionary(fc);
1672     fc = _dictionary->get_chunk(size + MinChunkSize,
1673                                 FreeBlockDictionary<FreeChunk>::atLeast);
1674     if (fc == NULL) {
1675       return NULL;
1676     }
1677     _bt.allocated((HeapWord*)fc, fc->size());
1678   }
1679   assert(fc->size() >= size + MinChunkSize, "tautology");
1680   fc = splitChunkAndReturnRemainder(fc, size);
1681   assert(fc->size() == size, "chunk is wrong size");
1682   _bt.verify_single_block((HeapWord*)fc, size);
1683   return fc;
1684 }
1685 
1686 void
1687 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1688   assert_locked();
1689 
1690   size_t size = chunk->size();
1691   _bt.verify_single_block((HeapWord*)chunk, size);
1692   // adjust _unallocated_block downward, as necessary
1693   _bt.freed((HeapWord*)chunk, size);
1694   _dictionary->return_chunk(chunk);
1695 #ifndef PRODUCT
1696   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1697     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1698     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1699     tl->verify_stats();
1700   }
1701 #endif // PRODUCT
1702 }
1703 
1704 void
1705 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1706   assert_locked();
1707   size_t size = fc->size();
1708   _bt.verify_single_block((HeapWord*) fc, size);
1709   _bt.verify_not_unallocated((HeapWord*) fc, size);
1710   if (_adaptive_freelists) {
1711     _indexedFreeList[size].return_chunk_at_tail(fc);
1712   } else {
1713     _indexedFreeList[size].return_chunk_at_head(fc);
1714   }
1715 #ifndef PRODUCT
1716   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1717      _indexedFreeList[size].verify_stats();
1718   }
1719 #endif // PRODUCT
1720 }
1721 
1722 // Add chunk to end of last block -- if it's the largest
1723 // block -- and update BOT and census data. We would
1724 // of course have preferred to coalesce it with the
1725 // last block, but it's currently less expensive to find the
1726 // largest block than it is to find the last.
1727 void
1728 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1729   HeapWord* chunk, size_t     size) {
1730   // check that the chunk does lie in this space!
1731   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1732   // One of the parallel gc task threads may be here
1733   // whilst others are allocating.
1734   Mutex* lock = &_parDictionaryAllocLock;
1735   FreeChunk* ec;
1736   {
1737     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1738     ec = dictionary()->find_largest_dict();  // get largest block
1739     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1740       // It's a coterminal block - we can coalesce.
1741       size_t old_size = ec->size();
1742       coalDeath(old_size);
1743       removeChunkFromDictionary(ec);
1744       size += old_size;
1745     } else {
1746       ec = (FreeChunk*)chunk;
1747     }
1748   }
1749   ec->set_size(size);
1750   debug_only(ec->mangleFreed(size));
1751   if (size < SmallForDictionary) {
1752     lock = _indexedFreeListParLocks[size];
1753   }
1754   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1755   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1756   // record the birth under the lock since the recording involves
1757   // manipulation of the list on which the chunk lives and
1758   // if the chunk is allocated and is the last on the list,
1759   // the list can go away.
1760   coalBirth(size);
1761 }
1762 
1763 void
1764 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1765                                               size_t     size) {
1766   // check that the chunk does lie in this space!
1767   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1768   assert_locked();
1769   _bt.verify_single_block(chunk, size);
1770 
1771   FreeChunk* fc = (FreeChunk*) chunk;
1772   fc->set_size(size);
1773   debug_only(fc->mangleFreed(size));
1774   if (size < SmallForDictionary) {
1775     returnChunkToFreeList(fc);
1776   } else {
1777     returnChunkToDictionary(fc);
1778   }
1779 }
1780 
1781 void
1782 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1783   size_t size, bool coalesced) {
1784   assert_locked();
1785   assert(chunk != NULL, "null chunk");
1786   if (coalesced) {
1787     // repair BOT
1788     _bt.single_block(chunk, size);
1789   }
1790   addChunkToFreeLists(chunk, size);
1791 }
1792 
1793 // We _must_ find the purported chunk on our free lists;
1794 // we assert if we don't.
1795 void
1796 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1797   size_t size = fc->size();
1798   assert_locked();
1799   debug_only(verifyFreeLists());
1800   if (size < SmallForDictionary) {
1801     removeChunkFromIndexedFreeList(fc);
1802   } else {
1803     removeChunkFromDictionary(fc);
1804   }
1805   _bt.verify_single_block((HeapWord*)fc, size);
1806   debug_only(verifyFreeLists());
1807 }
1808 
1809 void
1810 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1811   size_t size = fc->size();
1812   assert_locked();
1813   assert(fc != NULL, "null chunk");
1814   _bt.verify_single_block((HeapWord*)fc, size);
1815   _dictionary->remove_chunk(fc);
1816   // adjust _unallocated_block upward, as necessary
1817   _bt.allocated((HeapWord*)fc, size);
1818 }
1819 
1820 void
1821 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1822   assert_locked();
1823   size_t size = fc->size();
1824   _bt.verify_single_block((HeapWord*)fc, size);
1825   NOT_PRODUCT(
1826     if (FLSVerifyIndexTable) {
1827       verifyIndexedFreeList(size);
1828     }
1829   )
1830   _indexedFreeList[size].remove_chunk(fc);
1831   NOT_PRODUCT(
1832     if (FLSVerifyIndexTable) {
1833       verifyIndexedFreeList(size);
1834     }
1835   )
1836 }
1837 
1838 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1839   /* A hint is the next larger size that has a surplus.
1840      Start search at a size large enough to guarantee that
1841      the excess is >= MIN_CHUNK. */
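  /* Illustrative example (with purely illustrative values): for
     numWords == 20 and MinChunkSize == 4, the search starts at the list for
     size 24 and follows each list's hint() to successively larger lists
     until one with a surplus chunk is found, which getFromListGreater()
     then splits. */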
1842   size_t start = align_object_size(numWords + MinChunkSize);
1843   if (start < IndexSetSize) {
1844     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1845     size_t    hint = _indexedFreeList[start].hint();
1846     while (hint < IndexSetSize) {
1847       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1848       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1849       if (fl->surplus() > 0 && fl->head() != NULL) {
1850         // Found a list with surplus, reset original hint
1851         // and split out a free chunk which is returned.
1852         _indexedFreeList[start].set_hint(hint);
1853         FreeChunk* res = getFromListGreater(fl, numWords);
1854         assert(res == NULL || res->is_free(),
1855           "Should be returning a free chunk");
1856         return res;
1857       }
1858       hint = fl->hint(); /* keep looking */
1859     }
1860     /* None found. */
1861     it[start].set_hint(IndexSetSize);
1862   }
1863   return NULL;
1864 }
1865 
1866 /* Requires fl->size >= numWords + MinChunkSize */
1867 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1868   size_t numWords) {
1869   FreeChunk *curr = fl->head();
1870   assert(curr != NULL, "List is empty");
1871   size_t oldNumWords = curr->size();
1872   assert(numWords >= MinChunkSize, "Word size is too small");
1873   assert(oldNumWords >= numWords + MinChunkSize,
1874         "Size of chunks in the list is too small");
1875 
1876   fl->remove_chunk(curr);
1877   // recorded indirectly by splitChunkAndReturnRemainder -
1878   // smallSplit(oldNumWords, numWords);
1879   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1880   // Does anything have to be done for the remainder in terms of
1881   // fixing the card table?
1882   assert(new_chunk == NULL || new_chunk->is_free(),
1883     "Should be returning a free chunk");
1884   return new_chunk;
1885 }
1886 
1887 FreeChunk*
1888 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1889   size_t new_size) {
1890   assert_locked();
1891   size_t size = chunk->size();
1892   assert(size > new_size, "Split from a smaller block?");
1893   assert(is_aligned(chunk), "alignment problem");
1894   assert(size == adjustObjectSize(size), "alignment problem");
1895   size_t rem_sz = size - new_size;
1896   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1897   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1898   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1899   assert(is_aligned(ffc), "alignment problem");
1900   ffc->set_size(rem_sz);
1901   ffc->link_next(NULL);
1902   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1903   // Above must occur before BOT is updated below.
1904   // adjust block offset table
1905   OrderAccess::storestore();
1906   assert(chunk->is_free() && ffc->is_free(), "Error");
1907   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
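  // Return the carved-off remainder to the appropriate data structure: small
  // remainders go back on the per-size indexed free list (taking that list's
  // parallel lock if other GC worker threads may be active), larger ones go
  // back to the dictionary.  Either way the split is recorded in the census.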
1908   if (rem_sz < SmallForDictionary) {
1909     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1910     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1911     assert(!is_par ||
1912            (SharedHeap::heap()->n_par_threads() ==
1913             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1914     returnChunkToFreeList(ffc);
1915     split(size, rem_sz);
1916     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1917   } else {
1918     returnChunkToDictionary(ffc);
1919     split(size, rem_sz);
1920   }
1921   chunk->set_size(new_size);
1922   return chunk;
1923 }
1924 
1925 void
1926 CompactibleFreeListSpace::sweep_completed() {
1927   // Now that space is probably plentiful, refill linear
1928   // allocation blocks as needed.
1929   refillLinearAllocBlocksIfNeeded();
1930 }
1931 
1932 void
1933 CompactibleFreeListSpace::gc_prologue() {
1934   assert_locked();
1935   if (PrintFLSStatistics != 0) {
1936     gclog_or_tty->print("Before GC:\n");
1937     reportFreeListStatistics();
1938   }
1939   refillLinearAllocBlocksIfNeeded();
1940 }
1941 
1942 void
1943 CompactibleFreeListSpace::gc_epilogue() {
1944   assert_locked();
1945   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1946     if (_smallLinearAllocBlock._word_size == 0)
1947       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1948   }
1949   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1950   _promoInfo.stopTrackingPromotions();
1951   repairLinearAllocationBlocks();
1952   // Print Space's stats
1953   if (PrintFLSStatistics != 0) {
1954     gclog_or_tty->print("After GC:\n");
1955     reportFreeListStatistics();
1956   }
1957 }
1958 
1959 // Iteration support, mostly delegated from a CMS generation
1960 
1961 void CompactibleFreeListSpace::save_marks() {
1962   assert(Thread::current()->is_VM_thread(),
1963          "Global variable should only be set when single-threaded");
1964   // Mark the "end" of the used space at the time of this call;
1965   // note, however, that promoted objects from this point
1966   // on are tracked in the _promoInfo below.
1967   set_saved_mark_word(unallocated_block());
1968 #ifdef ASSERT
1969   // Check the sanity of save_marks() etc.
1970   MemRegion ur    = used_region();
1971   MemRegion urasm = used_region_at_save_marks();
1972   assert(ur.contains(urasm),
1973          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1974                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1975                  p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end())));
1976 #endif
1977   // inform allocator that promotions should be tracked.
1978   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1979   _promoInfo.startTrackingPromotions();
1980 }
1981 
1982 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1983   assert(_promoInfo.tracking(), "No preceding save_marks?");
1984   assert(SharedHeap::heap()->n_par_threads() == 0,
1985          "Shouldn't be called if using parallel gc.");
1986   return _promoInfo.noPromotions();
1987 }
1988 
1989 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1990                                                                             \
1991 void CompactibleFreeListSpace::                                             \
1992 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1993   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
1994          "Shouldn't be called (yet) during parallel part of gc.");          \
1995   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1996   /*                                                                        \
1997    * This also restores any displaced headers and removes the elements from \
1998    * the iteration set as they are processed, so that we have a clean slate \
1999    * at the end of the iteration. Note, thus, that if new objects are       \
2000    * promoted as a result of the iteration they are iterated over as well.  \
2001    */                                                                       \
2002   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2003 }
2004 
2005 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2006 
2007 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2008   return _smallLinearAllocBlock._word_size == 0;
2009 }
2010 
2011 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2012   // Fix up linear allocation blocks to look like free blocks
2013   repairLinearAllocBlock(&_smallLinearAllocBlock);
2014 }
2015 
2016 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2017   assert_locked();
2018   if (blk->_ptr != NULL) {
2019     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2020            "Minimum block size requirement");
2021     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2022     fc->set_size(blk->_word_size);
2023     fc->link_prev(NULL);   // mark as free
2024     fc->dontCoalesce();
2025     assert(fc->is_free(), "just marked it free");
2026     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2027   }
2028 }
2029 
2030 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2031   assert_locked();
2032   if (_smallLinearAllocBlock._ptr == NULL) {
2033     assert(_smallLinearAllocBlock._word_size == 0,
2034       "Size of linAB should be zero if the ptr is NULL");
2035     // Reset the linAB refill and allocation size limit.
2036     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2037   }
2038   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2039 }
2040 
2041 void
2042 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2043   assert_locked();
2044   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2045          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2046          "blk invariant");
2047   if (blk->_ptr == NULL) {
2048     refillLinearAllocBlock(blk);
2049   }
2050   if (PrintMiscellaneous && Verbose) {
2051     if (blk->_word_size == 0) {
2052       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2053     }
2054   }
2055 }
2056 
2057 void
2058 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2059   assert_locked();
2060   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2061          "linear allocation block should be empty");
2062   FreeChunk* fc;
2063   if (blk->_refillSize < SmallForDictionary &&
2064       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2065     // A linAB's strategy might be to use small sizes to reduce
2066     // fragmentation but still get the benefits of allocation from a
2067     // linAB.
2068   } else {
2069     fc = getChunkFromDictionary(blk->_refillSize);
2070   }
2071   if (fc != NULL) {
2072     blk->_ptr  = (HeapWord*)fc;
2073     blk->_word_size = fc->size();
2074     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2075   }
2076 }
2077 
2078 // Support for concurrent collection policy decisions.
2079 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2080   // In the future we might want to add in fragmentation stats --
2081   // including erosion of the "mountain" into this decision as well.
2082   return !adaptive_freelists() && linearAllocationWouldFail();
2083 }
2084 
2085 // Support for compaction
2086 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2087   scan_and_forward(this, cp);
2088   // prepare_for_compaction() uses the space between live objects
2089   // so that later phases can skip dead space quickly.  As a result,
2090   // verification of the free lists does not work after this point.
2091 }
2092 
2093 void CompactibleFreeListSpace::adjust_pointers() {
2094   // In other versions of adjust_pointers(), a bail out
2095   // based on the amount of live data in the generation
2096   // (i.e., if 0, bail out) may be used.
2097   // Cannot test used() == 0 here because the free lists have already
2098   // been mangled by the compaction.
2099 
2100   scan_and_adjust_pointers(this);
2101   // See note about verification in prepare_for_compaction().
2102 }
2103 
2104 void CompactibleFreeListSpace::compact() {
2105   scan_and_compact(this);
2106 }
2107 
2108 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2109 // where the fbs are the individual free block sizes
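// Worked example (illustrative): a single free block of size N gives
// 1 - N*N/(N*N) = 0 (no fragmentation), whereas N free blocks of size 1 give
// 1 - N/(N*N) = 1 - 1/N, which approaches 1 as free space becomes more
// fragmented.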
2110 double CompactibleFreeListSpace::flsFrag() const {
2111   size_t itabFree = totalSizeInIndexedFreeLists();
2112   double frag = 0.0;
2113   size_t i;
2114 
2115   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2116     double sz  = i;
2117     frag      += _indexedFreeList[i].count() * (sz * sz);
2118   }
2119 
2120   double totFree = itabFree +
2121                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2122   if (totFree > 0) {
2123     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2124             (totFree * totFree));
2125     frag = (double)1.0  - frag;
2126   } else {
2127     assert(frag == 0.0, "Follows from totFree == 0");
2128   }
2129   return frag;
2130 }
2131 
2132 void CompactibleFreeListSpace::beginSweepFLCensus(
2133   float inter_sweep_current,
2134   float inter_sweep_estimate,
2135   float intra_sweep_estimate) {
2136   assert_locked();
2137   size_t i;
2138   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2139     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2140     if (PrintFLSStatistics > 1) {
2141       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2142     }
2143     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2144     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2145     fl->set_before_sweep(fl->count());
2146     fl->set_bfr_surp(fl->surplus());
2147   }
2148   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2149                                     inter_sweep_current,
2150                                     inter_sweep_estimate,
2151                                     intra_sweep_estimate);
2152 }
2153 
2154 void CompactibleFreeListSpace::setFLSurplus() {
2155   assert_locked();
2156   size_t i;
2157   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2158     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2159     fl->set_surplus(fl->count() -
2160                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2161   }
2162 }
2163 
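// Set each indexed list's hint to the index of the nearest larger list that
// currently has a surplus.  bestFitSmall() follows this chain of hints to
// find a list it can profitably split, so the single downward pass below
// keeps the chain consistent after each sweep census.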
2164 void CompactibleFreeListSpace::setFLHints() {
2165   assert_locked();
2166   size_t i;
2167   size_t h = IndexSetSize;
2168   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2169     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2170     fl->set_hint(h);
2171     if (fl->surplus() > 0) {
2172       h = i;
2173     }
2174   }
2175 }
2176 
2177 void CompactibleFreeListSpace::clearFLCensus() {
2178   assert_locked();
2179   size_t i;
2180   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2181     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2182     fl->set_prev_sweep(fl->count());
2183     fl->set_coal_births(0);
2184     fl->set_coal_deaths(0);
2185     fl->set_split_births(0);
2186     fl->set_split_deaths(0);
2187   }
2188 }
2189 
2190 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2191   if (PrintFLSStatistics > 0) {
2192     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2193     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2194                            p2i(largestAddr));
2195   }
2196   setFLSurplus();
2197   setFLHints();
2198   if (PrintGC && PrintFLSCensus > 0) {
2199     printFLCensus(sweep_count);
2200   }
2201   clearFLCensus();
2202   assert_locked();
2203   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2204 }
2205 
2206 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2207   if (size < SmallForDictionary) {
2208     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2209     return (fl->coal_desired() < 0) ||
2210            ((int)fl->count() > fl->coal_desired());
2211   } else {
2212     return dictionary()->coal_dict_over_populated(size);
2213   }
2214 }
2215 
2216 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2217   assert(size < SmallForDictionary, "Size too large for indexed list");
2218   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2219   fl->increment_coal_births();
2220   fl->increment_surplus();
2221 }
2222 
2223 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2224   assert(size < SmallForDictionary, "Size too large for indexed list");
2225   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2226   fl->increment_coal_deaths();
2227   fl->decrement_surplus();
2228 }
2229 
2230 void CompactibleFreeListSpace::coalBirth(size_t size) {
2231   if (size  < SmallForDictionary) {
2232     smallCoalBirth(size);
2233   } else {
2234     dictionary()->dict_census_update(size,
2235                                    false /* split */,
2236                                    true /* birth */);
2237   }
2238 }
2239 
2240 void CompactibleFreeListSpace::coalDeath(size_t size) {
2241   if (size < SmallForDictionary) {
2242     smallCoalDeath(size);
2243   } else {
2244     dictionary()->dict_census_update(size,
2245                                    false /* split */,
2246                                    false /* birth */);
2247   }
2248 }
2249 
2250 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2251   assert(size < SmallForDictionary, "Size too large for indexed list");
2252   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2253   fl->increment_split_births();
2254   fl->increment_surplus();
2255 }
2256 
2257 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2258   assert(size < SmallForDictionary, "Size too large for indexed list");
2259   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2260   fl->increment_split_deaths();
2261   fl->decrement_surplus();
2262 }
2263 
2264 void CompactibleFreeListSpace::split_birth(size_t size) {
2265   if (size  < SmallForDictionary) {
2266     smallSplitBirth(size);
2267   } else {
2268     dictionary()->dict_census_update(size,
2269                                    true /* split */,
2270                                    true /* birth */);
2271   }
2272 }
2273 
2274 void CompactibleFreeListSpace::splitDeath(size_t size) {
2275   if (size  < SmallForDictionary) {
2276     smallSplitDeath(size);
2277   } else {
2278     dictionary()->dict_census_update(size,
2279                                    true /* split */,
2280                                    false /* birth */);
2281   }
2282 }
2283 
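// Census bookkeeping for splitting a free chunk of size 'from' into pieces
// of sizes 'to1' and 'from - to1': the original size is recorded as a split
// death and each resulting size as a split birth.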
2284 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2285   size_t to2 = from - to1;
2286   splitDeath(from);
2287   split_birth(to1);
2288   split_birth(to2);
2289 }
2290 
2291 void CompactibleFreeListSpace::print() const {
2292   print_on(tty);
2293 }
2294 
2295 void CompactibleFreeListSpace::prepare_for_verify() {
2296   assert_locked();
2297   repairLinearAllocationBlocks();
2298   // Verify that the SpoolBlocks look like free blocks of
2299   // appropriate sizes... To be done ...
2300 }
2301 
2302 class VerifyAllBlksClosure: public BlkClosure {
2303  private:
2304   const CompactibleFreeListSpace* _sp;
2305   const MemRegion                 _span;
2306   HeapWord*                       _last_addr;
2307   size_t                          _last_size;
2308   bool                            _last_was_obj;
2309   bool                            _last_was_live;
2310 
2311  public:
2312   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2313     MemRegion span) :  _sp(sp), _span(span),
2314                        _last_addr(NULL), _last_size(0),
2315                        _last_was_obj(false), _last_was_live(false) { }
2316 
2317   virtual size_t do_blk(HeapWord* addr) {
2318     size_t res;
2319     bool   was_obj  = false;
2320     bool   was_live = false;
2321     if (_sp->block_is_obj(addr)) {
2322       was_obj = true;
2323       oop p = oop(addr);
2324       guarantee(p->is_oop(), "Should be an oop");
2325       res = _sp->adjustObjectSize(p->size());
2326       if (_sp->obj_is_alive(addr)) {
2327         was_live = true;
2328         p->verify();
2329       }
2330     } else {
2331       FreeChunk* fc = (FreeChunk*)addr;
2332       res = fc->size();
2333       if (FLSVerifyLists && !fc->cantCoalesce()) {
2334         guarantee(_sp->verify_chunk_in_free_list(fc),
2335                   "Chunk should be on a free list");
2336       }
2337     }
2338     if (res == 0) {
2339       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2340       gclog_or_tty->print_cr(
2341         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2342         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2343         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2344         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2345       _sp->print_on(gclog_or_tty);
2346       guarantee(false, "Seppuku!");
2347     }
2348     _last_addr = addr;
2349     _last_size = res;
2350     _last_was_obj  = was_obj;
2351     _last_was_live = was_live;
2352     return res;
2353   }
2354 };
2355 
2356 class VerifyAllOopsClosure: public OopClosure {
2357  private:
2358   const CMSCollector*             _collector;
2359   const CompactibleFreeListSpace* _sp;
2360   const MemRegion                 _span;
2361   const bool                      _past_remark;
2362   const CMSBitMap*                _bit_map;
2363 
2364  protected:
2365   void do_oop(void* p, oop obj) {
2366     if (_span.contains(obj)) { // the interior oop points into CMS heap
2367       if (!_span.contains(p)) { // reference from outside CMS heap
2368         // Should be a valid object; the first disjunct below allows
2369         // us to sidestep an assertion in block_is_obj() that insists
2370         // that p be in _sp. Note that several generations (and spaces)
2371         // are spanned by _span (CMS heap) above.
2372         guarantee(!_sp->is_in_reserved(obj) ||
2373                   _sp->block_is_obj((HeapWord*)obj),
2374                   "Should be an object");
2375         guarantee(obj->is_oop(), "Should be an oop");
2376         obj->verify();
2377         if (_past_remark) {
2378           // Remark has been completed, so the object should be marked.
2379           guarantee(_bit_map->isMarked((HeapWord*)obj), "Remark should have marked it");
2380         }
2381       } else { // reference within CMS heap
2382         if (_past_remark) {
2383           // Remark has been completed -- so the referent should have
2384           // been marked, if referring object is.
2385           if (_bit_map->isMarked(_collector->block_start(p))) {
2386             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2387           }
2388         }
2389       }
2390     } else if (_sp->is_in_reserved(p)) {
2391       // the reference is from FLS, and points out of FLS
2392       guarantee(obj->is_oop(), "Should be an oop");
2393       obj->verify();
2394     }
2395   }
2396 
2397   template <class T> void do_oop_work(T* p) {
2398     T heap_oop = oopDesc::load_heap_oop(p);
2399     if (!oopDesc::is_null(heap_oop)) {
2400       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2401       do_oop(p, obj);
2402     }
2403   }
2404 
2405  public:
2406   VerifyAllOopsClosure(const CMSCollector* collector,
2407     const CompactibleFreeListSpace* sp, MemRegion span,
2408     bool past_remark, CMSBitMap* bit_map) :
2409     _collector(collector), _sp(sp), _span(span),
2410     _past_remark(past_remark), _bit_map(bit_map) { }
2411 
2412   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2413   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2414 };
2415 
2416 void CompactibleFreeListSpace::verify() const {
2417   assert_lock_strong(&_freelistLock);
2418   verify_objects_initialized();
2419   MemRegion span = _collector->_span;
2420   bool past_remark = (_collector->abstract_state() ==
2421                       CMSCollector::Sweeping);
2422 
2423   ResourceMark rm;
2424   HandleMark  hm;
2425 
2426   // Check integrity of CFL data structures
2427   _promoInfo.verify();
2428   _dictionary->verify();
2429   if (FLSVerifyIndexTable) {
2430     verifyIndexedFreeLists();
2431   }
2432   // Check integrity of all objects and free blocks in space
2433   {
2434     VerifyAllBlksClosure cl(this, span);
2435     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2436   }
2437   // Check that all references in the heap to FLS
2438   // are to valid objects in FLS or that references in
2439   // FLS are to valid objects elsewhere in the heap
2440   if (FLSVerifyAllHeapReferences)
2441   {
2442     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2443       _collector->markBitMap());
2444     CollectedHeap* ch = Universe::heap();
2445 
2446     // Iterate over all oops in the heap. Uses the _no_header version
2447     // since we are not interested in following the klass pointers.
2448     ch->oop_iterate_no_header(&cl);
2449   }
2450 
2451   if (VerifyObjectStartArray) {
2452     // Verify the block offset table
2453     _bt.verify();
2454   }
2455 }
2456 
2457 #ifndef PRODUCT
2458 void CompactibleFreeListSpace::verifyFreeLists() const {
2459   if (FLSVerifyLists) {
2460     _dictionary->verify();
2461     verifyIndexedFreeLists();
2462   } else {
2463     if (FLSVerifyDictionary) {
2464       _dictionary->verify();
2465     }
2466     if (FLSVerifyIndexTable) {
2467       verifyIndexedFreeLists();
2468     }
2469   }
2470 }
2471 #endif
2472 
2473 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2474   size_t i = 0;
2475   for (; i < IndexSetStart; i++) {
2476     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2477   }
2478   for (; i < IndexSetSize; i++) {
2479     verifyIndexedFreeList(i);
2480   }
2481 }
2482 
2483 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2484   FreeChunk* fc   =  _indexedFreeList[size].head();
2485   FreeChunk* tail =  _indexedFreeList[size].tail();
2486   size_t    num = _indexedFreeList[size].count();
2487   size_t      n = 0;
2488   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2489             "Slot should have been empty");
2490   for (; fc != NULL; fc = fc->next(), n++) {
2491     guarantee(fc->size() == size, "Size inconsistency");
2492     guarantee(fc->is_free(), "!free?");
2493     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2494     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2495   }
2496   guarantee(n == num, "Incorrect count");
2497 }
2498 
2499 #ifndef PRODUCT
2500 void CompactibleFreeListSpace::check_free_list_consistency() const {
2501   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2502     "Some sizes can't be allocated without recourse to"
2503     " linear allocation buffers");
2504   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2505     "else MIN_TREE_CHUNK_SIZE is wrong");
2506   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2507   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2508 }
2509 #endif
2510 
2511 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2512   assert_lock_strong(&_freelistLock);
2513   AdaptiveFreeList<FreeChunk> total;
2514   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2515   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2516   size_t total_free = 0;
2517   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2518     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2519     total_free += fl->count() * fl->size();
2520     if (i % (40*IndexSetStride) == 0) {
2521       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2522     }
2523     fl->print_on(gclog_or_tty);
2524     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2525     total.set_surplus(    total.surplus()     + fl->surplus()    );
2526     total.set_desired(    total.desired()     + fl->desired()    );
2527     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2528     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2529     total.set_count(      total.count()       + fl->count()      );
2530     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2531     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2532     total.set_split_births(total.split_births() + fl->split_births());
2533     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2534   }
2535   total.print_on(gclog_or_tty, "TOTAL");
2536   gclog_or_tty->print_cr("Total free in indexed lists "
2537                          SIZE_FORMAT " words", total_free);
2538   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2539     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2540             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2541     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2542   _dictionary->print_dict_census();
2543 }
2544 
2545 ///////////////////////////////////////////////////////////////////////////
2546 // CFLS_LAB
2547 ///////////////////////////////////////////////////////////////////////////
2548 
2549 #define VECTOR_257(x)                                                                                  \
2550   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2551   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2552      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2553      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2554      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2555      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2556      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2557      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2558      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2559      x }
2560 
2561 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2562 // OldPLABSize, whose static default is different; if overridden at the
2563 // command-line, this will get reinitialized via a call to
2564 // modify_initialization() below.
2565 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2566   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2567 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2568 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2569 
2570 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2571   _cfls(cfls)
2572 {
2573   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2574   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2575        i < CompactibleFreeListSpace::IndexSetSize;
2576        i += CompactibleFreeListSpace::IndexSetStride) {
2577     _indexedFreeList[i].set_size(i);
2578     _num_blocks[i] = 0;
2579   }
2580 }
2581 
2582 static bool _CFLS_LAB_modified = false;
2583 
2584 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2585   assert(!_CFLS_LAB_modified, "Call only once");
2586   _CFLS_LAB_modified = true;
2587   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2588        i < CompactibleFreeListSpace::IndexSetSize;
2589        i += CompactibleFreeListSpace::IndexSetStride) {
2590     _blocks_to_claim[i].modify(n, wt, true /* force */);
2591   }
2592 }
2593 
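     // Allocate word_sz words for a promoted object.  Requests of at least
     // IndexSetSize words are satisfied from the shared dictionary under the
     // parallel dictionary allocation lock; smaller requests come from this
     // thread's local indexed free list, which is refilled from the global
     // pool when it runs dry.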
2594 HeapWord* CFLS_LAB::alloc(const ParNewTracer* gc_tracer, size_t word_sz, 
2595                           const oop old, const markOop m) {
2596   FreeChunk* res;
2597   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2598   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2599     // This locking manages sync with other large object allocations.
2600     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2601                     Mutex::_no_safepoint_check_flag);
2602     res = _cfls->getChunkFromDictionaryExact(word_sz);
2603     if (res == NULL) return NULL;
2604     if (gc_tracer->should_report_promotion_outside_plab_event()) {
2605       uint age = m->has_displaced_mark_helper() ?
2606                    m->displaced_mark_helper()->age() :
2607                    m->age();
2608       gc_tracer->report_promotion_outside_plab_event(old, word_sz, age, true);
2609     }
2610   } else {
2611     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2612     if (fl->count() == 0) {
2613       // Attempt to refill this local free list.
2614       get_from_global_pool(word_sz, fl);
2615       // If it didn't work, give up.
2616       if (fl->count() == 0) return NULL;
2617       if (gc_tracer->should_report_promotion_in_new_plab_event()) {
2618         uint age = m->has_displaced_mark_helper() ?
2619                      m->displaced_mark_helper()->age() :
2620                      m->age();
2621         gc_tracer->report_promotion_in_new_plab_event(old, word_sz, age, true,
2622                                                       fl->count() * fl->size());
2623       }
2624     }
2625     res = fl->get_chunk_at_head();
2626     assert(res != NULL, "Why was count non-zero?");
2627   }
2628   res->markNotFree();
2629   assert(!res->is_free(), "shouldn't be marked free");
2630   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2631   // mangle a just allocated object with a distinct pattern.
2632   debug_only(res->mangleAllocated(word_sz));
2633   return (HeapWord*)res;
2634 }
2635 
2636 // Get a chunk of blocks of the right size and update related
2637 // book-keeping stats
2638 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2639   // Get the #blocks we want to claim
2640   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2641   assert(n_blks > 0, "Error");
2642   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2643   // In some cases, when the application has a phase change,
2644   // there may be a sudden and sharp shift in the object survival
2645   // profile, and updating the counts at the end of a scavenge
2646   // may not be quick enough, giving rise to large scavenge pauses
2647   // during these phase changes. It is beneficial to detect such
2648   // changes on-the-fly during a scavenge and avoid such a phase-change
2649   // pothole. The following code is a heuristic attempt to do that.
2650   // It is protected by a product flag until we have gained
2651   // enough experience with this heuristic and fine-tuned its behavior.
2652   // WARNING: This might increase fragmentation if we overreact to
2653   // small spikes, so some kind of historical smoothing based on
2654   // previous experience with the greater reactivity might be useful.
2655   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2656   // default.
2657   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2658     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2659     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2660     n_blks = MIN2(n_blks, CMSOldPLABMax);
2661   }
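       // Illustrative arithmetic for the heuristic above (made-up values,
       // not necessarily the build defaults): with n_blks == 16,
       // CMSOldPLABToleranceFactor == 4, CMSOldPLABNumRefills == 4 and
       // _num_blocks[word_sz] == 512, multiple is 512/(4*4*16) == 2; with
       // CMSOldPLABReactivityFactor == 2 we add 2*2*16 == 64 blocks, giving
       // n_blks == 80 before the clamp to CMSOldPLABMax.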
2662   assert(n_blks > 0, "Error");
2663   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2664   // Update stats table entry for this block size
2665   _num_blocks[word_sz] += fl->count();
2666 }
2667 
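     // Fold the accumulated block consumption (blocks per worker per refill)
     // into the per-size exponential average that sizes future refills,
     // clamped to [CMSOldPLABMin, CMSOldPLABMax] when ResizeOldPLAB is on,
     // and reset the global counters for the next round.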
2668 void CFLS_LAB::compute_desired_plab_size() {
2669   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2670        i < CompactibleFreeListSpace::IndexSetSize;
2671        i += CompactibleFreeListSpace::IndexSetStride) {
2672     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2673            "Counter inconsistency");
2674     if (_global_num_workers[i] > 0) {
2675       // Need to smooth wrt historical average
2676       if (ResizeOldPLAB) {
2677         _blocks_to_claim[i].sample(
2678           MAX2((size_t)CMSOldPLABMin,
2679           MIN2((size_t)CMSOldPLABMax,
2680                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2681       }
2682       // Reset counters for next round
2683       _global_num_workers[i] = 0;
2684       _global_num_blocks[i] = 0;
2685       if (PrintOldPLAB) {
2686         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT,
2687                                i, (size_t)_blocks_to_claim[i].average());
2688       }
2689     }
2690   }
2691 }
2692 
2693 // If this is changed in the future to allow parallel
2694 // access, one would need to take the FL locks and,
2695 // depending on how it is used, stagger access from
2696 // parallel threads to reduce contention.
2697 void CFLS_LAB::retire(int tid) {
2698   // We run this single threaded with the world stopped;
2699   // so no need for locks and such.
2700   NOT_PRODUCT(Thread* t = Thread::current();)
2701   assert(Thread::current()->is_VM_thread(), "Error");
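       // _num_blocks[i] counts the blocks this LAB obtained from the global
       // pool, while _indexedFreeList[i].count() is how many of them remain
       // unused.  The difference is what was actually consumed and goes into
       // the global statistics; the unused remainder is handed back to the
       // space's shared indexed free list.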
2702   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2703        i < CompactibleFreeListSpace::IndexSetSize;
2704        i += CompactibleFreeListSpace::IndexSetStride) {
2705     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2706            "Can't retire more than what we obtained");
2707     if (_num_blocks[i] > 0) {
2708       size_t num_retire =  _indexedFreeList[i].count();
2709       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2710       {
2711         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2712         //                Mutex::_no_safepoint_check_flag);
2713 
2714         // Update globals stats for num_blocks used
2715         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2716         _global_num_workers[i]++;
2717         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2718         if (num_retire > 0) {
2719           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2720           // Reset this list.
2721           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2722           _indexedFreeList[i].set_size(i);
2723         }
2724       }
2725       if (PrintOldPLAB) {
2726         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2727                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2728       }
2729       // Reset stats for next round
2730       _num_blocks[i]         = 0;
2731     }
2732   }
2733 }
2734 
2735 // Used by par_get_chunk_of_blocks() for the chunks from the
2736 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2737 // of "word_sz" and, if found, splits it into chunks of size "word_sz",
2738 // adding them to the free list "fl".  "n" is the maximum number of
2739 // chunks to be added to "fl".
2740 bool CompactibleFreeListSpace:: par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2741 
2742   // We'll try all multiples of word_sz in the indexed set, starting with
2743   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples;
2744   // failing that, the caller splits up a big chunk from the dictionary.
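       // For example (illustrative sizes): for word_sz == 8 we would look at
       // the lists for sizes 8, 16, 24, ... below IndexSetSize; a chunk taken
       // from the 24-word list (k == 3) is carved into three 8-word chunks.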
2745   {
2746     bool found;
2747     int  k;
2748     size_t cur_sz;
2749     for (k = 1, cur_sz = k * word_sz, found = false;
2750          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2751          (CMSSplitIndexedFreeListBlocks || k <= 1);
2752          k++, cur_sz = k * word_sz) {
2753       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2754       fl_for_cur_sz.set_size(cur_sz);
2755       {
2756         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2757                         Mutex::_no_safepoint_check_flag);
2758         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2759         if (gfl->count() != 0) {
2760           // nn is the number of chunks of size cur_sz that
2761           // we'd need to split k-ways each, in order to create
2762           // "n" chunks of size word_sz each.
2763           const size_t nn = MAX2(n/k, (size_t)1);
2764           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2765           found = true;
2766           if (k > 1) {
2767             // Update split death stats for the cur_sz-size blocks list:
2768             // we increment the split death count by the number of blocks
2769             // we just took from the cur_sz-size blocks list and which
2770             // we will be splitting below.
2771             ssize_t deaths = gfl->split_deaths() +
2772                              fl_for_cur_sz.count();
2773             gfl->set_split_deaths(deaths);
2774           }
2775         }
2776       }
2777       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2778       if (found) {
2779         if (k == 1) {
2780           fl->prepend(&fl_for_cur_sz);
2781         } else {
2782           // Divide each block on fl_for_cur_sz up k ways.
2783           FreeChunk* fc;
2784           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2785             // Must do this in reverse order, so that anybody attempting to
2786             // access the main chunk sees it as a single free block until we
2787             // change it.
2788             size_t fc_size = fc->size();
2789             assert(fc->is_free(), "Error");
2790             for (int i = k-1; i >= 0; i--) {
2791               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2792               assert((i != 0) ||
2793                         ((fc == ffc) && ffc->is_free() &&
2794                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2795                         "Counting error");
2796               ffc->set_size(word_sz);
2797               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2798               ffc->link_next(NULL);
2799               // Above must occur before BOT is updated below.
2800               OrderAccess::storestore();
2801               // splitting from the right; after the decrement below, fc_size == i * word_sz
2802               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2803               fc_size -= word_sz;
2804               assert(fc_size == i*word_sz, "Error");
2805               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2806               _bt.verify_single_block((HeapWord*)fc, fc_size);
2807               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2808               // Push this on "fl".
2809               fl->return_chunk_at_head(ffc);
2810             }
2811             // TRAP
2812             assert(fl->tail()->next() == NULL, "List invariant.");
2813           }
2814         }
2815         // Update birth stats for this block size.
2816         size_t num = fl->count();
2817         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2818                         Mutex::_no_safepoint_check_flag);
2819         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2820         _indexedFreeList[word_sz].set_split_births(births);
2821         return true;
2822       }
2823     }
2824     return found;
2825   }
2826 }
2827 
2828 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2829 
2830   FreeChunk* fc = NULL;
2831   FreeChunk* rem_fc = NULL;
2832   size_t rem;
2833   {
2834     MutexLockerEx x(parDictionaryAllocLock(),
2835                     Mutex::_no_safepoint_check_flag);
2836     while (n > 0) {
2837       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2838                                   FreeBlockDictionary<FreeChunk>::atLeast);
2839       if (fc != NULL) {
2840         break;
2841       } else {
2842         n--;
2843       }
2844     }
2845     if (fc == NULL) return NULL;
2846     // Otherwise, split up that block.
2847     assert((ssize_t)n >= 1, "Control point invariant");
2848     assert(fc->is_free(), "Error: should be a free block");
2849     _bt.verify_single_block((HeapWord*)fc, fc->size());
2850     const size_t nn = fc->size() / word_sz;
2851     n = MIN2(nn, n);
2852     assert((ssize_t)n >= 1, "Control point invariant");
2853     rem = fc->size() - n * word_sz;
2854     // If there is a remainder, and it's too small, allocate one fewer.
2855     if (rem > 0 && rem < MinChunkSize) {
2856       n--; rem += word_sz;
2857     }
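         // For example (illustrative values): with word_sz == 50, n == 10 and
         // a 503-word chunk, rem would be 3; if that is below MinChunkSize we
         // take n == 9 instead and fold the 50 freed words into rem, giving
         // rem == 53.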
2858     // Note that at this point we may have n == 0.
2859     assert((ssize_t)n >= 0, "Control point invariant");
2860 
2861     // If n is 0, the chunk fc that was found is not large
2862     // enough to leave a viable remainder.  We are unable to
2863     // allocate even one block.  Return fc to the dictionary
2864     // and return NULL, so the caller's "fl" stays empty.
2865     if (n == 0) {
2866       returnChunkToDictionary(fc);
2867       return NULL;
2868     }
2869 
2870     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2871     dictionary()->dict_census_update(fc->size(),
2872                                      true /*split*/,
2873                                      false /*birth*/);
2874 
2875     // First return the remainder, if any.
2876     // Note that we hold the lock until we decide if we're going to give
2877     // back the remainder to the dictionary, since a concurrent allocation
2878     // may otherwise see the heap as empty.  (We're willing to take that
2879     // hit if the block is a small block.)
2880     if (rem > 0) {
2881       size_t prefix_size = n * word_sz;
2882       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2883       rem_fc->set_size(rem);
2884       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2885       rem_fc->link_next(NULL);
2886       // Above must occur before BOT is updated below.
2887       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2888       OrderAccess::storestore();
2889       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2890       assert(fc->is_free(), "Error");
2891       fc->set_size(prefix_size);
2892       if (rem >= IndexSetSize) {
2893         returnChunkToDictionary(rem_fc);
2894         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2895         rem_fc = NULL;
2896       }
2897       // Otherwise, return it to the small list below.
2898     }
2899   }
2900   if (rem_fc != NULL) {
2901     MutexLockerEx x(_indexedFreeListParLocks[rem],
2902                     Mutex::_no_safepoint_check_flag);
2903     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2904     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2905     smallSplitBirth(rem);
2906   }
2907   assert(n * word_sz == fc->size(),
2908     err_msg("Chunk size " SIZE_FORMAT " is not exactly splittable into "
2909     SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2910     fc->size(), n, word_sz));
2911   return fc;
2912 }
2913 
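     // Carve the chunk obtained from get_n_way_chunk_to_split() into blocks
     // of size word_sz (the number actually produced may be smaller than
     // targetted_number_of_chunks), push them all on "fl" and update the
     // split-birth statistics for this block size.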
2914 void CompactibleFreeListSpace:: par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2915 
2916   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2917 
2918   if (fc == NULL) {
2919     return;
2920   }
2921 
2922   size_t n = fc->size() / word_sz;
2923 
2924   assert((ssize_t)n > 0, "Consistency");
2925   // Now do the splitting up.
2926   // Must do this in reverse order, so that anybody attempting to
2927   // access the main chunk sees it as a single free block until we
2928   // change it.
2929   size_t fc_size = n * word_sz;
2930   // All but first chunk in this loop
2931   for (ssize_t i = n-1; i > 0; i--) {
2932     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2933     ffc->set_size(word_sz);
2934     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2935     ffc->link_next(NULL);
2936     // Above must occur before BOT is updated below.
2937     OrderAccess::storestore();
2938     // splitting from the right; after the decrement below, fc_size == i * word_sz
2939     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2940     fc_size -= word_sz;
2941     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2942     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2943     _bt.verify_single_block((HeapWord*)fc, fc_size);
2944     // Push this on "fl".
2945     fl->return_chunk_at_head(ffc);
2946   }
2947   // First chunk
2948   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2949   // The blocks above should show their new sizes before the first block below
2950   fc->set_size(word_sz);
2951   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2952   fc->link_next(NULL);
2953   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2954   _bt.verify_single_block((HeapWord*)fc, fc->size());
2955   fl->return_chunk_at_head(fc);
2956 
2957   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2958   {
2959     // Update the stats for this block size.
2960     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2961                     Mutex::_no_safepoint_check_flag);
2962     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2963     _indexedFreeList[word_sz].set_split_births(births);
2964     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2965     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2966   }
2967 
2968   // TRAP
2969   assert(fl->tail()->next() == NULL, "List invariant.");
2970 }
2971 
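     // Refill "fl" with (up to) "n" chunks of size "word_sz": first try to
     // assemble them from the indexed free lists; failing that, split up a
     // block taken from the dictionary.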
2972 void CompactibleFreeListSpace:: par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2973   assert(fl->count() == 0, "Precondition.");
2974   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2975          "Precondition");
2976 
2977   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2978     // Got it
2979     return;
2980   }
2981 
2982   // Otherwise, we'll split a block from the dictionary.
2983   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2984 }
2985 
2986 // Set up the space's par_seq_tasks structure for work claiming
2987 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2988 // XXX Need to suitably abstract and generalize this and the next
2989 // method into one.
2990 void
2991 CompactibleFreeListSpace::
2992 initialize_sequential_subtasks_for_rescan(int n_threads) {
2993   // The "size" of each task is fixed according to rescan_task_size.
2994   assert(n_threads > 0, "Unexpected n_threads argument");
2995   const size_t task_size = rescan_task_size();
2996   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
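       // This is a ceiling division; for example (illustrative numbers), a
       // used region of 10,000 words with a task_size of 4,096 words yields
       // 3 tasks.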
2997   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2998   assert(n_tasks == 0 ||
2999          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
3000           (used_region().start() + n_tasks*task_size >= used_region().end())),
3001          "n_tasks calculation incorrect");
3002   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3003   assert(!pst->valid(), "Clobbering existing data?");
3004   // Sets the condition for completion of the subtask (how many threads
3005   // need to finish in order to be done).
3006   pst->set_n_threads(n_threads);
3007   pst->set_n_tasks((int)n_tasks);
3008 }
3009 
3010 // Set up the space's par_seq_tasks structure for work claiming
3011 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
3012 void
3013 CompactibleFreeListSpace::
3014 initialize_sequential_subtasks_for_marking(int n_threads,
3015                                            HeapWord* low) {
3016   // The "size" of each task is fixed according to marking_task_size.
3017   assert(n_threads > 0, "Unexpected n_threads argument");
3018   const size_t task_size = marking_task_size();
3019   assert(task_size > CardTableModRefBS::card_size_in_words &&
3020          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3021          "Otherwise arithmetic below would be incorrect");
3022   MemRegion span = _gen->reserved();
3023   if (low != NULL) {
3024     if (span.contains(low)) {
3025       // Align low down to a card boundary so that
3026       // we can use block_offset_careful() on span boundaries.
3027       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3028                                  CardTableModRefBS::card_size);
3029       // Clip span prefix at aligned_low
3030       span = span.intersection(MemRegion(aligned_low, span.end()));
3031     } else if (low > span.end()) {
3032       span = MemRegion(low, low);  // Null region
3033     } // else use entire span
3034   }
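       // The span is now either empty (e.g. when "low" lies beyond the end
       // of the generation's reserved region) or starts at a card-aligned
       // address; the assert below verifies this.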
3035   assert(span.is_empty() ||
3036          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3037         "span should start at a card boundary");
3038   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3039   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3040   assert(n_tasks == 0 ||
3041          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3042           (span.start() + n_tasks*task_size >= span.end())),
3043          "n_tasks calculation incorrect");
3044   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3045   assert(!pst->valid(), "Clobbering existing data?");
3046   // Sets the condition for completion of the subtask (how many threads
3047   // need to finish in order to be done).
3048   pst->set_n_threads(n_threads);
3049   pst->set_n_tasks((int)n_tasks);
3050 }