1 /*
   2  * Copyright (c) 2001, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc_implementation/concurrentMarkSweep/cmsLockVerifier.hpp"
  27 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
  28 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc_implementation/concurrentMarkSweep/concurrentMarkSweepThread.hpp"
  30 #include "gc_implementation/shared/liveRange.hpp"
  31 #include "gc_implementation/shared/spaceDecorator.hpp"
  32 #include "gc_interface/collectedHeap.inline.hpp"
  33 #include "memory/allocation.inline.hpp"
  34 #include "memory/blockOffsetTable.inline.hpp"
  35 #include "memory/resourceArea.hpp"
  36 #include "memory/universe.inline.hpp"
  37 #include "oops/oop.inline.hpp"
  38 #include "runtime/globals.hpp"
  39 #include "runtime/handles.inline.hpp"
  40 #include "runtime/init.hpp"
  41 #include "runtime/java.hpp"
  42 #include "runtime/vmThread.hpp"
  43 #include "utilities/copy.hpp"
  44 
  45 /////////////////////////////////////////////////////////////////////////
  46 //// CompactibleFreeListSpace
  47 /////////////////////////////////////////////////////////////////////////
  48 
// Highest lock rank used by the free list locks
  50 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  51 
  52 // Defaults are 0 so things will break badly if incorrectly initialized.
  53 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  54 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  55 
  56 size_t MinChunkSize = 0;
  57 
  58 void CompactibleFreeListSpace::set_cms_values() {
  59   // Set CMS global values
  60   assert(MinChunkSize == 0, "already set");
  61 
  62   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  63   // for chunks to contain a FreeChunk.
  64   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  65   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
  66 
  67   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  68   IndexSetStart  = MinChunkSize;
  69   IndexSetStride = MinObjAlignment;
  70 }
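
// Worked example (illustrative only; the exact numbers depend on the
// platform's FreeChunk layout and alignment settings): if, say,
// sizeof(FreeChunk) were 24 bytes on a 64-bit VM with
// MinObjAlignmentInBytes == 8, then align_size_up(24, 8) == 24, so
// MinChunkSize would be 24 / BytesPerWord == 3 heap words, IndexSetStart
// would be 3, and IndexSetStride would be MinObjAlignment == 1.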
  71 
  72 // Constructor
  73 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  74   MemRegion mr, bool use_adaptive_freelists,
  75   FreeBlockDictionary<FreeChunk>::DictionaryChoice dictionaryChoice) :
  76   _dictionaryChoice(dictionaryChoice),
  77   _adaptive_freelists(use_adaptive_freelists),
  78   _bt(bs, mr),
  // free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  84   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true),
  87   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  88                     CMSRescanMultiple),
  89   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  90                     CMSConcMarkMultiple),
  91   _collector(NULL)
  92 {
  93   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  94          "FreeChunk is larger than expected");
  95   _bt.set_space(this);
  96   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  97   // We have all of "mr", all of which we place in the dictionary
  98   // as one big chunk. We'll need to decide here which of several
  99   // possible alternative dictionary implementations to use. For
 100   // now the choice is easy, since we have only one working
 101   // implementation, namely, the simple binary tree (splaying
 102   // temporarily disabled).
 103   switch (dictionaryChoice) {
 104     case FreeBlockDictionary<FreeChunk>::dictionaryBinaryTree:
 105       _dictionary = new BinaryTreeDictionary<FreeChunk, AdaptiveFreeList>(mr);
 106       break;
 107     case FreeBlockDictionary<FreeChunk>::dictionarySplayTree:
 108     case FreeBlockDictionary<FreeChunk>::dictionarySkipList:
 109     default:
 110       warning("dictionaryChoice: selected option not understood; using"
 111               " default BinaryTreeDictionary implementation instead.");
 112   }
 113   assert(_dictionary != NULL, "CMS dictionary initialization");
 114   // The indexed free lists are initially all empty and are lazily
 115   // filled in on demand. Initialize the array elements to NULL.
 116   initializeIndexedFreeListArray();
 117 
 118   // Not using adaptive free lists assumes that allocation is first
 119   // from the linAB's.  Also a cms perm gen which can be compacted
 120   // has to have the klass's klassKlass allocated at a lower
 121   // address in the heap than the klass so that the klassKlass is
 122   // moved to its new location before the klass is moved.
 123   // Set the _refillSize for the linear allocation blocks
 124   if (!use_adaptive_freelists) {
 125     FreeChunk* fc = _dictionary->get_chunk(mr.word_size());
 126     // The small linAB initially has all the space and will allocate
 127     // a chunk of any size.
 128     HeapWord* addr = (HeapWord*) fc;
 129     _smallLinearAllocBlock.set(addr, fc->size() ,
 130       1024*SmallForLinearAlloc, fc->size());
 131     // Note that _unallocated_block is not updated here.
 132     // Allocations from the linear allocation block should
 133     // update it.
 134   } else {
 135     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 136                                SmallForLinearAlloc);
 137   }
 138   // CMSIndexedFreeListReplenish should be at least 1
 139   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 140   _promoInfo.setSpace(this);
 141   if (UseCMSBestFit) {
 142     _fitStrategy = FreeBlockBestFitFirst;
 143   } else {
 144     _fitStrategy = FreeBlockStrategyNone;
 145   }
 146   check_free_list_consistency();
 147 
 148   // Initialize locks for parallel case.
 149 
 150   if (CollectedHeap::use_parallel_gc_threads()) {
 151     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 152       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 153                                               "a freelist par lock",
 154                                               true);
 155       if (_indexedFreeListParLocks[i] == NULL)
 156         vm_exit_during_initialization("Could not allocate a par lock");
 157       DEBUG_ONLY(
 158         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 159       )
 160     }
 161     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 162   }
 163 }
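
// Summary of the locking set up by the constructor above: each space
// instance takes a distinct rank for its _freelistLock (via the _lockRank--
// above), while _parDictionaryAllocLock and the per-size-class par locks
// all sit at Mutex::leaf - 1.  The per-list par locks are created only when
// parallel GC threads are in use.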
 164 
 165 // Like CompactibleSpace forward() but always calls cross_threshold() to
 166 // update the block offset table.  Removed initialize_threshold call because
 167 // CFLS does not use a block offset array for contiguous spaces.
 168 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 169                                     CompactPoint* cp, HeapWord* compact_top) {
 170   // q is alive
 171   // First check if we should switch compaction space
 172   assert(this == cp->space, "'this' should be current compaction space.");
 173   size_t compaction_max_size = pointer_delta(end(), compact_top);
 174   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 175     "virtual adjustObjectSize_v() method is not correct");
 176   size_t adjusted_size = adjustObjectSize(size);
 177   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 178          "no small fragments allowed");
 179   assert(minimum_free_block_size() == MinChunkSize,
 180          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize
 182   if (adjusted_size + MinChunkSize > compaction_max_size &&
 183       adjusted_size != compaction_max_size) {
 184     do {
 185       // switch to next compaction space
 186       cp->space->set_compaction_top(compact_top);
 187       cp->space = cp->space->next_compaction_space();
 188       if (cp->space == NULL) {
 189         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 190         assert(cp->gen != NULL, "compaction must succeed");
 191         cp->space = cp->gen->first_compaction_space();
 192         assert(cp->space != NULL, "generation must have a first compaction space");
 193       }
 194       compact_top = cp->space->bottom();
 195       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; the non-virtual version was used above
      // to save the virtual dispatch.
 200       adjusted_size = cp->space->adjust_object_size_v(size);
 201       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 202       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 203     } while (adjusted_size > compaction_max_size);
 204   }
 205 
 206   // store the forwarding pointer into the mark word
 207   if ((HeapWord*)q != compact_top) {
 208     q->forward_to(oop(compact_top));
 209     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 210   } else {
 211     // if the object isn't moving we can just set the mark to the default
 212     // mark and handle it specially later on.
 213     q->init_mark();
 214     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 215   }
 216 
 217   compact_top += adjusted_size;
 218 
 219   // we need to update the offset table so that the beginnings of objects can be
 220   // found during scavenge.  Note that we are updating the offset table based on
 221   // where the object will be once the compaction phase finishes.
 222 
  // Always call cross_threshold().  A contiguous space can call it only when
  // the compaction_top exceeds the current threshold, but a non-contiguous
  // space must always call it.
 226   cp->threshold =
 227     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 228   return compact_top;
 229 }
 230 
 231 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 232 // and use of single_block instead of alloc_block.  The name here is not really
 233 // appropriate - maybe a more general name could be invented for both the
 234 // contiguous and noncontiguous spaces.
 235 
 236 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 237   _bt.single_block(start, the_end);
 238   return end();
 239 }
 240 
// Initialize the indexed free list array elements to empty lists.
 242 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 243   for (size_t i = 0; i < IndexSetSize; i++) {
 244     // Note that on platforms where objects are double word aligned,
 245     // the odd array elements are not used.  It is convenient, however,
 246     // to map directly from the object size to the array element.
 247     _indexedFreeList[i].reset(IndexSetSize);
 248     _indexedFreeList[i].set_size(i);
 249     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 250     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 251     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 252     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 253   }
 254 }
 255 
 256 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 257   for (size_t i = 1; i < IndexSetSize; i++) {
 258     assert(_indexedFreeList[i].size() == (size_t) i,
 259       "Indexed free list sizes are incorrect");
 260     _indexedFreeList[i].reset(IndexSetSize);
 261     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 262     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 263     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 264     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 265   }
 266 }
 267 
 268 void CompactibleFreeListSpace::reset(MemRegion mr) {
 269   resetIndexedFreeListArray();
 270   dictionary()->reset();
 271   if (BlockOffsetArrayUseUnallocatedBlock) {
 272     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 273     // Everything's allocated until proven otherwise.
 274     _bt.set_unallocated_block(end());
 275   }
 276   if (!mr.is_empty()) {
 277     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 278     _bt.single_block(mr.start(), mr.word_size());
 279     FreeChunk* fc = (FreeChunk*) mr.start();
 280     fc->set_size(mr.word_size());
 281     if (mr.word_size() >= IndexSetSize ) {
 282       returnChunkToDictionary(fc, true);
 283     } else {
 284       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 285       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc, true, true);
 286     }
 287   }
 288   _promoInfo.reset();
 289   _smallLinearAllocBlock._ptr = NULL;
 290   _smallLinearAllocBlock._word_size = 0;
 291 }
 292 
 293 void CompactibleFreeListSpace::reset_after_compaction() {
 294   // Reset the space to the new reality - one free chunk.
 295   MemRegion mr(compaction_top(), end());
 296   reset(mr);
 297   // Now refill the linear allocation block(s) if possible.
 298   if (_adaptive_freelists) {
 299     refillLinearAllocBlocksIfNeeded();
 300   } else {
 301     // Place as much of mr in the linAB as we can get,
 302     // provided it was big enough to go into the dictionary.
 303     FreeChunk* fc = dictionary()->find_largest_dict();
 304     if (fc != NULL) {
 305       assert(fc->size() == mr.word_size(),
 306              "Why was the chunk broken up?");
 307       removeChunkFromDictionary(fc);
 308       HeapWord* addr = (HeapWord*) fc;
 309       _smallLinearAllocBlock.set(addr, fc->size() ,
 310         1024*SmallForLinearAlloc, fc->size());
 311       // Note that _unallocated_block is not updated here.
 312     }
 313   }
 314 }
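
// Note on the non-adaptive branch above: reset(mr) has just placed all of
// "mr" in the dictionary as a single chunk (when mr is dictionary-sized),
// so the largest chunk found there is expected to be exactly
// mr.word_size() words, which is what the assert checks.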
 315 
 316 // Walks the entire dictionary, returning a coterminal
 317 // chunk, if it exists. Use with caution since it involves
 318 // a potentially complete walk of a potentially large tree.
 319 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 320 
 321   assert_lock_strong(&_freelistLock);
 322 
 323   return dictionary()->find_chunk_ends_at(end());
 324 }
 325 
 326 
 327 #ifndef PRODUCT
 328 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 329   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 330     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 331   }
 332 }
 333 
 334 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 335   size_t sum = 0;
 336   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 337     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 338   }
 339   return sum;
 340 }
 341 
 342 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 343   size_t count = 0;
 344   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 345     debug_only(
 346       ssize_t total_list_count = 0;
 347       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 348          fc = fc->next()) {
 349         total_list_count++;
 350       }
 351       assert(total_list_count ==  _indexedFreeList[i].count(),
 352         "Count in list is incorrect");
 353     )
 354     count += _indexedFreeList[i].count();
 355   }
 356   return count;
 357 }
 358 
 359 size_t CompactibleFreeListSpace::totalCount() {
 360   size_t num = totalCountInIndexedFreeLists();
 361   num +=  dictionary()->total_count();
 362   if (_smallLinearAllocBlock._word_size != 0) {
 363     num++;
 364   }
 365   return num;
 366 }
 367 #endif
 368 
 369 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 370   FreeChunk* fc = (FreeChunk*) p;
 371   return fc->is_free();
 372 }
 373 
 374 size_t CompactibleFreeListSpace::used() const {
 375   return capacity() - free();
 376 }
 377 
 378 size_t CompactibleFreeListSpace::free() const {
 379   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 380   // if you do this while the structures are in flux you
 381   // may get an approximate answer only; for instance
 382   // because there is concurrent allocation either
 383   // directly by mutators or for promotion during a GC.
 384   // It's "MT-safe", however, in the sense that you are guaranteed
 385   // not to crash and burn, for instance, because of walking
 386   // pointers that could disappear as you were walking them.
 387   // The approximation is because the various components
 388   // that are read below are not read atomically (and
 389   // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 391   // this is during a resize operation at the end of GC
 392   // and at that time you are guaranteed to get the
 393   // correct actual value. However, for instance, this is
 394   // also read completely asynchronously by the "perf-sampler"
 395   // that supports jvmstat, and you are apt to see the values
 396   // flicker in such cases.
 397   assert(_dictionary != NULL, "No _dictionary?");
 398   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 399           totalSizeInIndexedFreeLists() +
 400           _smallLinearAllocBlock._word_size) * HeapWordSize;
 401 }
 402 
 403 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 404   assert(_dictionary != NULL, "No _dictionary?");
 405   assert_locked();
 406   size_t res = _dictionary->max_chunk_size();
 407   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 408                        (size_t) SmallForLinearAlloc - 1));
  // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when res
  // calculated above is less than IndexSetSize,
  // just return res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
 415   // Note: do not change the loop test i >= res + IndexSetStride
 416   // to i > res below, because i is unsigned and res may be zero.
 417   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 418        i -= IndexSetStride) {
 419     if (_indexedFreeList[i].head() != NULL) {
 420       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 421       return i;
 422     }
 423   }
 424   return res;
 425 }
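
// Illustrative example of the loop above: if res (the larger of the
// dictionary's max chunk size and the usable part of the small linAB) came
// out to, say, 5 words, but _indexedFreeList[200] is non-empty, the loop
// finds index 200 first (scanning downward from IndexSetSize - 1) and
// returns it, since an indexed list of size i can satisfy an i-word request.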
 426 
 427 void LinearAllocBlock::print_on(outputStream* st) const {
 428   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 429             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 430             _ptr, _word_size, _refillSize, _allocation_size_limit);
 431 }
 432 
 433 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 434   st->print_cr("COMPACTIBLE FREELIST SPACE");
 435   st->print_cr(" Space:");
 436   Space::print_on(st);
 437 
 438   st->print_cr("promoInfo:");
 439   _promoInfo.print_on(st);
 440 
 441   st->print_cr("_smallLinearAllocBlock");
 442   _smallLinearAllocBlock.print_on(st);
 443 
 444   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 445 
 446   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 447                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 448 }
 449 
 450 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 451 const {
 452   reportIndexedFreeListStatistics();
 453   gclog_or_tty->print_cr("Layout of Indexed Freelists");
 454   gclog_or_tty->print_cr("---------------------------");
 455   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 456   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 457     _indexedFreeList[i].print_on(gclog_or_tty);
 458     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 459          fc = fc->next()) {
 460       gclog_or_tty->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 461                           fc, (HeapWord*)fc + i,
 462                           fc->cantCoalesce() ? "\t CC" : "");
 463     }
 464   }
 465 }
 466 
 467 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 468 const {
 469   _promoInfo.print_on(st);
 470 }
 471 
 472 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 473 const {
 474   _dictionary->report_statistics();
 475   st->print_cr("Layout of Freelists in Tree");
 476   st->print_cr("---------------------------");
 477   _dictionary->print_free_lists(st);
 478 }
 479 
 480 class BlkPrintingClosure: public BlkClosure {
 481   const CMSCollector*             _collector;
 482   const CompactibleFreeListSpace* _sp;
 483   const CMSBitMap*                _live_bit_map;
 484   const bool                      _post_remark;
 485   outputStream*                   _st;
 486 public:
 487   BlkPrintingClosure(const CMSCollector* collector,
 488                      const CompactibleFreeListSpace* sp,
 489                      const CMSBitMap* live_bit_map,
 490                      outputStream* st):
 491     _collector(collector),
 492     _sp(sp),
 493     _live_bit_map(live_bit_map),
 494     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 495     _st(st) { }
 496   size_t do_blk(HeapWord* addr);
 497 };
 498 
 499 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 500   size_t sz = _sp->block_size_no_stall(addr, _collector);
 501   assert(sz != 0, "Should always be able to compute a size");
 502   if (_sp->block_is_obj(addr)) {
 503     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 504     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 505       addr,
 506       dead ? "dead" : "live",
 507       sz,
 508       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 509     if (CMSPrintObjectsInDump && !dead) {
 510       oop(addr)->print_on(_st);
 511       _st->print_cr("--------------------------------------");
 512     }
 513   } else { // free block
 514     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 515       addr, sz, CMSPrintChunksInDump ? ":" : ".");
 516     if (CMSPrintChunksInDump) {
 517       ((FreeChunk*)addr)->print_on(_st);
 518       _st->print_cr("--------------------------------------");
 519     }
 520   }
 521   return sz;
 522 }
 523 
 524 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 525   outputStream* st) {
 526   st->print_cr("\n=========================");
 527   st->print_cr("Block layout in CMS Heap:");
 528   st->print_cr("=========================");
 529   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 530   blk_iterate(&bpcl);
 531 
 532   st->print_cr("\n=======================================");
 533   st->print_cr("Order & Layout of Promotion Info Blocks");
 534   st->print_cr("=======================================");
 535   print_promo_info_blocks(st);
 536 
 537   st->print_cr("\n===========================");
 538   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 540   print_indexed_free_lists(st);
 541 
 542   st->print_cr("\n=================================");
 543   st->print_cr("Order of Free Lists in Dictionary");
 544   st->print_cr("=================================");
 545   print_dictionary_free_lists(st);
 546 }
 547 
 548 
 549 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 550   assert_lock_strong(&_freelistLock);
 551   assert(PrintFLSStatistics != 0, "Reporting error");
 552   _dictionary->report_statistics();
 553   if (PrintFLSStatistics > 1) {
 554     reportIndexedFreeListStatistics();
 555     size_t total_size = totalSizeInIndexedFreeLists() +
 556                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", total_size, flsFrag());
 558   }
 559 }
 560 
 561 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 562   assert_lock_strong(&_freelistLock);
 563   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 564                       "--------------------------------\n");
 565   size_t total_size = totalSizeInIndexedFreeLists();
 566   size_t   free_blocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", total_size);
  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", free_blocks);
  if (free_blocks != 0) {
    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", total_size/free_blocks);
 572   }
 573 }
 574 
 575 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 576   size_t res = 0;
 577   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 578     debug_only(
 579       ssize_t recount = 0;
 580       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 581          fc = fc->next()) {
 582         recount += 1;
 583       }
 584       assert(recount == _indexedFreeList[i].count(),
 585         "Incorrect count in list");
 586     )
 587     res += _indexedFreeList[i].count();
 588   }
 589   return res;
 590 }
 591 
 592 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 593   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 594     if (_indexedFreeList[i].head() != NULL) {
 595       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 596       return (size_t)i;
 597     }
 598   }
 599   return 0;
 600 }
 601 
 602 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 603   HeapWord* prevEnd = end();
 604   assert(prevEnd != value, "unnecessary set_end call");
 605   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 606         "New end is below unallocated block");
 607   _end = value;
 608   if (prevEnd != NULL) {
 609     // Resize the underlying block offset table.
 610     _bt.resize(pointer_delta(value, bottom()));
 611     if (value <= prevEnd) {
 612       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 613              "New end is below unallocated block");
 614     } else {
 615       // Now, take this new chunk and add it to the free blocks.
 616       // Note that the BOT has not yet been updated for this block.
 617       size_t newFcSize = pointer_delta(value, prevEnd);
 618       // XXX This is REALLY UGLY and should be fixed up. XXX
 619       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 620         // Mark the boundary of the new block in BOT
 621         _bt.mark_block(prevEnd, value);
 622         // put it all in the linAB
 623         if (ParallelGCThreads == 0) {
 624           _smallLinearAllocBlock._ptr = prevEnd;
 625           _smallLinearAllocBlock._word_size = newFcSize;
 626           repairLinearAllocBlock(&_smallLinearAllocBlock);
 627         } else { // ParallelGCThreads > 0
 628           MutexLockerEx x(parDictionaryAllocLock(),
 629                           Mutex::_no_safepoint_check_flag);
 630           _smallLinearAllocBlock._ptr = prevEnd;
 631           _smallLinearAllocBlock._word_size = newFcSize;
 632           repairLinearAllocBlock(&_smallLinearAllocBlock);
 633         }
 634         // Births of chunks put into a LinAB are not recorded.  Births
 635         // of chunks as they are allocated out of a LinAB are.
 636       } else {
 637         // Add the block to the free lists, if possible coalescing it
 638         // with the last free block, and update the BOT and census data.
 639         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 640       }
 641     }
 642   }
 643 }
 644 
 645 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 646   CompactibleFreeListSpace* _cfls;
 647   CMSCollector* _collector;
 648 protected:
 649   // Override.
 650 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 651   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 652                                        HeapWord* bottom, HeapWord* top, \
 653                                        ClosureType* cl);                \
 654       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 655                                        HeapWord* bottom, HeapWord* top, \
 656                                        ClosureType* cl);                \
 657     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 658                                        HeapWord* bottom, HeapWord* top, \
 659                                        ClosureType* cl)
 660   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 661   walk_mem_region_with_cl_DECL(FilteringClosure);
 662 
 663 public:
 664   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 665                       CMSCollector* collector,
 666                       ExtendedOopClosure* cl,
 667                       CardTableModRefBS::PrecisionStyle precision,
 668                       HeapWord* boundary) :
 669     Filtering_DCTOC(sp, cl, precision, boundary),
 670     _cfls(sp), _collector(collector) {}
 671 };
 672 
 673 // We de-virtualize the block-related calls below, since we know that our
 674 // space is a CompactibleFreeListSpace.
 675 
 676 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 677 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 678                                                  HeapWord* bottom,              \
 679                                                  HeapWord* top,                 \
 680                                                  ClosureType* cl) {             \
 681    bool is_par = SharedHeap::heap()->n_par_threads() > 0;                       \
 682    if (is_par) {                                                                \
 683      assert(SharedHeap::heap()->n_par_threads() ==                              \
 684             SharedHeap::heap()->workers()->active_workers(), "Mismatch");       \
 685      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 686    } else {                                                                     \
 687      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 688    }                                                                            \
 689 }                                                                               \
 690 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 691                                                       HeapWord* bottom,         \
 692                                                       HeapWord* top,            \
 693                                                       ClosureType* cl) {        \
 694   /* Skip parts that are before "mr", in case "block_start" sent us             \
 695      back too far. */                                                           \
 696   HeapWord* mr_start = mr.start();                                              \
 697   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 698   HeapWord* next = bottom + bot_size;                                           \
 699   while (next < mr_start) {                                                     \
 700     bottom = next;                                                              \
 701     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 702     next = bottom + bot_size;                                                   \
 703   }                                                                             \
 704                                                                                 \
 705   while (bottom < top) {                                                        \
 706     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 707         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 708                     oop(bottom)) &&                                             \
 709         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 710       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 711       bottom += _cfls->adjustObjectSize(word_sz);                               \
 712     } else {                                                                    \
 713       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 714     }                                                                           \
 715   }                                                                             \
 716 }                                                                               \
 717 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 718                                                         HeapWord* bottom,       \
 719                                                         HeapWord* top,          \
 720                                                         ClosureType* cl) {      \
 721   /* Skip parts that are before "mr", in case "block_start" sent us             \
 722      back too far. */                                                           \
 723   HeapWord* mr_start = mr.start();                                              \
 724   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 725   HeapWord* next = bottom + bot_size;                                           \
 726   while (next < mr_start) {                                                     \
 727     bottom = next;                                                              \
 728     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 729     next = bottom + bot_size;                                                   \
 730   }                                                                             \
 731                                                                                 \
 732   while (bottom < top) {                                                        \
 733     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 734         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 735                     oop(bottom)) &&                                             \
 736         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 737       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 738       bottom += _cfls->adjustObjectSize(word_sz);                               \
 739     } else {                                                                    \
 740       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 741     }                                                                           \
 742   }                                                                             \
 743 }
 744 
 745 // (There are only two of these, rather than N, because the split is due
 746 // only to the introduction of the FilteringClosure, a local part of the
 747 // impl of this abstraction.)
 748 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 749 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 750 
 751 DirtyCardToOopClosure*
 752 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 753                                       CardTableModRefBS::PrecisionStyle precision,
 754                                       HeapWord* boundary) {
 755   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 756 }
 757 
 758 
 759 // Note on locking for the space iteration functions:
 760 // since the collector's iteration activities are concurrent with
 761 // allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance, a block being iterated
 763 // may suddenly be allocated or divided up and part of it allocated and
 764 // so on.
 765 
 766 // Apply the given closure to each block in the space.
 767 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 768   assert_lock_strong(freelistLock());
 769   HeapWord *cur, *limit;
 770   for (cur = bottom(), limit = end(); cur < limit;
 771        cur += cl->do_blk_careful(cur));
 772 }
 773 
 774 // Apply the given closure to each block in the space.
 775 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 776   assert_lock_strong(freelistLock());
 777   HeapWord *cur, *limit;
 778   for (cur = bottom(), limit = end(); cur < limit;
 779        cur += cl->do_blk(cur));
 780 }
 781 
 782 // Apply the given closure to each oop in the space.
 783 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 784   assert_lock_strong(freelistLock());
 785   HeapWord *cur, *limit;
 786   size_t curSize;
 787   for (cur = bottom(), limit = end(); cur < limit;
 788        cur += curSize) {
 789     curSize = block_size(cur);
 790     if (block_is_obj(cur)) {
 791       oop(cur)->oop_iterate(cl);
 792     }
 793   }
 794 }
 795 
// Apply the given closure to each oop in the intersection of the space and the given memory region.
 797 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, ExtendedOopClosure* cl) {
 798   assert_lock_strong(freelistLock());
 799   if (is_empty()) {
 800     return;
 801   }
 802   MemRegion cur = MemRegion(bottom(), end());
 803   mr = mr.intersection(cur);
 804   if (mr.is_empty()) {
 805     return;
 806   }
 807   if (mr.equals(cur)) {
 808     oop_iterate(cl);
 809     return;
 810   }
 811   assert(mr.end() <= end(), "just took an intersection above");
 812   HeapWord* obj_addr = block_start(mr.start());
 813   HeapWord* t = mr.end();
 814 
 815   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 816   if (block_is_obj(obj_addr)) {
 817     // Handle first object specially.
 818     oop obj = oop(obj_addr);
 819     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 820   } else {
 821     FreeChunk* fc = (FreeChunk*)obj_addr;
 822     obj_addr += fc->size();
 823   }
 824   while (obj_addr < t) {
 825     HeapWord* obj = obj_addr;
 826     obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than the region's end, then the
    // entire object "obj" is within the region.
 829     if (obj_addr <= t) {
 830       if (block_is_obj(obj)) {
 831         oop(obj)->oop_iterate(cl);
 832       }
 833     } else {
 834       // "obj" extends beyond end of region
 835       if (block_is_obj(obj)) {
 836         oop(obj)->oop_iterate(&smr_blk);
 837       }
 838       break;
 839     }
 840   }
 841 }
 842 
 843 // NOTE: In the following methods, in order to safely be able to
 844 // apply the closure to an object, we need to be sure that the
 845 // object has been initialized. We are guaranteed that an object
 846 // is initialized if we are holding the Heap_lock with the
 847 // world stopped.
 848 void CompactibleFreeListSpace::verify_objects_initialized() const {
 849   if (is_init_completed()) {
 850     assert_locked_or_safepoint(Heap_lock);
 851     if (Universe::is_fully_initialized()) {
 852       guarantee(SafepointSynchronize::is_at_safepoint(),
 853                 "Required for objects to be initialized");
 854     }
 855   } // else make a concession at vm start-up
 856 }
 857 
 858 // Apply the given closure to each object in the space
 859 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 860   assert_lock_strong(freelistLock());
 861   NOT_PRODUCT(verify_objects_initialized());
 862   HeapWord *cur, *limit;
 863   size_t curSize;
 864   for (cur = bottom(), limit = end(); cur < limit;
 865        cur += curSize) {
 866     curSize = block_size(cur);
 867     if (block_is_obj(cur)) {
 868       blk->do_object(oop(cur));
 869     }
 870   }
 871 }
 872 
// Apply the given closure to each live object in the space.
//   The use of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
// objects in the space to hold references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
// obj_is_alive() to determine whether it is safe to apply the closure to
// an object.  See obj_is_alive() for details on how liveness of an
// object is decided.
 882 
 883 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 884   assert_lock_strong(freelistLock());
 885   NOT_PRODUCT(verify_objects_initialized());
 886   HeapWord *cur, *limit;
 887   size_t curSize;
 888   for (cur = bottom(), limit = end(); cur < limit;
 889        cur += curSize) {
 890     curSize = block_size(cur);
 891     if (block_is_obj(cur) && obj_is_alive(cur)) {
 892       blk->do_object(oop(cur));
 893     }
 894   }
 895 }
 896 
 897 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 898                                                   UpwardsObjectClosure* cl) {
 899   assert_locked(freelistLock());
 900   NOT_PRODUCT(verify_objects_initialized());
 901   Space::object_iterate_mem(mr, cl);
 902 }
 903 
 904 // Callers of this iterator beware: The closure application should
 905 // be robust in the face of uninitialized objects and should (always)
 906 // return a correct size so that the next addr + size below gives us a
 907 // valid block boundary. [See for instance,
 908 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 909 // in ConcurrentMarkSweepGeneration.cpp.]
 910 HeapWord*
 911 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 912   assert_lock_strong(freelistLock());
 913   HeapWord *addr, *last;
 914   size_t size;
 915   for (addr = bottom(), last  = end();
 916        addr < last; addr += size) {
 917     FreeChunk* fc = (FreeChunk*)addr;
 918     if (fc->is_free()) {
 919       // Since we hold the free list lock, which protects direct
 920       // allocation in this generation by mutators, a free object
 921       // will remain free throughout this iteration code.
 922       size = fc->size();
 923     } else {
 924       // Note that the object need not necessarily be initialized,
 925       // because (for instance) the free list lock does NOT protect
 926       // object initialization. The closure application below must
 927       // therefore be correct in the face of uninitialized objects.
 928       size = cl->do_object_careful(oop(addr));
 929       if (size == 0) {
 930         // An unparsable object found. Signal early termination.
 931         return addr;
 932       }
 933     }
 934   }
 935   return NULL;
 936 }
 937 
 938 // Callers of this iterator beware: The closure application should
 939 // be robust in the face of uninitialized objects and should (always)
 940 // return a correct size so that the next addr + size below gives us a
 941 // valid block boundary. [See for instance,
 942 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 943 // in ConcurrentMarkSweepGeneration.cpp.]
 944 HeapWord*
 945 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 946   ObjectClosureCareful* cl) {
 947   assert_lock_strong(freelistLock());
 948   // Can't use used_region() below because it may not necessarily
 949   // be the same as [bottom(),end()); although we could
 950   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 951   // that appears too cumbersome, so we just do the simpler check
 952   // in the assertion below.
 953   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 954          "mr should be non-empty and within used space");
 955   HeapWord *addr, *end;
 956   size_t size;
 957   for (addr = block_start_careful(mr.start()), end  = mr.end();
 958        addr < end; addr += size) {
 959     FreeChunk* fc = (FreeChunk*)addr;
 960     if (fc->is_free()) {
 961       // Since we hold the free list lock, which protects direct
 962       // allocation in this generation by mutators, a free object
 963       // will remain free throughout this iteration code.
 964       size = fc->size();
 965     } else {
 966       // Note that the object need not necessarily be initialized,
 967       // because (for instance) the free list lock does NOT protect
 968       // object initialization. The closure application below must
 969       // therefore be correct in the face of uninitialized objects.
 970       size = cl->do_object_careful_m(oop(addr), mr);
 971       if (size == 0) {
 972         // An unparsable object found. Signal early termination.
 973         return addr;
 974       }
 975     }
 976   }
 977   return NULL;
 978 }
 979 
 980 
 981 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 982   NOT_PRODUCT(verify_objects_initialized());
 983   return _bt.block_start(p);
 984 }
 985 
 986 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 987   return _bt.block_start_careful(p);
 988 }
 989 
 990 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 991   NOT_PRODUCT(verify_objects_initialized());
 992   // This must be volatile, or else there is a danger that the compiler
 993   // will compile the code below into a sometimes-infinite loop, by keeping
 994   // the value read the first time in a register.
 995   while (true) {
 996     // We must do this until we get a consistent view of the object.
 997     if (FreeChunk::indicatesFreeChunk(p)) {
 998       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 999       size_t res = fc->size();
1000       // If the object is still a free chunk, return the size, else it
1001       // has been allocated so try again.
1002       if (FreeChunk::indicatesFreeChunk(p)) {
1003         assert(res != 0, "Block size should not be 0");
1004         return res;
1005       }
1006     } else {
1007       // must read from what 'p' points to in each loop.
1008       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1009       if (k != NULL) {
1010         assert(k->is_klass(), "Should really be klass oop.");
1011         oop o = (oop)p;
1012         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
1013         size_t res = o->size_given_klass(k);
1014         res = adjustObjectSize(res);
1015         assert(res != 0, "Block size should not be 0");
1016         return res;
1017       }
1018     }
1019   }
1020 }
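
// Example of the race the loop above tolerates (hypothetical interleaving):
// a reader sees FreeChunk::indicatesFreeChunk(p), but before it can trust
// the size a mutator allocates the chunk and begins installing an object
// header.  The re-check of indicatesFreeChunk(p) then fails, so the reader
// retries, takes the klass_or_null() path, and spins until the klass
// pointer has been published before trusting size_given_klass().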
1021 
1022 // TODO: Now that is_parsable is gone, we should combine these two functions.
1023 // A variant of the above that uses the Printezis bits for
1024 // unparsable but allocated objects. This avoids any possible
1025 // stalls waiting for mutators to initialize objects, and is
1026 // thus potentially faster than the variant above. However,
1027 // this variant may return a zero size for a block that is
1028 // under mutation and for which a consistent size cannot be
1029 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1030 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1031                                                      const CMSCollector* c)
1032 const {
1033   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1034   // This must be volatile, or else there is a danger that the compiler
1035   // will compile the code below into a sometimes-infinite loop, by keeping
1036   // the value read the first time in a register.
1037   DEBUG_ONLY(uint loops = 0;)
1038   while (true) {
1039     // We must do this until we get a consistent view of the object.
1040     if (FreeChunk::indicatesFreeChunk(p)) {
1041       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1042       size_t res = fc->size();
1043       if (FreeChunk::indicatesFreeChunk(p)) {
1044         assert(res != 0, "Block size should not be 0");
1045         assert(loops == 0, "Should be 0");
1046         return res;
1047       }
1048     } else {
1049       // must read from what 'p' points to in each loop.
1050       Klass* k = ((volatile oopDesc*)p)->klass_or_null();
1051       // We trust the size of any object that has a non-NULL
1052       // klass and (for those in the perm gen) is parsable
1053       // -- irrespective of its conc_safe-ty.
1054       if (k != NULL) {
1055         assert(k->is_klass(), "Should really be klass oop.");
1056         oop o = (oop)p;
1057         assert(o->is_oop(), "Should be an oop");
1058         size_t res = o->size_given_klass(k);
1059         res = adjustObjectSize(res);
1060         assert(res != 0, "Block size should not be 0");
1061         return res;
1062       } else {
1063         // May return 0 if P-bits not present.
1064         return c->block_size_if_printezis_bits(p);
1065       }
1066     }
1067     assert(loops == 0, "Can loop at most once");
1068     DEBUG_ONLY(loops++;)
1069   }
1070 }
1071 
1072 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1073   NOT_PRODUCT(verify_objects_initialized());
1074   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1075   FreeChunk* fc = (FreeChunk*)p;
1076   if (fc->is_free()) {
1077     return fc->size();
1078   } else {
1079     // Ignore mark word because this may be a recently promoted
1080     // object whose mark word is used to chain together grey
1081     // objects (the last one would have a null value).
1082     assert(oop(p)->is_oop(true), "Should be an oop");
1083     return adjustObjectSize(oop(p)->size());
1084   }
1085 }
1086 
1087 // This implementation assumes that the property of "being an object" is
1088 // stable.  But being a free chunk may not be (because of parallel
// promotion).
1090 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1091   FreeChunk* fc = (FreeChunk*)p;
1092   assert(is_in_reserved(p), "Should be in space");
1093   // When doing a mark-sweep-compact of the CMS generation, this
1094   // assertion may fail because prepare_for_compaction() uses
1095   // space that is garbage to maintain information on ranges of
1096   // live objects so that these live ranges can be moved as a whole.
  // Comment out this assertion until that problem can be solved
  // (i.e., the block start calculation may look at objects
  // at addresses below "p" in finding the object that contains "p",
  // and those objects (if garbage) may have been modified to hold
  // live range information).
1102   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1103   //        "Should be a block boundary");
1104   if (FreeChunk::indicatesFreeChunk(p)) return false;
1105   Klass* k = oop(p)->klass_or_null();
1106   if (k != NULL) {
1107     // Ignore mark word because it may have been used to
1108     // chain together promoted objects (the last one
1109     // would have a null value).
1110     assert(oop(p)->is_oop(true), "Should be an oop");
1111     return true;
1112   } else {
1113     return false;  // Was not an object at the start of collection.
1114   }
1115 }
1116 
1117 // Check if the object is alive. This fact is checked either by consulting
1118 // the main marking bitmap in the sweeping phase or, if it's a permanent
1119 // generation and we're not in the sweeping phase, by checking the
1120 // perm_gen_verify_bit_map where we store the "deadness" information if
1121 // we did not sweep the perm gen in the most recent previous GC cycle.
1122 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1123   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1124          "Else races are possible");
1125   assert(block_is_obj(p), "The address should point to an object");
1126 
1127   // If we're sweeping, we use object liveness information from the main bit map
1128   // for both perm gen and old gen.
1129   // We don't need to lock the bitmap (live_map or dead_map below), because
1130   // EITHER we are in the middle of the sweeping phase, and the
1131   // main marking bit map (live_map below) is locked,
1132   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1133   // is stable, because it's mutated only in the sweeping phase.
1134   // NOTE: This method is also used by jmap where, if class unloading is
1135   // off, the results can return "false" for legitimate perm objects,
1136   // when we are not in the midst of a sweeping phase, which can result
1137   // in jmap not reporting certain perm gen objects. This will be moot
1138   // if/when the perm gen goes away in the future.
1139   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1140     CMSBitMap* live_map = _collector->markBitMap();
1141     return live_map->par_isMarked((HeapWord*) p);
1142   }
1143   return true;
1144 }
1145 
1146 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1147   FreeChunk* fc = (FreeChunk*)p;
1148   assert(is_in_reserved(p), "Should be in space");
1149   assert(_bt.block_start(p) == p, "Should be a block boundary");
1150   if (!fc->is_free()) {
1151     // Ignore mark word because it may have been used to
1152     // chain together promoted objects (the last one
1153     // would have a null value).
1154     assert(oop(p)->is_oop(true), "Should be an oop");
1155     return true;
1156   }
1157   return false;
1158 }
1159 
1160 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistLock when you call this.
1162 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1163   size_t size = 0;
1164   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1165     debug_only(
1166       // We may be calling here without the lock in which case we
1167       // won't do this modest sanity check.
1168       if (freelistLock()->owned_by_self()) {
1169         size_t total_list_size = 0;
1170         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1171           fc = fc->next()) {
1172           total_list_size += i;
1173         }
1174         assert(total_list_size == i * _indexedFreeList[i].count(),
1175                "Count in list is incorrect");
1176       }
1177     )
1178     size += i * _indexedFreeList[i].count();
1179   }
1180   return size;
1181 }
1182 
1183 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1184   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1185   return allocate(size);
1186 }
1187 
1188 HeapWord*
1189 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1190   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1191 }
1192 
1193 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1194   assert_lock_strong(freelistLock());
1195   HeapWord* res = NULL;
1196   assert(size == adjustObjectSize(size),
1197          "use adjustObjectSize() before calling into allocate()");
1198 
1199   if (_adaptive_freelists) {
1200     res = allocate_adaptive_freelists(size);
1201   } else {  // non-adaptive free lists
1202     res = allocate_non_adaptive_freelists(size);
1203   }
1204 
1205   if (res != NULL) {
1206     // check that res does lie in this space!
1207     assert(is_in_reserved(res), "Not in this space!");
1208     assert(is_aligned((void*)res), "alignment check");
1209 
1210     FreeChunk* fc = (FreeChunk*)res;
1211     fc->markNotFree();
1212     assert(!fc->is_free(), "shouldn't be marked free");
1213     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1214     // Verify that the block offset table shows this to
1215     // be a single block, but not one which is unallocated.
1216     _bt.verify_single_block(res, size);
1217     _bt.verify_not_unallocated(res, size);
1218     // mangle a just allocated object with a distinct pattern.
1219     debug_only(fc->mangleAllocated(size));
1220   }
1221 
1222   return res;
1223 }
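
// A minimal usage sketch (hypothetical caller names, for exposition only):
// allocate() asserts that the request has already been rounded with
// adjustObjectSize(), and par_allocate() takes the free list lock itself:
//
//   size_t adjusted = cfls->adjustObjectSize(word_sz);
//   HeapWord* mem   = cfls->par_allocate(adjusted);  // locks freelistLock()
//   // mem == NULL means the request could not be satisfied from this space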
1224 
1225 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1226   HeapWord* res = NULL;
1227   // try and use linear allocation for smaller blocks
1228   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1229     // if successful, the following also adjusts block offset table
1230     res = getChunkFromSmallLinearAllocBlock(size);
1231   }
1232   // Else triage to indexed lists for smaller sizes
1233   if (res == NULL) {
1234     if (size < SmallForDictionary) {
1235       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1236     } else {
1237       // else get it from the big dictionary; if even this doesn't
1238       // work we are out of luck.
1239       res = (HeapWord*)getChunkFromDictionaryExact(size);
1240     }
1241   }
1242 
1243   return res;
1244 }
1245 
1246 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1247   assert_lock_strong(freelistLock());
1248   HeapWord* res = NULL;
1249   assert(size == adjustObjectSize(size),
1250          "use adjustObjectSize() before calling into allocate()");
1251 
1252   // Strategy
1253   //   if small
1254   //     exact size from small object indexed list if small
1255   //     small or large linear allocation block (linAB) as appropriate
1256   //     take from lists of greater sized chunks
1257   //   else
1258   //     dictionary
1259   //     small or large linear allocation block if it has the space
1260   // Try allocating exact size from indexTable first
1261   if (size < IndexSetSize) {
1262     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1263     if (res != NULL) {
1264       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1265         "Not removed from free list");
1266       // no block offset table adjustment is necessary on blocks in
1267       // the indexed lists.
1268 
1269     // Try allocating from the small LinAB
1270     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1271         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1272         // if successful, the above also adjusts block offset table
1273         // Note that this call will refill the LinAB to
1274         // satisfy the request.  This is different than
1275         // evm.
1276         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1277     } else {
1278       // Raid the exact free lists larger than size, even if they are not
1279       // overpopulated.
1280       res = (HeapWord*) getChunkFromGreater(size);
1281     }
1282   } else {
1283     // Big objects get allocated directly from the dictionary.
1284     res = (HeapWord*) getChunkFromDictionaryExact(size);
1285     if (res == NULL) {
1286       // Try hard not to fail since an allocation failure will likely
1287       // trigger a synchronous GC.  Try to get the space from the
1288       // allocation blocks.
1289       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1290     }
1291   }
1292 
1293   return res;
1294 }
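
// Illustrative walk-through of the strategy above (sizes are hypothetical,
// except IndexSetSize == 257, see the CFLS_LAB assert below): a 10-word
// request first tries _indexedFreeList[10], then the small linAB, then the
// exact lists of greater size; a 300-word request (>= IndexSetSize) goes to
// the dictionary and, failing that, to the small linAB remainder.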
1295 
1296 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1297 // when promoting obj.
1298 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1299   // Depending on the object size, expansion may require refilling either a
1300   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1301   // is added because the dictionary may over-allocate to avoid fragmentation.
1302   size_t space = obj_size;
1303   if (!_adaptive_freelists) {
1304     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1305   }
1306   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1307   return space;
1308 }
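
// Worked example with hypothetical values (for exposition only): with
// adaptive free lists, obj_size = 100, _promoInfo.refillSize() = 128 and
// MinChunkSize = 4 would give 100 + 128 + 2*4 = 236 HeapWords; the
// non-adaptive case first raises obj_size to the small linAB refill size.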
1309 
1310 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1311   FreeChunk* ret;
1312 
1313   assert(numWords >= MinChunkSize, "Size is less than minimum");
1314   assert(linearAllocationWouldFail() || bestFitFirst(),
1315     "Should not be here");
1316 
1317   size_t i;
1318   size_t currSize = numWords + MinChunkSize;
1319   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1320   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1321     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1322     if (fl->head()) {
1323       ret = getFromListGreater(fl, numWords);
1324       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1325       return ret;
1326     }
1327   }
1328 
1329   currSize = MAX2((size_t)SmallForDictionary,
1330                   (size_t)(numWords + MinChunkSize));
1331 
1332   /* Try to get a chunk that satisfies request, while avoiding
1333      fragmentation that can't be handled. */
1334   {
1335     ret =  dictionary()->get_chunk(currSize);
1336     if (ret != NULL) {
1337       assert(ret->size() - numWords >= MinChunkSize,
1338              "Chunk is too small");
1339       _bt.allocated((HeapWord*)ret, ret->size());
1340       /* Carve returned chunk. */
1341       (void) splitChunkAndReturnRemainder(ret, numWords);
1342       /* Label this as no longer a free chunk. */
1343       assert(ret->is_free(), "This chunk should be free");
1344       ret->link_prev(NULL);
1345     }
1346     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1347     return ret;
1348   }
1349   ShouldNotReachHere();
1350 }
1351 
1352 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1353   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1354   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1355 }
1356 
1357 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1358   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1359          (_smallLinearAllocBlock._word_size == fc->size()),
1360          "Linear allocation block shows incorrect size");
1361   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1362           (_smallLinearAllocBlock._word_size == fc->size()));
1363 }
1364 
1365 // Check if the purported free chunk is present either as a linear
1366 // allocation block, the size-indexed table of (smaller) free blocks,
1367 // or the larger free blocks kept in the binary tree dictionary.
1368 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1369   if (verify_chunk_is_linear_alloc_block(fc)) {
1370     return true;
1371   } else if (fc->size() < IndexSetSize) {
1372     return verifyChunkInIndexedFreeLists(fc);
1373   } else {
1374     return dictionary()->verify_chunk_in_free_list(fc);
1375   }
1376 }
1377 
1378 #ifndef PRODUCT
1379 void CompactibleFreeListSpace::assert_locked() const {
1380   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1381 }
1382 
1383 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1384   CMSLockVerifier::assert_locked(lock);
1385 }
1386 #endif
1387 
1388 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1389   // In the parallel case, the main thread holds the free list lock
1390   // on behalf of the parallel threads.
1391   FreeChunk* fc;
1392   {
1393     // If GC is parallel, this might be called by several threads.
1394     // This should be rare enough that the locking overhead won't affect
1395     // the sequential code.
1396     MutexLockerEx x(parDictionaryAllocLock(),
1397                     Mutex::_no_safepoint_check_flag);
1398     fc = getChunkFromDictionary(size);
1399   }
1400   if (fc != NULL) {
1401     fc->dontCoalesce();
1402     assert(fc->is_free(), "Should be free, but not coalescable");
1403     // Verify that the block offset table shows this to
1404     // be a single block, but not one which is unallocated.
1405     _bt.verify_single_block((HeapWord*)fc, fc->size());
1406     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1407   }
1408   return fc;
1409 }
1410 
1411 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1412   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1413   assert_locked();
1414 
1415   // if we are tracking promotions, then first ensure space for
1416   // promotion (including spooling space for saving header if necessary).
1417   // then allocate and copy, then track promoted info if needed.
1418   // When tracking (see PromotionInfo::track()), the mark word may
1419   // be displaced and in this case restoration of the mark word
1420   // occurs in the (oop_since_save_marks_)iterate phase.
1421   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1422     return NULL;
1423   }
1424   // Call the allocate(size_t, bool) form directly to avoid the
1425   // additional call through the allocate(size_t) form.  Having
1426   // the compiler inline the call is problematic because allocate(size_t)
1427   // is a virtual method.
1428   HeapWord* res = allocate(adjustObjectSize(obj_size));
1429   if (res != NULL) {
1430     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1431     // if we should be tracking promotions, do so.
1432     if (_promoInfo.tracking()) {
1433         _promoInfo.track((PromotedObject*)res);
1434     }
1435   }
1436   return oop(res);
1437 }
1438 
1439 HeapWord*
1440 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1441   assert_locked();
1442   assert(size >= MinChunkSize, "minimum chunk size");
1443   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1444     "maximum from smallLinearAllocBlock");
1445   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1446 }
1447 
1448 HeapWord*
1449 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1450                                                        size_t size) {
1451   assert_locked();
1452   assert(size >= MinChunkSize, "too small");
1453   HeapWord* res = NULL;
1454   // Try to do linear allocation from blk, making sure that blk is not empty.
1455   if (blk->_word_size == 0) {
1456     // We have probably been unable to refill this block, either in the
1457     // prologue or after it was exhausted by the last linear allocation.
1458     // Bail out until next time.
1459     assert(blk->_ptr == NULL, "consistency check");
1460     return NULL;
1461   }
1462   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1463   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1464   if (res != NULL) return res;
1465 
1466   // about to exhaust this linear allocation block
1467   if (blk->_word_size == size) { // exactly satisfied
1468     res = blk->_ptr;
1469     _bt.allocated(res, blk->_word_size);
1470   } else if (size + MinChunkSize <= blk->_refillSize) {
1471     size_t sz = blk->_word_size;
1472     // Update _unallocated_block if the size is such that chunk would be
1473     // returned to the indexed free list.  All other chunks in the indexed
1474     // free lists are allocated from the dictionary so that _unallocated_block
1475     // has already been adjusted for them.  Do it here so that the cost
1476     // is incurred for all chunks added back to the indexed free lists.
1477     if (sz < SmallForDictionary) {
1478       _bt.allocated(blk->_ptr, sz);
1479     }
1480     // Return the chunk that isn't big enough, and then refill below.
1481     addChunkToFreeLists(blk->_ptr, sz, true);
1482     split_birth(sz);
1483     // Don't keep statistics on adding back chunk from a LinAB.
1484   } else {
1485     // A refilled block would not satisfy the request.
1486     return NULL;
1487   }
1488 
1489   blk->_ptr = NULL; blk->_word_size = 0;
1490   refillLinearAllocBlock(blk);
1491   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1492          "block was replenished");
1493   if (res != NULL) {
1494     split_birth(size);
1495     repairLinearAllocBlock(blk);
1496   } else if (blk->_ptr != NULL) {
1497     res = blk->_ptr;
1498     size_t blk_size = blk->_word_size;
1499     blk->_word_size -= size;
1500     blk->_ptr  += size;
1501     split_birth(size);
1502     repairLinearAllocBlock(blk);
1503     // Update BOT last so that other (parallel) GC threads see a consistent
1504     // view of the BOT and free blocks.
1505     // Above must occur before BOT is updated below.
1506     OrderAccess::storestore();
1507     _bt.split_block(res, blk_size, size);  // adjust block offset table
1508   }
1509   return res;
1510 }
1511 
1512 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1513                                         LinearAllocBlock* blk,
1514                                         size_t size) {
1515   assert_locked();
1516   assert(size >= MinChunkSize, "too small");
1517 
1518   HeapWord* res = NULL;
1519   // This is the common case.  Keep it simple.
1520   if (blk->_word_size >= size + MinChunkSize) {
1521     assert(blk->_ptr != NULL, "consistency check");
1522     res = blk->_ptr;
1523     // Note that the BOT is up-to-date for the linAB before allocation.  It
1524     // indicates the start of the linAB.  The split_block() updates the
1525     // BOT for the linAB after the allocation (indicates the start of the
1526     // next chunk to be allocated).
1527     size_t blk_size = blk->_word_size;
1528     blk->_word_size -= size;
1529     blk->_ptr  += size;
1530     split_birth(size);
1531     repairLinearAllocBlock(blk);
1532     // Update BOT last so that other (parallel) GC threads see a consistent
1533     // view of the BOT and free blocks.
1534     // Above must occur before BOT is updated below.
1535     OrderAccess::storestore();
1536     _bt.split_block(res, blk_size, size);  // adjust block offset table
1537     _bt.allocated(res, size);
1538   }
1539   return res;
1540 }
1541 
1542 FreeChunk*
1543 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1544   assert_locked();
1545   assert(size < SmallForDictionary, "just checking");
1546   FreeChunk* res;
1547   res = _indexedFreeList[size].get_chunk_at_head();
1548   if (res == NULL) {
1549     res = getChunkFromIndexedFreeListHelper(size);
1550   }
1551   _bt.verify_not_unallocated((HeapWord*) res, size);
1552   assert(res == NULL || res->size() == size, "Incorrect block size");
1553   return res;
1554 }
1555 
1556 FreeChunk*
1557 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1558   bool replenish) {
1559   assert_locked();
1560   FreeChunk* fc = NULL;
1561   if (size < SmallForDictionary) {
1562     assert(_indexedFreeList[size].head() == NULL ||
1563       _indexedFreeList[size].surplus() <= 0,
1564       "List for this size should be empty or under populated");
1565     // Try best fit in exact lists before replenishing the list
1566     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1567       // Replenish list.
1568       //
1569       // Things tried that failed.
1570       //   Tried allocating out of the two LinAB's first before
1571       // replenishing lists.
1572       //   Tried small linAB of size 256 (size in indexed list)
1573       // and replenishing indexed lists from the small linAB.
1574       //
1575       FreeChunk* newFc = NULL;
1576       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1577       if (replenish_size < SmallForDictionary) {
1578         // Do not replenish from an underpopulated size.
1579         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1580             _indexedFreeList[replenish_size].head() != NULL) {
1581           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1582         } else if (bestFitFirst()) {
1583           newFc = bestFitSmall(replenish_size);
1584         }
1585       }
1586       if (newFc == NULL && replenish_size > size) {
1587         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1588         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1589       }
1590       // Note: The stats update re split-death of the block obtained above
1591       // will be recorded below, precisely when we know we are actually
1592       // going to split it into more than one piece.
1593       if (newFc != NULL) {
1594         if  (replenish || CMSReplenishIntermediate) {
1595           // Replenish this list and return one block to caller.
1596           size_t i;
1597           FreeChunk *curFc, *nextFc;
1598           size_t num_blk = newFc->size() / size;
1599           assert(num_blk >= 1, "Smaller than requested?");
1600           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1601           if (num_blk > 1) {
1602             // we are sure we will be splitting the block just obtained
1603             // into multiple pieces; record the split-death of the original
1604             splitDeath(replenish_size);
1605           }
1606           // carve up and link blocks 0, ..., num_blk - 2
1607           // The last chunk is not added to the lists but is returned as the
1608           // free chunk.
1609           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1610                i = 0;
1611                i < (num_blk - 1);
1612                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1613                i++) {
1614             curFc->set_size(size);
1615             // Don't record this as a return in order to try and
1616             // determine the "returns" from a GC.
1617             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1618             _indexedFreeList[size].return_chunk_at_tail(curFc, false, true);
1619             _bt.mark_block((HeapWord*)curFc, size);
1620             split_birth(size);
1621             // Don't record the initial population of the indexed list
1622             // as a split birth.
1623           }
1624 
1625           // check that the arithmetic was OK above
1626           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1627             "inconsistency in carving newFc");
1628           curFc->set_size(size);
1629           _bt.mark_block((HeapWord*)curFc, size);
1630           split_birth(size);
1631           fc = curFc;
1632         } else {
1633           // Return entire block to caller
1634           fc = newFc;
1635         }
1636       }
1637     }
1638   } else {
1639     // Get a free chunk from the free chunk dictionary to be returned to
1640     // replenish the indexed free list.
1641     fc = getChunkFromDictionaryExact(size);
1642   }
1643   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1644   return fc;
1645 }
1646 
1647 FreeChunk*
1648 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1649   assert_locked();
1650   FreeChunk* fc = _dictionary->get_chunk(size);
1651   if (fc == NULL) {
1652     return NULL;
1653   }
1654   _bt.allocated((HeapWord*)fc, fc->size());
1655   if (fc->size() >= size + MinChunkSize) {
1656     fc = splitChunkAndReturnRemainder(fc, size);
1657   }
1658   assert(fc->size() >= size, "chunk too small");
1659   assert(fc->size() < size + MinChunkSize, "chunk too big");
1660   _bt.verify_single_block((HeapWord*)fc, fc->size());
1661   return fc;
1662 }
1663 
1664 FreeChunk*
1665 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1666   assert_locked();
1667   FreeChunk* fc = _dictionary->get_chunk(size);
1668   if (fc == NULL) {
1669     return fc;
1670   }
1671   _bt.allocated((HeapWord*)fc, fc->size());
1672   if (fc->size() == size) {
1673     _bt.verify_single_block((HeapWord*)fc, size);
1674     return fc;
1675   }
1676   assert(fc->size() > size, "get_chunk() guarantee");
1677   if (fc->size() < size + MinChunkSize) {
1678     // Return the chunk to the dictionary and go get a bigger one.
1679     returnChunkToDictionary(fc, true);
1680     fc = _dictionary->get_chunk(size + MinChunkSize);
1681     if (fc == NULL) {
1682       return NULL;
1683     }
1684     _bt.allocated((HeapWord*)fc, fc->size());
1685   }
1686   assert(fc->size() >= size + MinChunkSize, "tautology");
1687   fc = splitChunkAndReturnRemainder(fc, size);
1688   assert(fc->size() == size, "chunk is wrong size");
1689   _bt.verify_single_block((HeapWord*)fc, size);
1690   return fc;
1691 }
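
// Illustrative scenario (hypothetical sizes): for a request of 300 words the
// dictionary may return a 302-word chunk; with MinChunkSize = 4 the 2-word
// remainder could not form a free chunk, so the chunk is returned and a
// chunk of at least 304 words is requested instead, which is then split so
// that exactly 300 words are handed back to the caller.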
1692 
1693 void
1694 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk, bool deallocate_pages) {
1695   assert_locked();
1696 
1697   size_t size = chunk->size();
1698   _bt.verify_single_block((HeapWord*)chunk, size);
1699   // adjust _unallocated_block downward, as necessary
1700   _bt.freed((HeapWord*)chunk, size);
1701   _dictionary->return_chunk(chunk, deallocate_pages);
1702 #ifndef PRODUCT
1703   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1704     TreeChunk<FreeChunk, AdaptiveFreeList>* tc = TreeChunk<FreeChunk, AdaptiveFreeList>::as_TreeChunk(chunk);
1705     TreeList<FreeChunk, AdaptiveFreeList>* tl = tc->list();
1706     tl->verify_stats();
1707   }
1708 #endif // PRODUCT
1709 }
1710 
1711 void
1712 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc, bool deallocate_pages) {
1713   assert_locked();
1714   size_t size = fc->size();
1715   _bt.verify_single_block((HeapWord*) fc, size);
1716   _bt.verify_not_unallocated((HeapWord*) fc, size);
1717   if (_adaptive_freelists) {
1718     _indexedFreeList[size].return_chunk_at_tail(fc, true, deallocate_pages);
1719   } else {
1720     _indexedFreeList[size].return_chunk_at_head(fc, true, deallocate_pages);
1721   }
1722 #ifndef PRODUCT
1723   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1724      _indexedFreeList[size].verify_stats();
1725   }
1726 #endif // PRODUCT
1727 }
1728 
1729 // Add chunk to end of last block -- if it's the largest
1730 // block -- and update BOT and census data. We would
1731 // of course have preferred to coalesce it with the
1732 // last block, but it's currently less expensive to find the
1733 // largest block than it is to find the last.
1734 void
1735 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1736   HeapWord* chunk, size_t     size) {
1737   // check that the chunk does lie in this space!
1738   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1739   // One of the parallel gc task threads may be here
1740   // whilst others are allocating.
1741   Mutex* lock = NULL;
1742   if (ParallelGCThreads != 0) {
1743     lock = &_parDictionaryAllocLock;
1744   }
1745   FreeChunk* ec;
1746   {
1747     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1748     ec = dictionary()->find_largest_dict();  // get largest block
1749     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1750       // It's a coterminal block - we can coalesce.
1751       size_t old_size = ec->size();
1752       coalDeath(old_size);
1753       removeChunkFromDictionary(ec);
1754       size += old_size;
1755     } else {
1756       ec = (FreeChunk*)chunk;
1757     }
1758   }
1759   ec->set_size(size);
1760   debug_only(ec->mangleFreed(size));
1761   if (size < SmallForDictionary) {
1762     lock = _indexedFreeListParLocks[size];
1763   }
1764   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1765   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true, true);
1766   // record the birth under the lock since the recording involves
1767   // manipulation of the list on which the chunk lives and
1768   // if the chunk is allocated and is the last on the list,
1769   // the list can go away.
1770   coalBirth(size);
1771 }
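
// Illustrative example (hypothetical sizes): if the largest dictionary block
// is 1000 words and ends exactly where `chunk` begins, the two are merged
// into a single free block of 1000 + size words; the census records a
// coalescing death of the old size and a coalescing birth of the new size.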
1772 
1773 void
1774 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1775                                               size_t     size,
1776                                               bool deallocate_pages) {
1777   // check that the chunk does lie in this space!
1778   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1779   assert_locked();
1780   _bt.verify_single_block(chunk, size);
1781 
1782   FreeChunk* fc = (FreeChunk*) chunk;
1783   fc->set_size(size);
1784   debug_only(fc->mangleFreed(size));
1785   if (size < SmallForDictionary) {
1786     returnChunkToFreeList(fc, deallocate_pages);
1787   } else {
1788     returnChunkToDictionary(fc, deallocate_pages);
1789   }
1790 }
1791 
1792 void
1793 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1794   size_t size, bool coalesced, bool deallocate_pages) {
1795   assert_locked();
1796   assert(chunk != NULL, "null chunk");
1797   if (coalesced) {
1798     // repair BOT
1799     _bt.single_block(chunk, size);
1800   }
1801   addChunkToFreeLists(chunk, size, deallocate_pages);
1802 }
1803 
1804 // We _must_ find the purported chunk on our free lists;
1805 // we assert if we don't.
1806 void
1807 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1808   size_t size = fc->size();
1809   assert_locked();
1810   debug_only(verifyFreeLists());
1811   if (size < SmallForDictionary) {
1812     removeChunkFromIndexedFreeList(fc);
1813   } else {
1814     removeChunkFromDictionary(fc);
1815   }
1816   _bt.verify_single_block((HeapWord*)fc, size);
1817   debug_only(verifyFreeLists());
1818 }
1819 
1820 void
1821 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1822   size_t size = fc->size();
1823   assert_locked();
1824   assert(fc != NULL, "null chunk");
1825   _bt.verify_single_block((HeapWord*)fc, size);
1826   _dictionary->remove_chunk(fc);
1827   // adjust _unallocated_block upward, as necessary
1828   _bt.allocated((HeapWord*)fc, size);
1829 }
1830 
1831 void
1832 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1833   assert_locked();
1834   size_t size = fc->size();
1835   _bt.verify_single_block((HeapWord*)fc, size);
1836   NOT_PRODUCT(
1837     if (FLSVerifyIndexTable) {
1838       verifyIndexedFreeList(size);
1839     }
1840   )
1841   _indexedFreeList[size].remove_chunk(fc);
1842   NOT_PRODUCT(
1843     if (FLSVerifyIndexTable) {
1844       verifyIndexedFreeList(size);
1845     }
1846   )
1847 }
1848 
1849 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1850   /* A hint is the next larger size that has a surplus.
1851      Start search at a size large enough to guarantee that
1852      the excess is >= MinChunkSize. */
1853   size_t start = align_object_size(numWords + MinChunkSize);
1854   if (start < IndexSetSize) {
1855     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1856     size_t    hint = _indexedFreeList[start].hint();
1857     while (hint < IndexSetSize) {
1858       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1859       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1860       if (fl->surplus() > 0 && fl->head() != NULL) {
1861         // Found a list with surplus, reset original hint
1862         // and split out a free chunk which is returned.
1863         _indexedFreeList[start].set_hint(hint);
1864         FreeChunk* res = getFromListGreater(fl, numWords);
1865         assert(res == NULL || res->is_free(),
1866           "Should be returning a free chunk");
1867         return res;
1868       }
1869       hint = fl->hint(); /* keep looking */
1870     }
1871     /* None found. */
1872     it[start].set_hint(IndexSetSize);
1873   }
1874   return NULL;
1875 }
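
// Illustrative hint walk (hypothetical sizes): for numWords = 8 and
// MinChunkSize = 4, the search starts at the list for align_object_size(12)
// and follows the per-list hints to ever larger sizes until it finds a list
// with a surplus and a non-empty head, from which getFromListGreater() then
// splits off an 8-word chunk.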
1876 
1877 /* Requires fl->size >= numWords + MinChunkSize */
1878 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1879   size_t numWords) {
1880   FreeChunk *curr = fl->head();
1881   assert(curr != NULL, "List is empty");
1882   assert(numWords >= MinChunkSize, "Word size is too small");
1883   size_t oldNumWords = curr->size();
1884   assert(oldNumWords >= numWords + MinChunkSize,
1885         "Size of chunks in the list is too small");
1886 
1887   fl->remove_chunk(curr);
1888   // recorded indirectly by splitChunkAndReturnRemainder -
1889   // smallSplit(oldNumWords, numWords);
1890   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1891   // Does anything have to be done for the remainder in terms of
1892   // fixing the card table?
1893   assert(new_chunk == NULL || new_chunk->is_free(),
1894     "Should be returning a free chunk");
1895   return new_chunk;
1896 }
1897 
1898 FreeChunk*
1899 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1900   size_t new_size) {
1901   assert_locked();
1902   size_t size = chunk->size();
1903   assert(size > new_size, "Split from a smaller block?");
1904   assert(is_aligned(chunk), "alignment problem");
1905   assert(size == adjustObjectSize(size), "alignment problem");
1906   size_t rem_size = size - new_size;
1907   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1908   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1909   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1910   assert(is_aligned(ffc), "alignment problem");
1911   ffc->set_size(rem_size);
1912   ffc->link_next(NULL);
1913   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1914   // Above must occur before BOT is updated below.
1915   // adjust block offset table
1916   OrderAccess::storestore();
1917   assert(chunk->is_free() && ffc->is_free(), "Error");
1918   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1919   if (rem_size < SmallForDictionary) {
1920     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1921     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1922     assert(!is_par ||
1923            (SharedHeap::heap()->n_par_threads() ==
1924             SharedHeap::heap()->workers()->active_workers()), "Mismatch");
1925     // We don't need to deallocate pages here because a chunk is split
1926     // and its pages are already deallocated at this point. Otherwise,
1927     // we would wastefully issue a system call to deallocate pages,
1928     // especially on every OG allocation after a compacting GC.
1929     returnChunkToFreeList(ffc, false);
1930     split(size, rem_size);
1931     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1932   } else {
1933     // We don't need to deallocate pages here because a chunk is split
1934     // and its pages are already deallocated at this point. Otherwise,
1935     // we would wastefully issue a system call to deallocate pages,
1936     // especially on every OG allocation after a compacting GC.
1937     returnChunkToDictionary(ffc, false);
1938     split(size, rem_size);
1939   }
1940   chunk->set_size(new_size);
1941   return chunk;
1942 }
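
// Worked example (hypothetical sizes): splitting a 100-word free chunk at
// new_size = 40 places a 60-word remainder at chunk + 40; the remainder is
// returned to the indexed lists if it is smaller than SmallForDictionary and
// to the dictionary otherwise, while the original chunk is resized to 40
// words and returned to the caller.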
1943 
1944 void
1945 CompactibleFreeListSpace::sweep_completed() {
1946   // Now that space is probably plentiful, refill linear
1947   // allocation blocks as needed.
1948   refillLinearAllocBlocksIfNeeded();
1949 }
1950 
1951 void
1952 CompactibleFreeListSpace::gc_prologue() {
1953   assert_locked();
1954   if (PrintFLSStatistics != 0) {
1955     gclog_or_tty->print("Before GC:\n");
1956     reportFreeListStatistics();
1957   }
1958   refillLinearAllocBlocksIfNeeded();
1959 }
1960 
1961 void
1962 CompactibleFreeListSpace::gc_epilogue() {
1963   assert_locked();
1964   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1965     if (_smallLinearAllocBlock._word_size == 0)
1966       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1967   }
1968   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1969   _promoInfo.stopTrackingPromotions();
1970   repairLinearAllocationBlocks();
1971   // Print Space's stats
1972   if (PrintFLSStatistics != 0) {
1973     gclog_or_tty->print("After GC:\n");
1974     reportFreeListStatistics();
1975   }
1976 }
1977 
1978 // Iteration support, mostly delegated from a CMS generation
1979 
1980 void CompactibleFreeListSpace::save_marks() {
1981   assert(Thread::current()->is_VM_thread(),
1982          "Global variable should only be set when single-threaded");
1983   // Mark the "end" of the used space at the time of this call;
1984   // note, however, that promoted objects from this point
1985   // on are tracked in the _promoInfo below.
1986   set_saved_mark_word(unallocated_block());
1987 #ifdef ASSERT
1988   // Check the sanity of save_marks() etc.
1989   MemRegion ur    = used_region();
1990   MemRegion urasm = used_region_at_save_marks();
1991   assert(ur.contains(urasm),
1992          err_msg(" Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1993                  " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1994                  ur.start(), ur.end(), urasm.start(), urasm.end()));
1995 #endif
1996   // inform allocator that promotions should be tracked.
1997   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1998   _promoInfo.startTrackingPromotions();
1999 }
2000 
2001 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
2002   assert(_promoInfo.tracking(), "No preceding save_marks?");
2003   assert(SharedHeap::heap()->n_par_threads() == 0,
2004          "Shouldn't be called if using parallel gc.");
2005   return _promoInfo.noPromotions();
2006 }
2007 
2008 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
2009                                                                             \
2010 void CompactibleFreeListSpace::                                             \
2011 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
2012   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
2013          "Shouldn't be called (yet) during parallel part of gc.");          \
2014   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
2015   /*                                                                        \
2016    * This also restores any displaced headers and removes the elements from \
2017    * the iteration set as they are processed, so that we have a clean slate \
2018    * at the end of the iteration. Note, thus, that if new objects are       \
2019    * promoted as a result of the iteration they are iterated over as well.  \
2020    */                                                                       \
2021   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
2022 }
2023 
2024 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
2025 
2026 
2027 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
2028   // ugghh... how would one do this efficiently for a non-contiguous space?
2029   guarantee(false, "NYI");
2030 }
2031 
2032 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
2033   return _smallLinearAllocBlock._word_size == 0;
2034 }
2035 
2036 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
2037   // Fix up linear allocation blocks to look like free blocks
2038   repairLinearAllocBlock(&_smallLinearAllocBlock);
2039 }
2040 
2041 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
2042   assert_locked();
2043   if (blk->_ptr != NULL) {
2044     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
2045            "Minimum block size requirement");
2046     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
2047     fc->set_size(blk->_word_size);
2048     fc->link_prev(NULL);   // mark as free
2049     fc->dontCoalesce();
2050     assert(fc->is_free(), "just marked it free");
2051     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2052   }
2053 }
2054 
2055 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2056   assert_locked();
2057   if (_smallLinearAllocBlock._ptr == NULL) {
2058     assert(_smallLinearAllocBlock._word_size == 0,
2059       "Size of linAB should be zero if the ptr is NULL");
2060     // Reset the linAB refill and allocation size limit.
2061     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2062   }
2063   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2064 }
2065 
2066 void
2067 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2068   assert_locked();
2069   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2070          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2071          "blk invariant");
2072   if (blk->_ptr == NULL) {
2073     refillLinearAllocBlock(blk);
2074   }
2075   if (PrintMiscellaneous && Verbose) {
2076     if (blk->_word_size == 0) {
2077       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2078     }
2079   }
2080 }
2081 
2082 void
2083 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2084   assert_locked();
2085   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2086          "linear allocation block should be empty");
2087   FreeChunk* fc;
2088   if (blk->_refillSize < SmallForDictionary &&
2089       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2090     // A linAB's strategy might be to use small sizes to reduce
2091     // fragmentation but still get the benefits of allocation from a
2092     // linAB.
2093   } else {
2094     fc = getChunkFromDictionary(blk->_refillSize);
2095   }
2096   if (fc != NULL) {
2097     blk->_ptr  = (HeapWord*)fc;
2098     blk->_word_size = fc->size();
2099     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2100   }
2101 }
2102 
2103 // Support for concurrent collection policy decisions.
2104 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2105   // In the future we might want to add in fragmentation stats --
2106   // including erosion of the "mountain" into this decision as well.
2107   return !adaptive_freelists() && linearAllocationWouldFail();
2108 }
2109 
2110 // Support for compaction
2111 
2112 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2113   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2114   // prepare_for_compaction() uses the space between live objects
2115   // so that the later phases can skip dead space quickly.  As a result,
2116   // verification of the free lists doesn't work afterwards.
2117 }
2118 
2119 #define obj_size(q) adjustObjectSize(oop(q)->size())
2120 #define adjust_obj_size(s) adjustObjectSize(s)
2121 
2122 void CompactibleFreeListSpace::adjust_pointers() {
2123   // In other versions of adjust_pointers(), a bail out
2124   // based on the amount of live data in the generation
2125   // (i.e., if 0, bail out) may be used.
2126   // Cannot test used() == 0 here because the free lists have already
2127   // been mangled by the compaction.
2128 
2129   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2130   // See note about verification in prepare_for_compaction().
2131 }
2132 
2133 void CompactibleFreeListSpace::compact() {
2134   SCAN_AND_COMPACT(obj_size);
2135 }
2136 
2137 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2138 // where fbs is free block sizes
2139 double CompactibleFreeListSpace::flsFrag() const {
2140   size_t itabFree = totalSizeInIndexedFreeLists();
2141   double frag = 0.0;
2142   size_t i;
2143 
2144   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2145     double sz  = i;
2146     frag      += _indexedFreeList[i].count() * (sz * sz);
2147   }
2148 
2149   double totFree = itabFree +
2150                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
2151   if (totFree > 0) {
2152     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2153             (totFree * totFree));
2154     frag = (double)1.0  - frag;
2155   } else {
2156     assert(frag == 0.0, "Follows from totFree == 0");
2157   }
2158   return frag;
2159 }
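
// Worked example of the metric (hypothetical free block populations): if all
// free space is a single 100-word block, frag = 1 - 100*100/(100*100) = 0;
// if the same 100 words are spread over ten 10-word blocks,
// frag = 1 - 10*(10*10)/(100*100) = 0.9, i.e. values closer to 1 mean more
// fragmented free space.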
2160 
2161 void CompactibleFreeListSpace::beginSweepFLCensus(
2162   float inter_sweep_current,
2163   float inter_sweep_estimate,
2164   float intra_sweep_estimate) {
2165   assert_locked();
2166   size_t i;
2167   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2168     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2169     if (PrintFLSStatistics > 1) {
2170       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2171     }
2172     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2173     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2174     fl->set_before_sweep(fl->count());
2175     fl->set_bfr_surp(fl->surplus());
2176   }
2177   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2178                                     inter_sweep_current,
2179                                     inter_sweep_estimate,
2180                                     intra_sweep_estimate);
2181 }
2182 
2183 void CompactibleFreeListSpace::setFLSurplus() {
2184   assert_locked();
2185   size_t i;
2186   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2187     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2188     fl->set_surplus(fl->count() -
2189                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2190   }
2191 }
2192 
2193 void CompactibleFreeListSpace::setFLHints() {
2194   assert_locked();
2195   size_t i;
2196   size_t h = IndexSetSize;
2197   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2198     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2199     fl->set_hint(h);
2200     if (fl->surplus() > 0) {
2201       h = i;
2202     }
2203   }
2204 }
2205 
2206 void CompactibleFreeListSpace::clearFLCensus() {
2207   assert_locked();
2208   size_t i;
2209   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2210     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2211     fl->set_prev_sweep(fl->count());
2212     fl->set_coal_births(0);
2213     fl->set_coal_deaths(0);
2214     fl->set_split_births(0);
2215     fl->set_split_deaths(0);
2216   }
2217 }
2218 
2219 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2220   if (PrintFLSStatistics > 0) {
2221     HeapWord* largestAddr = (HeapWord*) dictionary()->find_largest_dict();
2222     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2223                            largestAddr);
2224   }
2225   setFLSurplus();
2226   setFLHints();
2227   if (PrintGC && PrintFLSCensus > 0) {
2228     printFLCensus(sweep_count);
2229   }
2230   clearFLCensus();
2231   assert_locked();
2232   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2233 }
2234 
2235 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2236   if (size < SmallForDictionary) {
2237     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2238     return (fl->coal_desired() < 0) ||
2239            ((int)fl->count() > fl->coal_desired());
2240   } else {
2241     return dictionary()->coal_dict_over_populated(size);
2242   }
2243 }
2244 
2245 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2246   assert(size < SmallForDictionary, "Size too large for indexed list");
2247   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2248   fl->increment_coal_births();
2249   fl->increment_surplus();
2250 }
2251 
2252 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2253   assert(size < SmallForDictionary, "Size too large for indexed list");
2254   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2255   fl->increment_coal_deaths();
2256   fl->decrement_surplus();
2257 }
2258 
2259 void CompactibleFreeListSpace::coalBirth(size_t size) {
2260   if (size  < SmallForDictionary) {
2261     smallCoalBirth(size);
2262   } else {
2263     dictionary()->dict_census_update(size,
2264                                    false /* split */,
2265                                    true /* birth */);
2266   }
2267 }
2268 
2269 void CompactibleFreeListSpace::coalDeath(size_t size) {
2270   if(size  < SmallForDictionary) {
2271     smallCoalDeath(size);
2272   } else {
2273     dictionary()->dict_census_update(size,
2274                                    false /* split */,
2275                                    false /* birth */);
2276   }
2277 }
2278 
2279 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2280   assert(size < SmallForDictionary, "Size too large for indexed list");
2281   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2282   fl->increment_split_births();
2283   fl->increment_surplus();
2284 }
2285 
2286 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2287   assert(size < SmallForDictionary, "Size too large for indexed list");
2288   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2289   fl->increment_split_deaths();
2290   fl->decrement_surplus();
2291 }
2292 
2293 void CompactibleFreeListSpace::split_birth(size_t size) {
2294   if (size  < SmallForDictionary) {
2295     smallSplitBirth(size);
2296   } else {
2297     dictionary()->dict_census_update(size,
2298                                    true /* split */,
2299                                    true /* birth */);
2300   }
2301 }
2302 
2303 void CompactibleFreeListSpace::splitDeath(size_t size) {
2304   if (size  < SmallForDictionary) {
2305     smallSplitDeath(size);
2306   } else {
2307     dictionary()->dict_census_update(size,
2308                                    true /* split */,
2309                                    false /* birth */);
2310   }
2311 }
2312 
2313 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2314   size_t to2 = from - to1;
2315   splitDeath(from);
2316   split_birth(to1);
2317   split_birth(to2);
2318 }
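
// Census bookkeeping example (hypothetical sizes): split(100, 40) records a
// split death for the 100-word population and split births for the 40- and
// 60-word populations, updating either the indexed-list stats or the
// dictionary census depending on which side of SmallForDictionary each size
// falls.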
2319 
2320 void CompactibleFreeListSpace::print() const {
2321   print_on(tty);
2322 }
2323 
2324 void CompactibleFreeListSpace::prepare_for_verify() {
2325   assert_locked();
2326   repairLinearAllocationBlocks();
2327   // Verify that the SpoolBlocks look like free blocks of
2328   // appropriate sizes... To be done ...
2329 }
2330 
2331 class VerifyAllBlksClosure: public BlkClosure {
2332  private:
2333   const CompactibleFreeListSpace* _sp;
2334   const MemRegion                 _span;
2335   HeapWord*                       _last_addr;
2336   size_t                          _last_size;
2337   bool                            _last_was_obj;
2338   bool                            _last_was_live;
2339 
2340  public:
2341   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2342     MemRegion span) :  _sp(sp), _span(span),
2343                        _last_addr(NULL), _last_size(0),
2344                        _last_was_obj(false), _last_was_live(false) { }
2345 
2346   virtual size_t do_blk(HeapWord* addr) {
2347     size_t res;
2348     bool   was_obj  = false;
2349     bool   was_live = false;
2350     if (_sp->block_is_obj(addr)) {
2351       was_obj = true;
2352       oop p = oop(addr);
2353       guarantee(p->is_oop(), "Should be an oop");
2354       res = _sp->adjustObjectSize(p->size());
2355       if (_sp->obj_is_alive(addr)) {
2356         was_live = true;
2357         p->verify();
2358       }
2359     } else {
2360       FreeChunk* fc = (FreeChunk*)addr;
2361       res = fc->size();
2362       if (FLSVerifyLists && !fc->cantCoalesce()) {
2363         guarantee(_sp->verify_chunk_in_free_list(fc),
2364                   "Chunk should be on a free list");
2365       }
2366     }
2367     if (res == 0) {
2368       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2369       gclog_or_tty->print_cr(
2370         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2371         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2372         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2373         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2374       _sp->print_on(gclog_or_tty);
2375       guarantee(false, "Seppuku!");
2376     }
2377     _last_addr = addr;
2378     _last_size = res;
2379     _last_was_obj  = was_obj;
2380     _last_was_live = was_live;
2381     return res;
2382   }
2383 };
2384 
2385 class VerifyAllOopsClosure: public OopClosure {
2386  private:
2387   const CMSCollector*             _collector;
2388   const CompactibleFreeListSpace* _sp;
2389   const MemRegion                 _span;
2390   const bool                      _past_remark;
2391   const CMSBitMap*                _bit_map;
2392 
2393  protected:
2394   void do_oop(void* p, oop obj) {
2395     if (_span.contains(obj)) { // the interior oop points into CMS heap
2396       if (!_span.contains(p)) { // reference from outside CMS heap
2397         // Should be a valid object; the first disjunct below allows
2398         // us to sidestep an assertion in block_is_obj() that insists
2399         // that p be in _sp. Note that several generations (and spaces)
2400         // are spanned by _span (CMS heap) above.
2401         guarantee(!_sp->is_in_reserved(obj) ||
2402                   _sp->block_is_obj((HeapWord*)obj),
2403                   "Should be an object");
2404         guarantee(obj->is_oop(), "Should be an oop");
2405         obj->verify();
2406         if (_past_remark) {
2407           // Remark has been completed, the object should be marked
2408           _bit_map->isMarked((HeapWord*)obj);
2409         }
2410       } else { // reference within CMS heap
2411         if (_past_remark) {
2412           // Remark has been completed -- so the referent should have
2413           // been marked, if referring object is.
2414           if (_bit_map->isMarked(_collector->block_start(p))) {
2415             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2416           }
2417         }
2418       }
2419     } else if (_sp->is_in_reserved(p)) {
2420       // the reference is from FLS, and points out of FLS
2421       guarantee(obj->is_oop(), "Should be an oop");
2422       obj->verify();
2423     }
2424   }
2425 
2426   template <class T> void do_oop_work(T* p) {
2427     T heap_oop = oopDesc::load_heap_oop(p);
2428     if (!oopDesc::is_null(heap_oop)) {
2429       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2430       do_oop(p, obj);
2431     }
2432   }
2433 
2434  public:
2435   VerifyAllOopsClosure(const CMSCollector* collector,
2436     const CompactibleFreeListSpace* sp, MemRegion span,
2437     bool past_remark, CMSBitMap* bit_map) :
2438     _collector(collector), _sp(sp), _span(span),
2439     _past_remark(past_remark), _bit_map(bit_map) { }
2440 
2441   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2442   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2443 };
2444 
2445 void CompactibleFreeListSpace::verify() const {
2446   assert_lock_strong(&_freelistLock);
2447   verify_objects_initialized();
2448   MemRegion span = _collector->_span;
2449   bool past_remark = (_collector->abstract_state() ==
2450                       CMSCollector::Sweeping);
2451 
2452   ResourceMark rm;
2453   HandleMark  hm;
2454 
2455   // Check integrity of CFL data structures
2456   _promoInfo.verify();
2457   _dictionary->verify();
2458   if (FLSVerifyIndexTable) {
2459     verifyIndexedFreeLists();
2460   }
2461   // Check integrity of all objects and free blocks in space
2462   {
2463     VerifyAllBlksClosure cl(this, span);
2464     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2465   }
2466   // Check that all references in the heap to FLS
2467   // are to valid objects in FLS or that references in
2468   // FLS are to valid objects elsewhere in the heap
2469   if (FLSVerifyAllHeapReferences)
2470   {
2471     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2472       _collector->markBitMap());
2473     CollectedHeap* ch = Universe::heap();
2474 
2475     // Iterate over all oops in the heap. Uses the _no_header version
2476     // since we are not interested in following the klass pointers.
2477     ch->oop_iterate_no_header(&cl);
2478   }
2479 
2480   if (VerifyObjectStartArray) {
2481     // Verify the block offset table
2482     _bt.verify();
2483   }
2484 }
2485 
2486 #ifndef PRODUCT
2487 void CompactibleFreeListSpace::verifyFreeLists() const {
2488   if (FLSVerifyLists) {
2489     _dictionary->verify();
2490     verifyIndexedFreeLists();
2491   } else {
2492     if (FLSVerifyDictionary) {
2493       _dictionary->verify();
2494     }
2495     if (FLSVerifyIndexTable) {
2496       verifyIndexedFreeLists();
2497     }
2498   }
2499 }
2500 #endif
2501 
2502 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2503   size_t i = 0;
2504   for (; i < IndexSetStart; i++) {
2505     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2506   }
2507   for (; i < IndexSetSize; i++) {
2508     verifyIndexedFreeList(i);
2509   }
2510 }
2511 
2512 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2513   FreeChunk* fc   =  _indexedFreeList[size].head();
2514   FreeChunk* tail =  _indexedFreeList[size].tail();
2515   size_t    num = _indexedFreeList[size].count();
2516   size_t      n = 0;
2517   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2518             "Slot should have been empty");
2519   for (; fc != NULL; fc = fc->next(), n++) {
2520     guarantee(fc->size() == size, "Size inconsistency");
2521     guarantee(fc->is_free(), "!free?");
2522     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2523     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2524   }
2525   guarantee(n == num, "Incorrect count");
2526 }
2527 
2528 #ifndef PRODUCT
2529 void CompactibleFreeListSpace::check_free_list_consistency() const {
2530   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size() <= IndexSetSize),
2531     "Some sizes can't be allocated without recourse to"
2532     " linear allocation buffers");
2533   assert((TreeChunk<FreeChunk, AdaptiveFreeList>::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList>)),
2534     "else MIN_TREE_CHUNK_SIZE is wrong");
2535   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2536   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2537 }
2538 #endif
2539 
2540 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2541   assert_lock_strong(&_freelistLock);
2542   AdaptiveFreeList<FreeChunk> total;
2543   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2544   AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2545   size_t total_free = 0;
2546   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2547     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2548     total_free += fl->count() * fl->size();
2549     if (i % (40*IndexSetStride) == 0) {
2550       AdaptiveFreeList<FreeChunk>::print_labels_on(gclog_or_tty, "size");
2551     }
2552     fl->print_on(gclog_or_tty);
2553     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2554     total.set_surplus(    total.surplus()     + fl->surplus()    );
2555     total.set_desired(    total.desired()     + fl->desired()    );
2556     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2557     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2558     total.set_count(      total.count()       + fl->count()      );
2559     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2560     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2561     total.set_split_births(total.split_births() + fl->split_births());
2562     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2563   }
2564   total.print_on(gclog_or_tty, "TOTAL");
2565   gclog_or_tty->print_cr("Total free in indexed lists "
2566                          SIZE_FORMAT " words", total_free);
2567   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2568     (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2569             (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2570     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2571   _dictionary->print_dict_census();
2572 }
2573 
2574 ///////////////////////////////////////////////////////////////////////////
2575 // CFLS_LAB
2576 ///////////////////////////////////////////////////////////////////////////
2577 
2578 #define VECTOR_257(x)                                                                                  \
2579   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2580   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2581      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2582      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2583      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2584      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2585      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2586      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2587      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2588      x }
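// The macro body above expands to 8 rows of 32 initializers plus one trailing
// entry, i.e. 257 in all -- one per indexed free list slot (IndexSetSize).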
2589 
2590 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2591 // OldPLABSize, whose static default is different; if overridden at the
2592 // command-line, this will get reinitialized via a call to
2593 // modify_initialization() below.
2594 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2595   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2596 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2597 uint   CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2598 
2599 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2600   _cfls(cfls)
2601 {
2602   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2603   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2604        i < CompactibleFreeListSpace::IndexSetSize;
2605        i += CompactibleFreeListSpace::IndexSetStride) {
2606     _indexedFreeList[i].set_size(i);
2607     _num_blocks[i] = 0;
2608   }
2609 }
2610 
2611 static bool _CFLS_LAB_modified = false;
2612 
2613 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2614   assert(!_CFLS_LAB_modified, "Call only once");
2615   _CFLS_LAB_modified = true;
2616   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2617        i < CompactibleFreeListSpace::IndexSetSize;
2618        i += CompactibleFreeListSpace::IndexSetStride) {
2619     _blocks_to_claim[i].modify(n, wt, true /* force */);
2620   }
2621 }
2622 
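// Allocate word_sz heap words for a promoted object. Sizes of IndexSetSize
// words or more are carved out of the shared dictionary under
// parDictionaryAllocLock(); smaller sizes are served from this thread's local
// indexed free lists, which are refilled from the global pool when empty.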
2623 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2624   FreeChunk* res;
2625   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2626   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2627     // This locking manages sync with other large object allocations.
2628     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2629                     Mutex::_no_safepoint_check_flag);
2630     res = _cfls->getChunkFromDictionaryExact(word_sz);
2631     if (res == NULL) return NULL;
2632   } else {
2633     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2634     if (fl->count() == 0) {
2635       // Attempt to refill this local free list.
2636       get_from_global_pool(word_sz, fl);
2637       // If it didn't work, give up.
2638       if (fl->count() == 0) return NULL;
2639     }
2640     res = fl->get_chunk_at_head();
2641     assert(res != NULL, "Why was count non-zero?");
2642   }
2643   res->markNotFree();
2644   assert(!res->is_free(), "shouldn't be marked free");
2645   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2646   // mangle a just allocated object with a distinct pattern.
2647   debug_only(res->mangleAllocated(word_sz));
2648   return (HeapWord*)res;
2649 }
2650 
2651 // Get a chunk of blocks of the right size and update related
2652 // book-keeping stats
2653 void CFLS_LAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2654   // Get the #blocks we want to claim
2655   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2656   assert(n_blks > 0, "Error");
  assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2658   // In some cases, when the application has a phase change,
2659   // there may be a sudden and sharp shift in the object survival
2660   // profile, and updating the counts at the end of a scavenge
2661   // may not be quick enough, giving rise to large scavenge pauses
2662   // during these phase changes. It is beneficial to detect such
2663   // changes on-the-fly during a scavenge and avoid such a phase-change
2664   // pothole. The following code is a heuristic attempt to do that.
2665   // It is protected by a product flag until we have gained
2666   // enough experience with this heuristic and fine-tuned its behaviour.
2667   // WARNING: This might increase fragmentation if we overreact to
2668   // small spikes, so some kind of historical smoothing based on
2669   // previous experience with the greater reactivity might be useful.
2670   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2671   // default.
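  // Illustrative arithmetic (hypothetical numbers, not flag defaults): suppose
  // the average gives n_blks == 50 but this thread has already consumed
  // _num_blocks[word_sz] == 2000 blocks of this size, and
  // CMSOldPLABToleranceFactor * CMSOldPLABNumRefills == 10. Then
  //   multiple = 2000 / (10 * 50) = 4
  // and, with CMSOldPLABReactivityFactor == 2, the claim grows by
  //   2 * 4 * 50 = 400 to n_blks == 450, capped at CMSOldPLABMax.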
2672   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2673     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2674     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2675     n_blks = MIN2(n_blks, CMSOldPLABMax);
2676   }
2677   assert(n_blks > 0, "Error");
2678   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2679   // Update stats table entry for this block size
2680   _num_blocks[word_sz] += fl->count();
2681 }
2682 
2683 void CFLS_LAB::compute_desired_plab_size() {
2684   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2685        i < CompactibleFreeListSpace::IndexSetSize;
2686        i += CompactibleFreeListSpace::IndexSetStride) {
2687     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2688            "Counter inconsistency");
2689     if (_global_num_workers[i] > 0) {
2690       // Need to smooth wrt historical average
2691       if (ResizeOldPLAB) {
2692         _blocks_to_claim[i].sample(
2693           MAX2((size_t)CMSOldPLABMin,
2694           MIN2((size_t)CMSOldPLABMax,
2695                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2696       }
2697       // Reset counters for next round
2698       _global_num_workers[i] = 0;
2699       _global_num_blocks[i] = 0;
2700       if (PrintOldPLAB) {
2701         gclog_or_tty->print_cr("[%d]: %d", i, (size_t)_blocks_to_claim[i].average());
2702       }
2703     }
2704   }
2705 }
2706 
2707 // If this is changed in the future to allow parallel
2708 // access, one would need to take the FL locks and,
2709 // depending on how it is used, stagger access from
2710 // parallel threads to reduce contention.
2711 void CFLS_LAB::retire(int tid) {
2712   // We run this single threaded with the world stopped;
2713   // so no need for locks and such.
2714   NOT_PRODUCT(Thread* t = Thread::current();)
2715   assert(Thread::current()->is_VM_thread(), "Error");
2716   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2717        i < CompactibleFreeListSpace::IndexSetSize;
2718        i += CompactibleFreeListSpace::IndexSetStride) {
2719     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2720            "Can't retire more than what we obtained");
2721     if (_num_blocks[i] > 0) {
2722       size_t num_retire =  _indexedFreeList[i].count();
2723       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2724       {
2725         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2726         //                Mutex::_no_safepoint_check_flag);
2727 
2728         // Update globals stats for num_blocks used
2729         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2730         _global_num_workers[i]++;
2731         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2732         if (num_retire > 0) {
2733           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2734           // Reset this list.
2735           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2736           _indexedFreeList[i].set_size(i);
2737         }
2738       }
2739       if (PrintOldPLAB) {
2740         gclog_or_tty->print_cr("%d[%d]: %d/%d/%d",
2741                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2742       }
2743       // Reset stats for next round
2744       _num_blocks[i]         = 0;
2745     }
2746   }
2747 }
2748 
void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2750   assert(fl->count() == 0, "Precondition.");
2751   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2752          "Precondition");
2753 
2754   // We'll try all multiples of word_sz in the indexed set, starting with
2755   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2756   // then try getting a big chunk and splitting it.
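  // Illustrative example (hypothetical sizes): for word_sz == 8 and n == 64 we
  // first try the 8-word list itself (k == 1); if it is empty and
  // CMSSplitIndexedFreeListBlocks is set, we take up to 32 chunks from the
  // 16-word list and split each of them in two (k == 2), and so on up the
  // multiples while k * word_sz stays below IndexSetSize. Only if every such
  // list is empty do we fall through and carve a large dictionary chunk below.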
2757   {
2758     bool found;
2759     int  k;
2760     size_t cur_sz;
2761     for (k = 1, cur_sz = k * word_sz, found = false;
2762          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2763          (CMSSplitIndexedFreeListBlocks || k <= 1);
2764          k++, cur_sz = k * word_sz) {
2765       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2766       fl_for_cur_sz.set_size(cur_sz);
2767       {
2768         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2769                         Mutex::_no_safepoint_check_flag);
2770         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2771         if (gfl->count() != 0) {
2772           // nn is the number of chunks of size cur_sz that
2773           // we'd need to split k-ways each, in order to create
2774           // "n" chunks of size word_sz each.
2775           const size_t nn = MAX2(n/k, (size_t)1);
2776           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2777           found = true;
2778           if (k > 1) {
2779             // Update split death stats for the cur_sz-size blocks list:
2780             // we increment the split death count by the number of blocks
2781             // we just took from the cur_sz-size blocks list and which
2782             // we will be splitting below.
2783             ssize_t deaths = gfl->split_deaths() +
2784                              fl_for_cur_sz.count();
2785             gfl->set_split_deaths(deaths);
2786           }
2787         }
2788       }
2789       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2790       if (found) {
2791         if (k == 1) {
2792           fl->prepend(&fl_for_cur_sz);
2793         } else {
2794           // Divide each block on fl_for_cur_sz up k ways.
2795           FreeChunk* fc;
2796           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2797             // Must do this in reverse order, so that anybody attempting to
2798             // access the main chunk sees it as a single free block until we
2799             // change it.
2800             size_t fc_size = fc->size();
2801             assert(fc->is_free(), "Error");
2802             for (int i = k-1; i >= 0; i--) {
2803               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2804               assert((i != 0) ||
2805                         ((fc == ffc) && ffc->is_free() &&
2806                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2807                         "Counting error");
2808               ffc->set_size(word_sz);
2809               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2810               ffc->link_next(NULL);
2811               // Above must occur before BOT is updated below.
2812               OrderAccess::storestore();
2813               // splitting from the right, fc_size == i * word_sz
2814               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2815               fc_size -= word_sz;
2816               assert(fc_size == i*word_sz, "Error");
2817               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2818               _bt.verify_single_block((HeapWord*)fc, fc_size);
2819               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2820               // Push this on "fl".
2821               //
              // Note: deallocate_pages=false (3rd arg). There is no need to
              // deallocate pages for this free chunk when it is returned to
              // the free lists, because the original chunk came from the free
              // lists and its pages are already deallocated. Since this
              // function runs on every object promotion, skipping the
              // unnecessary os::deallocate_pages() calls saves time. The same
              // applies to the other returnChunk... calls in this function.
2831               fl->return_chunk_at_head(ffc, true, false);
2832             }
2833             // TRAP
2834             assert(fl->tail()->next() == NULL, "List invariant.");
2835           }
2836         }
2837         // Update birth stats for this block size.
2838         size_t num = fl->count();
2839         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2840                         Mutex::_no_safepoint_check_flag);
2841         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2842         _indexedFreeList[word_sz].set_split_births(births);
2843         return;
2844       }
2845     }
2846   }
2847   // Otherwise, we'll split a block from the dictionary.
2848   FreeChunk* fc = NULL;
2849   FreeChunk* rem_fc = NULL;
2850   size_t rem;
2851   {
2852     MutexLockerEx x(parDictionaryAllocLock(),
2853                     Mutex::_no_safepoint_check_flag);
2854     while (n > 0) {
2855       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2856                                   FreeBlockDictionary<FreeChunk>::atLeast);
2857       if (fc != NULL) {
2858         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2859         dictionary()->dict_census_update(fc->size(),
2860                                        true /*split*/,
2861                                        false /*birth*/);
2862         break;
2863       } else {
2864         n--;
2865       }
2866     }
2867     if (fc == NULL) return;
2868     // Otherwise, split up that block.
2869     assert((ssize_t)n >= 1, "Control point invariant");
2870     assert(fc->is_free(), "Error: should be a free block");
2871     _bt.verify_single_block((HeapWord*)fc, fc->size());
2872     const size_t nn = fc->size() / word_sz;
2873     n = MIN2(nn, n);
2874     assert((ssize_t)n >= 1, "Control point invariant");
2875     rem = fc->size() - n * word_sz;
2876     // If there is a remainder, and it's too small, allocate one fewer.
2877     if (rem > 0 && rem < MinChunkSize) {
2878       n--; rem += word_sz;
2879     }
2880     // Note that at this point we may have n == 0.
2881     assert((ssize_t)n >= 0, "Control point invariant");
2882 
2883     // If n is 0, the chunk fc that was found is not large
2884     // enough to leave a viable remainder.  We are unable to
2885     // allocate even one block.  Return fc to the
2886     // dictionary and return, leaving "fl" empty.
2887     if (n == 0) {
2888       returnChunkToDictionary(fc, false);
2889       assert(fl->count() == 0, "We never allocated any blocks");
2890       return;
2891     }
2892 
2893     // First return the remainder, if any.
2894     // Note that we hold the lock until we decide if we're going to give
2895     // back the remainder to the dictionary, since a concurrent allocation
2896     // may otherwise see the heap as empty.  (We're willing to take that
2897     // hit if the block is a small block.)
2898     if (rem > 0) {
2899       size_t prefix_size = n * word_sz;
2900       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2901       rem_fc->set_size(rem);
2902       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2903       rem_fc->link_next(NULL);
2904       // Above must occur before BOT is updated below.
2905       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2906       OrderAccess::storestore();
2907       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2908       assert(fc->is_free(), "Error");
2909       fc->set_size(prefix_size);
2910       if (rem >= IndexSetSize) {
2911         returnChunkToDictionary(rem_fc, false);
2912         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2913         rem_fc = NULL;
2914       }
2915       // Otherwise, return it to the small list below.
2916     }
2917   }
2918   if (rem_fc != NULL) {
2919     MutexLockerEx x(_indexedFreeListParLocks[rem],
2920                     Mutex::_no_safepoint_check_flag);
2921     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2922     _indexedFreeList[rem].return_chunk_at_head(rem_fc, true, false);
2923     smallSplitBirth(rem);
2924   }
2925   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2926   // Now do the splitting up.
2927   // Must do this in reverse order, so that anybody attempting to
2928   // access the main chunk sees it as a single free block until we
2929   // change it.
2930   size_t fc_size = n * word_sz;
2931   // All but first chunk in this loop
2932   for (ssize_t i = n-1; i > 0; i--) {
2933     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2934     ffc->set_size(word_sz);
2935     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2936     ffc->link_next(NULL);
2937     // Above must occur before BOT is updated below.
2938     OrderAccess::storestore();
    // splitting from the right, fc_size == i * word_sz after the decrement below
2940     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2941     fc_size -= word_sz;
2942     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2943     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2944     _bt.verify_single_block((HeapWord*)fc, fc_size);
2945     // Push this on "fl".
2946     fl->return_chunk_at_head(ffc, true, false);
2947   }
2948   // First chunk
2949   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2950   // The blocks above should show their new sizes before the first block below
2951   fc->set_size(word_sz);
2952   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2953   fc->link_next(NULL);
2954   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2955   _bt.verify_single_block((HeapWord*)fc, fc->size());
2956   fl->return_chunk_at_head(fc, true, false);
2957 
2958   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2959   {
2960     // Update the stats for this block size.
2961     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2962                     Mutex::_no_safepoint_check_flag);
2963     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2964     _indexedFreeList[word_sz].set_split_births(births);
2965     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2966     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2967   }
2968 
2969   // TRAP
2970   assert(fl->tail()->next() == NULL, "List invariant.");
2971 }
2972 
2973 // Set up the space's par_seq_tasks structure for work claiming
2974 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2975 // XXX Need to suitably abstract and generalize this and the next
2976 // method into one.
2977 void
2978 CompactibleFreeListSpace::
2979 initialize_sequential_subtasks_for_rescan(int n_threads) {
2980   // The "size" of each task is fixed according to rescan_task_size.
2981   assert(n_threads > 0, "Unexpected n_threads argument");
2982   const size_t task_size = rescan_task_size();
2983   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
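  // Ceiling division: e.g. (illustrative numbers) a 1000-word used region with
  // a 256-word task size yields 4 tasks, the last one covering a partial stride.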
2984   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2985   assert(n_tasks == 0 ||
2986          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2987           (used_region().start() + n_tasks*task_size >= used_region().end())),
2988          "n_tasks calculation incorrect");
2989   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2990   assert(!pst->valid(), "Clobbering existing data?");
2991   // Sets the condition for completion of the subtask (how many threads
2992   // need to finish in order to be done).
2993   pst->set_n_threads(n_threads);
2994   pst->set_n_tasks((int)n_tasks);
2995 }
2996 
2997 // Set up the space's par_seq_tasks structure for work claiming
2998 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2999 void
3000 CompactibleFreeListSpace::
3001 initialize_sequential_subtasks_for_marking(int n_threads,
3002                                            HeapWord* low) {
3003   // The "size" of each task is fixed according to rescan_task_size.
3004   assert(n_threads > 0, "Unexpected n_threads argument");
3005   const size_t task_size = marking_task_size();
3006   assert(task_size > CardTableModRefBS::card_size_in_words &&
3007          (task_size %  CardTableModRefBS::card_size_in_words == 0),
3008          "Otherwise arithmetic below would be incorrect");
3009   MemRegion span = _gen->reserved();
3010   if (low != NULL) {
3011     if (span.contains(low)) {
3012       // Align low down to  a card boundary so that
3013       // we can use block_offset_careful() on span boundaries.
3014       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
3015                                  CardTableModRefBS::card_size);
3016       // Clip span prefix at aligned_low
3017       span = span.intersection(MemRegion(aligned_low, span.end()));
3018     } else if (low > span.end()) {
3019       span = MemRegion(low, low);  // Null region
3020     } // else use entire span
3021   }
3022   assert(span.is_empty() ||
3023          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
3024         "span should start at a card boundary");
3025   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
3026   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
3027   assert(n_tasks == 0 ||
3028          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
3029           (span.start() + n_tasks*task_size >= span.end())),
3030          "n_tasks calculation incorrect");
3031   SequentialSubTasksDone* pst = conc_par_seq_tasks();
3032   assert(!pst->valid(), "Clobbering existing data?");
3033   // Sets the condition for completion of the subtask (how many threads
3034   // need to finish in order to be done).
3035   pst->set_n_threads(n_threads);
3036   pst->set_n_tasks((int)n_tasks);
3037 }