1 /*
   2  * Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsLockVerifier.hpp"
  27 #include "gc/cms/compactibleFreeListSpace.hpp"
  28 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  29 #include "gc/cms/concurrentMarkSweepThread.hpp"
  30 #include "gc/shared/blockOffsetTable.inline.hpp"
  31 #include "gc/shared/collectedHeap.inline.hpp"
  32 #include "gc/shared/genCollectedHeap.hpp"
  33 #include "gc/shared/space.inline.hpp"
  34 #include "gc/shared/spaceDecorator.hpp"
  35 #include "logging/logStream.inline.hpp"
  36 #include "memory/allocation.inline.hpp"
  37 #include "memory/resourceArea.hpp"
  38 #include "memory/universe.inline.hpp"
  39 #include "oops/oop.inline.hpp"
  40 #include "runtime/globals.hpp"
  41 #include "runtime/handles.inline.hpp"
  42 #include "runtime/init.hpp"
  43 #include "runtime/java.hpp"
  44 #include "runtime/orderAccess.inline.hpp"
  45 #include "runtime/vmThread.hpp"
  46 #include "utilities/copy.hpp"
  47 
  48 /////////////////////////////////////////////////////////////////////////
  49 //// CompactibleFreeListSpace
  50 /////////////////////////////////////////////////////////////////////////
  51 
// Highest rank used for the free list locks.
  53 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  54 
  55 // Defaults are 0 so things will break badly if incorrectly initialized.
  56 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  57 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  58 
  59 size_t MinChunkSize = 0;
  60 
  61 void CompactibleFreeListSpace::set_cms_values() {
  62   // Set CMS global values
  63   assert(MinChunkSize == 0, "already set");
  64 
  65   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  66   // for chunks to contain a FreeChunk.
  67   size_t min_chunk_size_in_bytes = align_size_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  68   MinChunkSize = min_chunk_size_in_bytes / BytesPerWord;
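  // For example, on a typical 64-bit build where sizeof(FreeChunk) is three
  // words and MinObjAlignmentInBytes is 8, MinChunkSize works out to three
  // heap words.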
  69 
  70   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  71   IndexSetStart  = MinChunkSize;
  72   IndexSetStride = MinObjAlignment;
  73 }
  74 
  75 // Constructor
  76 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  77   _bt(bs, mr),
  // Free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks
  // are acquired in the program text. This is true today.
  83   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  84                 Monitor::_safepoint_check_sometimes),
  85   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  86                           "CompactibleFreeListSpace._dict_par_lock", true,
  87                           Monitor::_safepoint_check_never),
  88   _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
  89                     CMSRescanMultiple),
  90   _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
  91                     CMSConcMarkMultiple),
  92   _collector(NULL),
  93   _preconsumptionDirtyCardClosure(NULL)
  94 {
  95   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
  96          "FreeChunk is larger than expected");
  97   _bt.set_space(this);
  98   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  99 
 100   _dictionary = new AFLBinaryTreeDictionary(mr);
 101 
 102   assert(_dictionary != NULL, "CMS dictionary initialization");
 103   // The indexed free lists are initially all empty and are lazily
 104   // filled in on demand. Initialize the array elements to NULL.
 105   initializeIndexedFreeListArray();
 106 
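  // The small linAB starts out empty (NULL ptr, zero word size); it is
  // refilled in units of 1024*SmallForLinearAlloc words and only serves
  // requests below its allocation size limit of SmallForLinearAlloc words.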
 107   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 108                              SmallForLinearAlloc);
 109 
 110   // CMSIndexedFreeListReplenish should be at least 1
 111   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 112   _promoInfo.setSpace(this);
 113   if (UseCMSBestFit) {
 114     _fitStrategy = FreeBlockBestFitFirst;
 115   } else {
 116     _fitStrategy = FreeBlockStrategyNone;
 117   }
 118   check_free_list_consistency();
 119 
 120   // Initialize locks for parallel case.
 121   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 123                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 124     DEBUG_ONLY(
 125       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 126     )
 127   }
 128   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 129 }
 130 
 131 // Like CompactibleSpace forward() but always calls cross_threshold() to
 132 // update the block offset table.  Removed initialize_threshold call because
 133 // CFLS does not use a block offset array for contiguous spaces.
 134 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 135                                     CompactPoint* cp, HeapWord* compact_top) {
 136   // q is alive
 137   // First check if we should switch compaction space
 138   assert(this == cp->space, "'this' should be current compaction space.");
 139   size_t compaction_max_size = pointer_delta(end(), compact_top);
 140   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 141     "virtual adjustObjectSize_v() method is not correct");
 142   size_t adjusted_size = adjustObjectSize(size);
 143   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 144          "no small fragments allowed");
 145   assert(minimum_free_block_size() == MinChunkSize,
 146          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 148   if (adjusted_size + MinChunkSize > compaction_max_size &&
 149       adjusted_size != compaction_max_size) {
 150     do {
 151       // switch to next compaction space
 152       cp->space->set_compaction_top(compact_top);
 153       cp->space = cp->space->next_compaction_space();
 154       if (cp->space == NULL) {
 155         cp->gen = GenCollectedHeap::heap()->young_gen();
 156         assert(cp->gen != NULL, "compaction must succeed");
 157         cp->space = cp->gen->first_compaction_space();
 158         assert(cp->space != NULL, "generation must have a first compaction space");
 159       }
 160       compact_top = cp->space->bottom();
 161       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; above we avoided it to save the virtual
      // dispatch.
 166       adjusted_size = cp->space->adjust_object_size_v(size);
 167       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 168       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 169     } while (adjusted_size > compaction_max_size);
 170   }
 171 
 172   // store the forwarding pointer into the mark word
 173   if ((HeapWord*)q != compact_top) {
 174     q->forward_to(oop(compact_top));
 175     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 176   } else {
 177     // if the object isn't moving we can just set the mark to the default
 178     // mark and handle it specially later on.
 179     q->init_mark();
 180     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 181   }
 182 
 183   compact_top += adjusted_size;
 184 
 185   // we need to update the offset table so that the beginnings of objects can be
 186   // found during scavenge.  Note that we are updating the offset table based on
 187   // where the object will be once the compaction phase finishes.
 188 
  // Always call cross_threshold().  A contiguous space can only call it when
  // the compaction_top exceeds the current threshold, but that restriction
  // does not apply to a non-contiguous space.
 192   cp->threshold =
 193     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 194   return compact_top;
 195 }
 196 
 197 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 198 // and use of single_block instead of alloc_block.  The name here is not really
 199 // appropriate - maybe a more general name could be invented for both the
 200 // contiguous and noncontiguous spaces.
 201 
 202 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 203   _bt.single_block(start, the_end);
 204   return end();
 205 }
 206 
// Initialize the indexed free lists: all lists start out empty.
 208 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 209   for (size_t i = 0; i < IndexSetSize; i++) {
 210     // Note that on platforms where objects are double word aligned,
 211     // the odd array elements are not used.  It is convenient, however,
 212     // to map directly from the object size to the array element.
 213     _indexedFreeList[i].reset(IndexSetSize);
 214     _indexedFreeList[i].set_size(i);
 215     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 216     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 217     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 219   }
 220 }
 221 
 222 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 223   return adjustObjectSize(oop(addr)->size());
 224 }
 225 
 226 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 227   for (size_t i = 1; i < IndexSetSize; i++) {
 228     assert(_indexedFreeList[i].size() == (size_t) i,
 229       "Indexed free list sizes are incorrect");
 230     _indexedFreeList[i].reset(IndexSetSize);
 231     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 232     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 233     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 234     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 235   }
 236 }
 237 
 238 void CompactibleFreeListSpace::reset(MemRegion mr) {
 239   resetIndexedFreeListArray();
 240   dictionary()->reset();
 241   if (BlockOffsetArrayUseUnallocatedBlock) {
 242     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 243     // Everything's allocated until proven otherwise.
 244     _bt.set_unallocated_block(end());
 245   }
 246   if (!mr.is_empty()) {
 247     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 248     _bt.single_block(mr.start(), mr.word_size());
 249     FreeChunk* fc = (FreeChunk*) mr.start();
 250     fc->set_size(mr.word_size());
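    // Chunks of at least IndexSetSize words go to the dictionary; smaller
    // chunks go onto the corresponding indexed free list.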
    if (mr.word_size() >= IndexSetSize) {
 252       returnChunkToDictionary(fc);
 253     } else {
 254       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 255       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 256     }
 257     coalBirth(mr.word_size());
 258   }
 259   _promoInfo.reset();
 260   _smallLinearAllocBlock._ptr = NULL;
 261   _smallLinearAllocBlock._word_size = 0;
 262 }
 263 
 264 void CompactibleFreeListSpace::reset_after_compaction() {
 265   // Reset the space to the new reality - one free chunk.
 266   MemRegion mr(compaction_top(), end());
 267   reset(mr);
 268   // Now refill the linear allocation block(s) if possible.
 269   refillLinearAllocBlocksIfNeeded();
 270 }
 271 
 272 // Walks the entire dictionary, returning a coterminal
 273 // chunk, if it exists. Use with caution since it involves
 274 // a potentially complete walk of a potentially large tree.
 275 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 276 
 277   assert_lock_strong(&_freelistLock);
 278 
 279   return dictionary()->find_chunk_ends_at(end());
 280 }
 281 
 282 
 283 #ifndef PRODUCT
 284 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 285   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 286     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 287   }
 288 }
 289 
 290 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 291   size_t sum = 0;
 292   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 293     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 294   }
 295   return sum;
 296 }
 297 
 298 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 299   size_t count = 0;
 300   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 301     debug_only(
 302       ssize_t total_list_count = 0;
 303       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 304          fc = fc->next()) {
 305         total_list_count++;
 306       }
 307       assert(total_list_count ==  _indexedFreeList[i].count(),
 308         "Count in list is incorrect");
 309     )
 310     count += _indexedFreeList[i].count();
 311   }
 312   return count;
 313 }
 314 
 315 size_t CompactibleFreeListSpace::totalCount() {
 316   size_t num = totalCountInIndexedFreeLists();
 317   num +=  dictionary()->total_count();
 318   if (_smallLinearAllocBlock._word_size != 0) {
 319     num++;
 320   }
 321   return num;
 322 }
 323 #endif
 324 
 325 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 326   FreeChunk* fc = (FreeChunk*) p;
 327   return fc->is_free();
 328 }
 329 
 330 size_t CompactibleFreeListSpace::used() const {
 331   return capacity() - free();
 332 }
 333 
 334 size_t CompactibleFreeListSpace::free() const {
 335   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 336   // if you do this while the structures are in flux you
 337   // may get an approximate answer only; for instance
 338   // because there is concurrent allocation either
 339   // directly by mutators or for promotion during a GC.
 340   // It's "MT-safe", however, in the sense that you are guaranteed
 341   // not to crash and burn, for instance, because of walking
 342   // pointers that could disappear as you were walking them.
 343   // The approximation is because the various components
  // that are read below are not read atomically (and,
  // further, the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 347   // this is during a resize operation at the end of GC
 348   // and at that time you are guaranteed to get the
 349   // correct actual value. However, for instance, this is
 350   // also read completely asynchronously by the "perf-sampler"
 351   // that supports jvmstat, and you are apt to see the values
 352   // flicker in such cases.
 353   assert(_dictionary != NULL, "No _dictionary?");
 354   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 355           totalSizeInIndexedFreeLists() +
 356           _smallLinearAllocBlock._word_size) * HeapWordSize;
 357 }
 358 
 359 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 360   assert(_dictionary != NULL, "No _dictionary?");
 361   assert_locked();
 362   size_t res = _dictionary->max_chunk_size();
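  // The small linAB only serves requests below SmallForLinearAlloc words,
  // so cap its contribution to the answer accordingly.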
 363   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 364                        (size_t) SmallForLinearAlloc - 1));
 365   // XXX the following could potentially be pretty slow;
 366   // should one, pessimistically for the rare cases when res
 367   // calculated above is less than IndexSetSize,
 368   // just return res calculated above? My reasoning was that
 369   // those cases will be so rare that the extra time spent doesn't
 370   // really matter....
 371   // Note: do not change the loop test i >= res + IndexSetStride
 372   // to i > res below, because i is unsigned and res may be zero.
 373   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 374        i -= IndexSetStride) {
 375     if (_indexedFreeList[i].head() != NULL) {
 376       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 377       return i;
 378     }
 379   }
 380   return res;
 381 }
 382 
 383 void LinearAllocBlock::print_on(outputStream* st) const {
 384   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 385             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 386             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 387 }
 388 
 389 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 390   st->print_cr("COMPACTIBLE FREELIST SPACE");
 391   st->print_cr(" Space:");
 392   Space::print_on(st);
 393 
 394   st->print_cr("promoInfo:");
 395   _promoInfo.print_on(st);
 396 
 397   st->print_cr("_smallLinearAllocBlock");
 398   _smallLinearAllocBlock.print_on(st);
 399 
 400   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 401 
 402   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 403 }
 404 
 405 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 406 const {
 407   reportIndexedFreeListStatistics(st);
 408   st->print_cr("Layout of Indexed Freelists");
 409   st->print_cr("---------------------------");
 410   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 411   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 412     _indexedFreeList[i].print_on(st);
 413     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 414       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 415                    p2i(fc), p2i((HeapWord*)fc + i),
 416                    fc->cantCoalesce() ? "\t CC" : "");
 417     }
 418   }
 419 }
 420 
 421 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 422 const {
 423   _promoInfo.print_on(st);
 424 }
 425 
 426 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 427 const {
 428   _dictionary->report_statistics(st);
 429   st->print_cr("Layout of Freelists in Tree");
 430   st->print_cr("---------------------------");
 431   _dictionary->print_free_lists(st);
 432 }
 433 
 434 class BlkPrintingClosure: public BlkClosure {
 435   const CMSCollector*             _collector;
 436   const CompactibleFreeListSpace* _sp;
 437   const CMSBitMap*                _live_bit_map;
 438   const bool                      _post_remark;
 439   outputStream*                   _st;
 440 public:
 441   BlkPrintingClosure(const CMSCollector* collector,
 442                      const CompactibleFreeListSpace* sp,
 443                      const CMSBitMap* live_bit_map,
 444                      outputStream* st):
 445     _collector(collector),
 446     _sp(sp),
 447     _live_bit_map(live_bit_map),
 448     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 449     _st(st) { }
 450   size_t do_blk(HeapWord* addr);
 451 };
 452 
 453 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 454   size_t sz = _sp->block_size_no_stall(addr, _collector);
 455   assert(sz != 0, "Should always be able to compute a size");
 456   if (_sp->block_is_obj(addr)) {
 457     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 458     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 459       p2i(addr),
 460       dead ? "dead" : "live",
 461       sz,
 462       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 463     if (CMSPrintObjectsInDump && !dead) {
 464       oop(addr)->print_on(_st);
 465       _st->print_cr("--------------------------------------");
 466     }
 467   } else { // free block
 468     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 469       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 470     if (CMSPrintChunksInDump) {
 471       ((FreeChunk*)addr)->print_on(_st);
 472       _st->print_cr("--------------------------------------");
 473     }
 474   }
 475   return sz;
 476 }
 477 
 478 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 479   st->print_cr("=========================");
 480   st->print_cr("Block layout in CMS Heap:");
 481   st->print_cr("=========================");
 482   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 483   blk_iterate(&bpcl);
 484 
 485   st->print_cr("=======================================");
 486   st->print_cr("Order & Layout of Promotion Info Blocks");
 487   st->print_cr("=======================================");
 488   print_promo_info_blocks(st);
 489 
 490   st->print_cr("===========================");
 491   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 493   print_indexed_free_lists(st);
 494 
 495   st->print_cr("=================================");
 496   st->print_cr("Order of Free Lists in Dictionary");
 497   st->print_cr("=================================");
 498   print_dictionary_free_lists(st);
 499 }
 500 
 501 
 502 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 503   assert_lock_strong(&_freelistLock);
 504   Log(gc, freelist, stats) log;
 505   if (!log.is_debug()) {
 506     return;
 507   }
 508   log.debug("%s", title);
 509 
 510   LogStream out(log.debug());
 511   _dictionary->report_statistics(&out);
 512 
 513   if (log.is_trace()) {
 514     LogStream trace_out(log.trace());
 515     reportIndexedFreeListStatistics(&trace_out);
 516     size_t total_size = totalSizeInIndexedFreeLists() +
 517                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 518     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 519   }
 520 }
 521 
 522 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 523   assert_lock_strong(&_freelistLock);
 524   st->print_cr("Statistics for IndexedFreeLists:");
 525   st->print_cr("--------------------------------");
 526   size_t total_size = totalSizeInIndexedFreeLists();
 527   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 528   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 529   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 530   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 531   if (free_blocks != 0) {
 532     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 533   }
 534 }
 535 
 536 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 537   size_t res = 0;
 538   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 539     debug_only(
 540       ssize_t recount = 0;
 541       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 542          fc = fc->next()) {
 543         recount += 1;
 544       }
 545       assert(recount == _indexedFreeList[i].count(),
 546         "Incorrect count in list");
 547     )
 548     res += _indexedFreeList[i].count();
 549   }
 550   return res;
 551 }
 552 
 553 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
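  // Scan from the largest indexed size downward; the first non-empty list
  // holds the largest chunks present in the indexed free lists.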
 554   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 555     if (_indexedFreeList[i].head() != NULL) {
 556       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 557       return (size_t)i;
 558     }
 559   }
 560   return 0;
 561 }
 562 
 563 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 564   HeapWord* prevEnd = end();
 565   assert(prevEnd != value, "unnecessary set_end call");
 566   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 567         "New end is below unallocated block");
 568   _end = value;
 569   if (prevEnd != NULL) {
 570     // Resize the underlying block offset table.
 571     _bt.resize(pointer_delta(value, bottom()));
 572     if (value <= prevEnd) {
 573       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 574              "New end is below unallocated block");
 575     } else {
 576       // Now, take this new chunk and add it to the free blocks.
 577       // Note that the BOT has not yet been updated for this block.
 578       size_t newFcSize = pointer_delta(value, prevEnd);
 579       // Add the block to the free lists, if possible coalescing it
 580       // with the last free block, and update the BOT and census data.
 581       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 582     }
 583   }
 584 }
 585 
 586 class FreeListSpaceDCTOC : public FilteringDCTOC {
 587   CompactibleFreeListSpace* _cfls;
 588   CMSCollector* _collector;
 589   bool _parallel;
 590 protected:
 591   // Override.
 592 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 593   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 594                                        HeapWord* bottom, HeapWord* top, \
 595                                        ClosureType* cl);                \
 596       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 597                                        HeapWord* bottom, HeapWord* top, \
 598                                        ClosureType* cl);                \
 599     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 600                                        HeapWord* bottom, HeapWord* top, \
 601                                        ClosureType* cl)
 602   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 603   walk_mem_region_with_cl_DECL(FilteringClosure);
 604 
 605 public:
 606   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 607                      CMSCollector* collector,
 608                      ExtendedOopClosure* cl,
 609                      CardTable::PrecisionStyle precision,
 610                      HeapWord* boundary,
 611                      bool parallel) :
 612     FilteringDCTOC(sp, cl, precision, boundary),
 613     _cfls(sp), _collector(collector), _parallel(parallel) {}
 614 };
 615 
 616 // We de-virtualize the block-related calls below, since we know that our
 617 // space is a CompactibleFreeListSpace.
 618 
 619 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 620 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 621                                                  HeapWord* bottom,              \
 622                                                  HeapWord* top,                 \
 623                                                  ClosureType* cl) {             \
 624    if (_parallel) {                                                             \
 625      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 626    } else {                                                                     \
 627      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 628    }                                                                            \
 629 }                                                                               \
 630 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 631                                                      HeapWord* bottom,          \
 632                                                      HeapWord* top,             \
 633                                                      ClosureType* cl) {         \
 634   /* Skip parts that are before "mr", in case "block_start" sent us             \
 635      back too far. */                                                           \
 636   HeapWord* mr_start = mr.start();                                              \
 637   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 638   HeapWord* next = bottom + bot_size;                                           \
 639   while (next < mr_start) {                                                     \
 640     bottom = next;                                                              \
 641     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 642     next = bottom + bot_size;                                                   \
 643   }                                                                             \
 644                                                                                 \
 645   while (bottom < top) {                                                        \
 646     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 647         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 648                     oop(bottom)) &&                                             \
 649         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 650       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 651       bottom += _cfls->adjustObjectSize(word_sz);                               \
 652     } else {                                                                    \
 653       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 654     }                                                                           \
 655   }                                                                             \
 656 }                                                                               \
 657 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 658                                                        HeapWord* bottom,        \
 659                                                        HeapWord* top,           \
 660                                                        ClosureType* cl) {       \
 661   /* Skip parts that are before "mr", in case "block_start" sent us             \
 662      back too far. */                                                           \
 663   HeapWord* mr_start = mr.start();                                              \
 664   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 665   HeapWord* next = bottom + bot_size;                                           \
 666   while (next < mr_start) {                                                     \
 667     bottom = next;                                                              \
 668     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 669     next = bottom + bot_size;                                                   \
 670   }                                                                             \
 671                                                                                 \
 672   while (bottom < top) {                                                        \
 673     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 674         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 675                     oop(bottom)) &&                                             \
 676         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 677       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 678       bottom += _cfls->adjustObjectSize(word_sz);                               \
 679     } else {                                                                    \
 680       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 681     }                                                                           \
 682   }                                                                             \
 683 }
 684 
 685 // (There are only two of these, rather than N, because the split is due
 686 // only to the introduction of the FilteringClosure, a local part of the
 687 // impl of this abstraction.)
 688 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 689 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 690 
 691 DirtyCardToOopClosure*
 692 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 693                                       CardTable::PrecisionStyle precision,
 694                                       HeapWord* boundary,
 695                                       bool parallel) {
 696   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 697 }
 698 
 699 
 700 // Note on locking for the space iteration functions:
 701 // since the collector's iteration activities are concurrent with
 702 // allocation activities by mutators, absent a suitable mutual exclusion
 703 // mechanism the iterators may go awry. For instance a block being iterated
 704 // may suddenly be allocated or divided up and part of it allocated and
 705 // so on.
 706 
 707 // Apply the given closure to each block in the space.
 708 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 709   assert_lock_strong(freelistLock());
 710   HeapWord *cur, *limit;
 711   for (cur = bottom(), limit = end(); cur < limit;
 712        cur += cl->do_blk_careful(cur));
 713 }
 714 
 715 // Apply the given closure to each block in the space.
 716 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 717   assert_lock_strong(freelistLock());
 718   HeapWord *cur, *limit;
 719   for (cur = bottom(), limit = end(); cur < limit;
 720        cur += cl->do_blk(cur));
 721 }
 722 
 723 // Apply the given closure to each oop in the space.
 724 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 725   assert_lock_strong(freelistLock());
 726   HeapWord *cur, *limit;
 727   size_t curSize;
 728   for (cur = bottom(), limit = end(); cur < limit;
 729        cur += curSize) {
 730     curSize = block_size(cur);
 731     if (block_is_obj(cur)) {
 732       oop(cur)->oop_iterate(cl);
 733     }
 734   }
 735 }
 736 
 737 // NOTE: In the following methods, in order to safely be able to
 738 // apply the closure to an object, we need to be sure that the
 739 // object has been initialized. We are guaranteed that an object
 740 // is initialized if we are holding the Heap_lock with the
 741 // world stopped.
 742 void CompactibleFreeListSpace::verify_objects_initialized() const {
 743   if (is_init_completed()) {
 744     assert_locked_or_safepoint(Heap_lock);
 745     if (Universe::is_fully_initialized()) {
 746       guarantee(SafepointSynchronize::is_at_safepoint(),
 747                 "Required for objects to be initialized");
 748     }
 749   } // else make a concession at vm start-up
 750 }
 751 
 752 // Apply the given closure to each object in the space
 753 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 754   assert_lock_strong(freelistLock());
 755   NOT_PRODUCT(verify_objects_initialized());
 756   HeapWord *cur, *limit;
 757   size_t curSize;
 758   for (cur = bottom(), limit = end(); cur < limit;
 759        cur += curSize) {
 760     curSize = block_size(cur);
 761     if (block_is_obj(cur)) {
 762       blk->do_object(oop(cur));
 763     }
 764   }
 765 }
 766 
 767 // Apply the given closure to each live object in the space
//   The use of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
// objects in the space to hold references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
 773 // obj_is_alive() to determine whether it is safe to apply the closure to
 774 // an object.  See obj_is_alive() for details on how liveness of an
 775 // object is decided.
 776 
 777 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 778   assert_lock_strong(freelistLock());
 779   NOT_PRODUCT(verify_objects_initialized());
 780   HeapWord *cur, *limit;
 781   size_t curSize;
 782   for (cur = bottom(), limit = end(); cur < limit;
 783        cur += curSize) {
 784     curSize = block_size(cur);
 785     if (block_is_obj(cur) && obj_is_alive(cur)) {
 786       blk->do_object(oop(cur));
 787     }
 788   }
 789 }
 790 
 791 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 792                                                   UpwardsObjectClosure* cl) {
 793   assert_locked(freelistLock());
 794   NOT_PRODUCT(verify_objects_initialized());
 795   assert(!mr.is_empty(), "Should be non-empty");
 796   // We use MemRegion(bottom(), end()) rather than used_region() below
 797   // because the two are not necessarily equal for some kinds of
 798   // spaces, in particular, certain kinds of free list spaces.
 799   // We could use the more complicated but more precise:
 800   // MemRegion(used_region().start(), round_to(used_region().end(), CardSize))
 801   // but the slight imprecision seems acceptable in the assertion check.
 802   assert(MemRegion(bottom(), end()).contains(mr),
 803          "Should be within used space");
 804   HeapWord* prev = cl->previous();   // max address from last time
 805   if (prev >= mr.end()) { // nothing to do
 806     return;
 807   }
 808   // This assert will not work when we go from cms space to perm
 809   // space, and use same closure. Easy fix deferred for later. XXX YSR
 810   // assert(prev == NULL || contains(prev), "Should be within space");
 811 
 812   bool last_was_obj_array = false;
 813   HeapWord *blk_start_addr, *region_start_addr;
 814   if (prev > mr.start()) {
 815     region_start_addr = prev;
 816     blk_start_addr    = prev;
 817     // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
 819     // in this region due to a particular coalescing policy.
 820     // Relax the assertion so that the case where the unallocated
 821     // block is maintained and "prev" is beyond the unallocated
 822     // block does not cause the assertion to fire.
 823     assert((BlockOffsetArrayUseUnallocatedBlock &&
 824             (!is_in(prev))) ||
 825            (blk_start_addr == block_start(region_start_addr)), "invariant");
 826   } else {
 827     region_start_addr = mr.start();
 828     blk_start_addr    = block_start(region_start_addr);
 829   }
 830   HeapWord* region_end_addr = mr.end();
 831   MemRegion derived_mr(region_start_addr, region_end_addr);
 832   while (blk_start_addr < region_end_addr) {
 833     const size_t size = block_size(blk_start_addr);
 834     if (block_is_obj(blk_start_addr)) {
 835       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 836     } else {
 837       last_was_obj_array = false;
 838     }
 839     blk_start_addr += size;
 840   }
 841   if (!last_was_obj_array) {
 842     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 843            "Should be within (closed) used space");
 844     assert(blk_start_addr > prev, "Invariant");
 845     cl->set_previous(blk_start_addr); // min address for next time
 846   }
 847 }
 848 
 849 // Callers of this iterator beware: The closure application should
 850 // be robust in the face of uninitialized objects and should (always)
 851 // return a correct size so that the next addr + size below gives us a
 852 // valid block boundary. [See for instance,
 853 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 854 // in ConcurrentMarkSweepGeneration.cpp.]
 855 HeapWord*
 856 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 857   ObjectClosureCareful* cl) {
 858   assert_lock_strong(freelistLock());
 859   // Can't use used_region() below because it may not necessarily
 860   // be the same as [bottom(),end()); although we could
 861   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 862   // that appears too cumbersome, so we just do the simpler check
 863   // in the assertion below.
 864   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 865          "mr should be non-empty and within used space");
 866   HeapWord *addr, *end;
 867   size_t size;
 868   for (addr = block_start_careful(mr.start()), end  = mr.end();
 869        addr < end; addr += size) {
 870     FreeChunk* fc = (FreeChunk*)addr;
 871     if (fc->is_free()) {
 872       // Since we hold the free list lock, which protects direct
 873       // allocation in this generation by mutators, a free object
 874       // will remain free throughout this iteration code.
 875       size = fc->size();
 876     } else {
 877       // Note that the object need not necessarily be initialized,
 878       // because (for instance) the free list lock does NOT protect
 879       // object initialization. The closure application below must
 880       // therefore be correct in the face of uninitialized objects.
 881       size = cl->do_object_careful_m(oop(addr), mr);
 882       if (size == 0) {
 883         // An unparsable object found. Signal early termination.
 884         return addr;
 885       }
 886     }
 887   }
 888   return NULL;
 889 }
 890 
 891 
 892 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 893   NOT_PRODUCT(verify_objects_initialized());
 894   return _bt.block_start(p);
 895 }
 896 
 897 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 898   return _bt.block_start_careful(p);
 899 }
 900 
 901 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 902   NOT_PRODUCT(verify_objects_initialized());
 903   // This must be volatile, or else there is a danger that the compiler
 904   // will compile the code below into a sometimes-infinite loop, by keeping
 905   // the value read the first time in a register.
 906   while (true) {
 907     // We must do this until we get a consistent view of the object.
 908     if (FreeChunk::indicatesFreeChunk(p)) {
 909       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 910       size_t res = fc->size();
 911 
 912       // Bugfix for systems with weak memory model (PPC64/IA64). The
 913       // block's free bit was set and we have read the size of the
 914       // block. Acquire and check the free bit again. If the block is
 915       // still free, the read size is correct.
 916       OrderAccess::acquire();
 917 
 918       // If the object is still a free chunk, return the size, else it
 919       // has been allocated so try again.
 920       if (FreeChunk::indicatesFreeChunk(p)) {
 921         assert(res != 0, "Block size should not be 0");
 922         return res;
 923       }
 924     } else {
 925       // Ensure klass read before size.
 926       Klass* k = oop(p)->klass_or_null_acquire();
 927       if (k != NULL) {
 928         assert(k->is_klass(), "Should really be klass oop.");
 929         oop o = (oop)p;
 930         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 931 
 932         size_t res = o->size_given_klass(k);
 933         res = adjustObjectSize(res);
 934         assert(res != 0, "Block size should not be 0");
 935         return res;
 936       }
 937     }
 938   }
 939 }
 940 
 941 // TODO: Now that is_parsable is gone, we should combine these two functions.
 942 // A variant of the above that uses the Printezis bits for
 943 // unparsable but allocated objects. This avoids any possible
 944 // stalls waiting for mutators to initialize objects, and is
 945 // thus potentially faster than the variant above. However,
 946 // this variant may return a zero size for a block that is
 947 // under mutation and for which a consistent size cannot be
 948 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 949 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 950                                                      const CMSCollector* c)
 951 const {
 952   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 953   // This must be volatile, or else there is a danger that the compiler
 954   // will compile the code below into a sometimes-infinite loop, by keeping
 955   // the value read the first time in a register.
 956   DEBUG_ONLY(uint loops = 0;)
 957   while (true) {
 958     // We must do this until we get a consistent view of the object.
 959     if (FreeChunk::indicatesFreeChunk(p)) {
 960       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 961       size_t res = fc->size();
 962 
 963       // Bugfix for systems with weak memory model (PPC64/IA64). The
 964       // free bit of the block was set and we have read the size of
 965       // the block. Acquire and check the free bit again. If the
 966       // block is still free, the read size is correct.
 967       OrderAccess::acquire();
 968 
 969       if (FreeChunk::indicatesFreeChunk(p)) {
 970         assert(res != 0, "Block size should not be 0");
 971         assert(loops == 0, "Should be 0");
 972         return res;
 973       }
 974     } else {
 975       // Ensure klass read before size.
 976       Klass* k = oop(p)->klass_or_null_acquire();
 977       if (k != NULL) {
 978         assert(k->is_klass(), "Should really be klass oop.");
 979         oop o = (oop)p;
 980         assert(o->is_oop(), "Should be an oop");
 981 
 982         size_t res = o->size_given_klass(k);
 983         res = adjustObjectSize(res);
 984         assert(res != 0, "Block size should not be 0");
 985         return res;
 986       } else {
 987         // May return 0 if P-bits not present.
 988         return c->block_size_if_printezis_bits(p);
 989       }
 990     }
 991     assert(loops == 0, "Can loop at most once");
 992     DEBUG_ONLY(loops++;)
 993   }
 994 }
 995 
 996 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
 997   NOT_PRODUCT(verify_objects_initialized());
 998   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 999   FreeChunk* fc = (FreeChunk*)p;
1000   if (fc->is_free()) {
1001     return fc->size();
1002   } else {
1003     // Ignore mark word because this may be a recently promoted
1004     // object whose mark word is used to chain together grey
1005     // objects (the last one would have a null value).
1006     assert(oop(p)->is_oop(true), "Should be an oop");
1007     return adjustObjectSize(oop(p)->size());
1008   }
1009 }
1010 
1011 // This implementation assumes that the property of "being an object" is
// stable.  But being a free chunk may not be (because of parallel
// promotion).
1014 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1015   FreeChunk* fc = (FreeChunk*)p;
1016   assert(is_in_reserved(p), "Should be in space");
1017   if (FreeChunk::indicatesFreeChunk(p)) return false;
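  // Treat the block as an object only if its klass pointer has been
  // published; read the klass with acquire semantics (cf. block_size()).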
1018   Klass* k = oop(p)->klass_or_null_acquire();
1019   if (k != NULL) {
1020     // Ignore mark word because it may have been used to
1021     // chain together promoted objects (the last one
1022     // would have a null value).
1023     assert(oop(p)->is_oop(true), "Should be an oop");
1024     return true;
1025   } else {
1026     return false;  // Was not an object at the start of collection.
1027   }
1028 }
1029 
1030 // Check if the object is alive. This fact is checked either by consulting
1031 // the main marking bitmap in the sweeping phase or, if it's a permanent
1032 // generation and we're not in the sweeping phase, by checking the
1033 // perm_gen_verify_bit_map where we store the "deadness" information if
1034 // we did not sweep the perm gen in the most recent previous GC cycle.
1035 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1036   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1037          "Else races are possible");
1038   assert(block_is_obj(p), "The address should point to an object");
1039 
1040   // If we're sweeping, we use object liveness information from the main bit map
1041   // for both perm gen and old gen.
1042   // We don't need to lock the bitmap (live_map or dead_map below), because
1043   // EITHER we are in the middle of the sweeping phase, and the
1044   // main marking bit map (live_map below) is locked,
1045   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1046   // is stable, because it's mutated only in the sweeping phase.
1047   // NOTE: This method is also used by jmap where, if class unloading is
1048   // off, the results can return "false" for legitimate perm objects,
1049   // when we are not in the midst of a sweeping phase, which can result
1050   // in jmap not reporting certain perm gen objects. This will be moot
1051   // if/when the perm gen goes away in the future.
1052   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1053     CMSBitMap* live_map = _collector->markBitMap();
1054     return live_map->par_isMarked((HeapWord*) p);
1055   }
1056   return true;
1057 }
1058 
1059 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1060   FreeChunk* fc = (FreeChunk*)p;
1061   assert(is_in_reserved(p), "Should be in space");
1062   assert(_bt.block_start(p) == p, "Should be a block boundary");
1063   if (!fc->is_free()) {
1064     // Ignore mark word because it may have been used to
1065     // chain together promoted objects (the last one
1066     // would have a null value).
1067     assert(oop(p)->is_oop(true), "Should be an oop");
1068     return true;
1069   }
1070   return false;
1071 }
1072 
// "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelist lock when you call this.
1075 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1076   size_t size = 0;
1077   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1078     debug_only(
1079       // We may be calling here without the lock in which case we
1080       // won't do this modest sanity check.
1081       if (freelistLock()->owned_by_self()) {
1082         size_t total_list_size = 0;
1083         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1084           fc = fc->next()) {
1085           total_list_size += i;
1086         }
1087         assert(total_list_size == i * _indexedFreeList[i].count(),
1088                "Count in list is incorrect");
1089       }
1090     )
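    // List i holds only chunks of exactly i words, so its contribution to
    // the total is i * count.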
1091     size += i * _indexedFreeList[i].count();
1092   }
1093   return size;
1094 }
1095 
1096 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
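  // Parallel allocations simply serialize on the shared free list lock.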
1097   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1098   return allocate(size);
1099 }
1100 
1101 HeapWord*
1102 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1103   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1104 }
1105 
1106 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1107   assert_lock_strong(freelistLock());
1108   HeapWord* res = NULL;
1109   assert(size == adjustObjectSize(size),
1110          "use adjustObjectSize() before calling into allocate()");
1111 
1112   res = allocate_adaptive_freelists(size);
1113 
1114   if (res != NULL) {
1115     // check that res does lie in this space!
1116     assert(is_in_reserved(res), "Not in this space!");
1117     assert(is_aligned((void*)res), "alignment check");
1118 
1119     FreeChunk* fc = (FreeChunk*)res;
1120     fc->markNotFree();
1121     assert(!fc->is_free(), "shouldn't be marked free");
1122     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1123     // Verify that the block offset table shows this to
1124     // be a single block, but not one which is unallocated.
1125     _bt.verify_single_block(res, size);
1126     _bt.verify_not_unallocated(res, size);
1127     // mangle a just allocated object with a distinct pattern.
1128     debug_only(fc->mangleAllocated(size));
1129   }
1130 
1131   return res;
1132 }
1133 
1134 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1135   assert_lock_strong(freelistLock());
1136   HeapWord* res = NULL;
1137   assert(size == adjustObjectSize(size),
1138          "use adjustObjectSize() before calling into allocate()");
1139 
  // Strategy:
  //   if small
  //     exact size from the small object indexed list
1143   //     small or large linear allocation block (linAB) as appropriate
1144   //     take from lists of greater sized chunks
1145   //   else
1146   //     dictionary
1147   //     small or large linear allocation block if it has the space
1148   // Try allocating exact size from indexTable first
1149   if (size < IndexSetSize) {
1150     res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
1152       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1153         "Not removed from free list");
1154       // no block offset table adjustment is necessary on blocks in
1155       // the indexed lists.
1156 
1157     // Try allocating from the small LinAB
1158     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1159         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
        // If successful, the above also adjusts the block offset table.
        // Note that this call will refill the LinAB to
        // satisfy the request.  This is different than
        // evm.
1164         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1165     } else {
1166       // Raid the exact free lists larger than size, even if they are not
1167       // overpopulated.
1168       res = (HeapWord*) getChunkFromGreater(size);
1169     }
1170   } else {
1171     // Big objects get allocated directly from the dictionary.
1172     res = (HeapWord*) getChunkFromDictionaryExact(size);
1173     if (res == NULL) {
1174       // Try hard not to fail since an allocation failure will likely
1175       // trigger a synchronous GC.  Try to get the space from the
1176       // allocation blocks.
1177       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1178     }
1179   }
1180 
1181   return res;
1182 }
1183 
1184 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1185 // when promoting obj.
1186 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1187   // Depending on the object size, expansion may require refilling either a
1188   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1189   // is added because the dictionary may over-allocate to avoid fragmentation.
1190   size_t space = obj_size;
1191   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1192   return space;
1193 }
1194 
1195 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1196   FreeChunk* ret;
1197 
1198   assert(numWords >= MinChunkSize, "Size is less than minimum");
1199   assert(linearAllocationWouldFail() || bestFitFirst(),
1200     "Should not be here");
1201 
1202   size_t i;
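  // Ask for at least numWords + MinChunkSize so that, after carving out
  // numWords, the remainder is still a valid free chunk of at least
  // MinChunkSize words.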
1203   size_t currSize = numWords + MinChunkSize;
1204   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1205   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1206     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1207     if (fl->head()) {
1208       ret = getFromListGreater(fl, numWords);
1209       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1210       return ret;
1211     }
1212   }
1213 
1214   currSize = MAX2((size_t)SmallForDictionary,
1215                   (size_t)(numWords + MinChunkSize));
1216 
1217   /* Try to get a chunk that satisfies request, while avoiding
1218      fragmentation that can't be handled. */
1219   {
1220     ret =  dictionary()->get_chunk(currSize);
1221     if (ret != NULL) {
1222       assert(ret->size() - numWords >= MinChunkSize,
1223              "Chunk is too small");
1224       _bt.allocated((HeapWord*)ret, ret->size());
1225       /* Carve returned chunk. */
1226       (void) splitChunkAndReturnRemainder(ret, numWords);
1227       /* Label this as no longer a free chunk. */
1228       assert(ret->is_free(), "This chunk should be free");
1229       ret->link_prev(NULL);
1230     }
1231     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1232     return ret;
1233   }
1234   ShouldNotReachHere();
1235 }
1236 
1237 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1238   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1239   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1240 }
1241 
1242 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1243   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1244          (_smallLinearAllocBlock._word_size == fc->size()),
1245          "Linear allocation block shows incorrect size");
1246   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1247           (_smallLinearAllocBlock._word_size == fc->size()));
1248 }
1249 
1250 // Check if the purported free chunk is present either as the linear
1251 // allocation block, in the size-indexed table of (smaller) free blocks,
1252 // or among the larger free blocks kept in the binary tree dictionary.
1253 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1254   if (verify_chunk_is_linear_alloc_block(fc)) {
1255     return true;
1256   } else if (fc->size() < IndexSetSize) {
1257     return verifyChunkInIndexedFreeLists(fc);
1258   } else {
1259     return dictionary()->verify_chunk_in_free_list(fc);
1260   }
1261 }
1262 
1263 #ifndef PRODUCT
1264 void CompactibleFreeListSpace::assert_locked() const {
1265   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1266 }
1267 
1268 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1269   CMSLockVerifier::assert_locked(lock);
1270 }
1271 #endif
1272 
1273 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1274   // In the parallel case, the main thread holds the free list lock
1275   // on behalf of the parallel threads.
1276   FreeChunk* fc;
1277   {
1278     // If GC is parallel, this might be called by several threads.
1279     // This should be rare enough that the locking overhead won't affect
1280     // the sequential code.
1281     MutexLockerEx x(parDictionaryAllocLock(),
1282                     Mutex::_no_safepoint_check_flag);
1283     fc = getChunkFromDictionary(size);
1284   }
1285   if (fc != NULL) {
1286     fc->dontCoalesce();
1287     assert(fc->is_free(), "Should be free, but not coalescable");
1288     // Verify that the block offset table shows this to
1289     // be a single block, but not one which is unallocated.
1290     _bt.verify_single_block((HeapWord*)fc, fc->size());
1291     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1292   }
1293   return fc;
1294 }
1295 
1296 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1297   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1298   assert_locked();
1299 
1300   // If we are tracking promotions, then first ensure space for
1301   // promotion (including spooling space for saving the header if necessary),
1302   // then allocate and copy, and finally track the promoted info if needed.
1303   // When tracking (see PromotionInfo::track()), the mark word may
1304   // be displaced and in this case restoration of the mark word
1305   // occurs in the (oop_since_save_marks_)iterate phase.
1306   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1307     return NULL;
1308   }
1309   // Call the allocate(size_t, bool) form directly to avoid the
1310   // additional call through the allocate(size_t) form.  Having
1311   // the compiler inline the call is problematic because allocate(size_t)
1312   // is a virtual method.
1313   HeapWord* res = allocate(adjustObjectSize(obj_size));
1314   if (res != NULL) {
1315     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1316     // if we should be tracking promotions, do so.
1317     if (_promoInfo.tracking()) {
1318         _promoInfo.track((PromotedObject*)res);
1319     }
1320   }
1321   return oop(res);
1322 }
1323 
1324 HeapWord*
1325 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1326   assert_locked();
1327   assert(size >= MinChunkSize, "minimum chunk size");
1328   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1329     "maximum from smallLinearAllocBlock");
1330   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1331 }
1332 
1333 HeapWord*
1334 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1335                                                        size_t size) {
1336   assert_locked();
1337   assert(size >= MinChunkSize, "too small");
1338   HeapWord* res = NULL;
1339   // Try to do linear allocation from blk, making sure that blk has not been exhausted.
1340   if (blk->_word_size == 0) {
1341     // We have probably been unable to fill this either in the prologue or
1342     // when it was exhausted at the last linear allocation. Bail out until
1343     // next time.
1344     assert(blk->_ptr == NULL, "consistency check");
1345     return NULL;
1346   }
1347   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1348   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1349   if (res != NULL) return res;
1350 
1351   // about to exhaust this linear allocation block
1352   if (blk->_word_size == size) { // exactly satisfied
1353     res = blk->_ptr;
1354     _bt.allocated(res, blk->_word_size);
1355   } else if (size + MinChunkSize <= blk->_refillSize) {
1356     size_t sz = blk->_word_size;
1357     // Update _unallocated_block if the size is such that the chunk would be
1358     // returned to the indexed free list.  All other chunks in the indexed
1359     // free lists are allocated from the dictionary so that _unallocated_block
1360     // has already been adjusted for them.  Do it here so that the cost
1361     // is incurred for all chunks added back to the indexed free lists.
1362     if (sz < SmallForDictionary) {
1363       _bt.allocated(blk->_ptr, sz);
1364     }
1365     // Return the chunk that isn't big enough, and then refill below.
1366     addChunkToFreeLists(blk->_ptr, sz);
1367     split_birth(sz);
1368     // Don't keep statistics on adding back chunk from a LinAB.
1369   } else {
1370     // A refilled block would not satisfy the request.
1371     return NULL;
1372   }
1373 
1374   blk->_ptr = NULL; blk->_word_size = 0;
1375   refillLinearAllocBlock(blk);
1376   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1377          "block was replenished");
1378   if (res != NULL) {
1379     split_birth(size);
1380     repairLinearAllocBlock(blk);
1381   } else if (blk->_ptr != NULL) {
1382     res = blk->_ptr;
1383     size_t blk_size = blk->_word_size;
1384     blk->_word_size -= size;
1385     blk->_ptr  += size;
1386     split_birth(size);
1387     repairLinearAllocBlock(blk);
1388     // Update BOT last so that other (parallel) GC threads see a consistent
1389     // view of the BOT and free blocks.
1390     // Above must occur before BOT is updated below.
1391     OrderAccess::storestore();
1392     _bt.split_block(res, blk_size, size);  // adjust block offset table
1393   }
1394   return res;
1395 }
1396 
1397 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1398                                         LinearAllocBlock* blk,
1399                                         size_t size) {
1400   assert_locked();
1401   assert(size >= MinChunkSize, "too small");
1402 
1403   HeapWord* res = NULL;
1404   // This is the common case.  Keep it simple.
1405   if (blk->_word_size >= size + MinChunkSize) {
1406     assert(blk->_ptr != NULL, "consistency check");
1407     res = blk->_ptr;
1408     // Note that the BOT is up-to-date for the linAB before allocation.  It
1409     // indicates the start of the linAB.  The split_block() updates the
1410     // BOT for the linAB after the allocation (indicates the start of the
1411     // next chunk to be allocated).
1412     size_t blk_size = blk->_word_size;
1413     blk->_word_size -= size;
1414     blk->_ptr  += size;
1415     split_birth(size);
1416     repairLinearAllocBlock(blk);
1417     // Update BOT last so that other (parallel) GC threads see a consistent
1418     // view of the BOT and free blocks.
1419     // Above must occur before BOT is updated below.
1420     OrderAccess::storestore();
1421     _bt.split_block(res, blk_size, size);  // adjust block offset table
1422     _bt.allocated(res, size);
1423   }
1424   return res;
1425 }
1426 
1427 FreeChunk*
1428 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1429   assert_locked();
1430   assert(size < SmallForDictionary, "just checking");
1431   FreeChunk* res;
1432   res = _indexedFreeList[size].get_chunk_at_head();
1433   if (res == NULL) {
1434     res = getChunkFromIndexedFreeListHelper(size);
1435   }
1436   _bt.verify_not_unallocated((HeapWord*) res, size);
1437   assert(res == NULL || res->size() == size, "Incorrect block size");
1438   return res;
1439 }
1440 
1441 FreeChunk*
1442 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1443   bool replenish) {
1444   assert_locked();
1445   FreeChunk* fc = NULL;
1446   if (size < SmallForDictionary) {
1447     assert(_indexedFreeList[size].head() == NULL ||
1448       _indexedFreeList[size].surplus() <= 0,
1449       "List for this size should be empty or underpopulated");
1450     // Try best fit in exact lists before replenishing the list
1451     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1452       // Replenish list.
1453       //
1454       // Things tried that failed.
1455       //   Tried allocating out of the two LinABs first before
1456       // replenishing lists.
1457       //   Tried small linAB of size 256 (size in indexed list)
1458       // and replenishing indexed lists from the small linAB.
1459       //
1460       FreeChunk* newFc = NULL;
1461       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1462       if (replenish_size < SmallForDictionary) {
1463         // Do not replenish from an underpopulated size.
1464         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1465             _indexedFreeList[replenish_size].head() != NULL) {
1466           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1467         } else if (bestFitFirst()) {
1468           newFc = bestFitSmall(replenish_size);
1469         }
1470       }
1471       if (newFc == NULL && replenish_size > size) {
1472         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1473         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1474       }
1475       // Note: The split-death stats update for the block obtained above
1476       // will be recorded below, precisely when we know we are actually
1477       // going to split it into more than one piece.
1478       if (newFc != NULL) {
1479         if  (replenish || CMSReplenishIntermediate) {
1480           // Replenish this list and return one block to caller.
1481           size_t i;
1482           FreeChunk *curFc, *nextFc;
1483           size_t num_blk = newFc->size() / size;
1484           assert(num_blk >= 1, "Smaller than requested?");
1485           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1486           if (num_blk > 1) {
1487             // we are sure we will be splitting the block just obtained
1488             // into multiple pieces; record the split-death of the original
1489             splitDeath(replenish_size);
1490           }
1491           // carve up and link blocks 0, ..., num_blk - 2
1492           // The last chunk is not added to the lists but is returned as the
1493           // free chunk.
1494           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1495                i = 0;
1496                i < (num_blk - 1);
1497                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1498                i++) {
1499             curFc->set_size(size);
1500             // Don't record this as a return in order to try and
1501             // determine the "returns" from a GC.
1502             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1503             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1504             _bt.mark_block((HeapWord*)curFc, size);
1505             split_birth(size);
1506             // Don't record the initial population of the indexed list
1507             // as a split birth.
1508           }
1509 
1510           // check that the arithmetic was OK above
1511           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1512             "inconsistency in carving newFc");
1513           curFc->set_size(size);
1514           _bt.mark_block((HeapWord*)curFc, size);
1515           split_birth(size);
1516           fc = curFc;
1517         } else {
1518           // Return entire block to caller
1519           fc = newFc;
1520         }
1521       }
1522     }
1523   } else {
1524     // Get a free chunk from the free chunk dictionary to be returned to
1525     // replenish the indexed free list.
1526     fc = getChunkFromDictionaryExact(size);
1527   }
1528   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1529   return fc;
1530 }
1531 
1532 FreeChunk*
1533 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1534   assert_locked();
1535   FreeChunk* fc = _dictionary->get_chunk(size,
1536                                          FreeBlockDictionary<FreeChunk>::atLeast);
1537   if (fc == NULL) {
1538     return NULL;
1539   }
1540   _bt.allocated((HeapWord*)fc, fc->size());
1541   if (fc->size() >= size + MinChunkSize) {
1542     fc = splitChunkAndReturnRemainder(fc, size);
1543   }
1544   assert(fc->size() >= size, "chunk too small");
1545   assert(fc->size() < size + MinChunkSize, "chunk too big");
1546   _bt.verify_single_block((HeapWord*)fc, fc->size());
1547   return fc;
1548 }
1549 
1550 FreeChunk*
1551 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1552   assert_locked();
1553   FreeChunk* fc = _dictionary->get_chunk(size,
1554                                          FreeBlockDictionary<FreeChunk>::atLeast);
1555   if (fc == NULL) {
1556     return fc;
1557   }
1558   _bt.allocated((HeapWord*)fc, fc->size());
1559   if (fc->size() == size) {
1560     _bt.verify_single_block((HeapWord*)fc, size);
1561     return fc;
1562   }
1563   assert(fc->size() > size, "get_chunk() guarantee");
1564   if (fc->size() < size + MinChunkSize) {
1565     // Return the chunk to the dictionary and go get a bigger one.
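         // For illustration only (assuming MinChunkSize == 4): a request for
         // 300 words that is handed a 302-word chunk cannot carve off a free
         // 2-word remainder, so the chunk is returned and a chunk of at least
         // 304 words is requested instead, leaving a remainder of at least
         // MinChunkSize words after the split below.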
1566     returnChunkToDictionary(fc);
1567     fc = _dictionary->get_chunk(size + MinChunkSize,
1568                                 FreeBlockDictionary<FreeChunk>::atLeast);
1569     if (fc == NULL) {
1570       return NULL;
1571     }
1572     _bt.allocated((HeapWord*)fc, fc->size());
1573   }
1574   assert(fc->size() >= size + MinChunkSize, "tautology");
1575   fc = splitChunkAndReturnRemainder(fc, size);
1576   assert(fc->size() == size, "chunk is wrong size");
1577   _bt.verify_single_block((HeapWord*)fc, size);
1578   return fc;
1579 }
1580 
1581 void
1582 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1583   assert_locked();
1584 
1585   size_t size = chunk->size();
1586   _bt.verify_single_block((HeapWord*)chunk, size);
1587   // adjust _unallocated_block downward, as necessary
1588   _bt.freed((HeapWord*)chunk, size);
1589   _dictionary->return_chunk(chunk);
1590 #ifndef PRODUCT
1591   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1592     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1593     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1594     tl->verify_stats();
1595   }
1596 #endif // PRODUCT
1597 }
1598 
1599 void
1600 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1601   assert_locked();
1602   size_t size = fc->size();
1603   _bt.verify_single_block((HeapWord*) fc, size);
1604   _bt.verify_not_unallocated((HeapWord*) fc, size);
1605   _indexedFreeList[size].return_chunk_at_tail(fc);
1606 #ifndef PRODUCT
1607   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1608      _indexedFreeList[size].verify_stats();
1609   }
1610 #endif // PRODUCT
1611 }
1612 
1613 // Add the chunk to the free lists, coalescing it with the largest
1614 // block if that block ends where this chunk begins, and update BOT
1615 // and census data.  We would of course have preferred to coalesce it
1616 // with the last block, but it's currently less expensive to find the
1617 // largest block than it is to find the last.
1618 void
1619 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1620   HeapWord* chunk, size_t     size) {
1621   // check that the chunk does lie in this space!
1622   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1623   // One of the parallel gc task threads may be here
1624   // whilst others are allocating.
1625   Mutex* lock = &_parDictionaryAllocLock;
1626   FreeChunk* ec;
1627   {
1628     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1629     ec = dictionary()->find_largest_dict();  // get largest block
1630     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1631       // It's a coterminal block - we can coalesce.
1632       size_t old_size = ec->size();
1633       coalDeath(old_size);
1634       removeChunkFromDictionary(ec);
1635       size += old_size;
1636     } else {
1637       ec = (FreeChunk*)chunk;
1638     }
1639   }
1640   ec->set_size(size);
1641   debug_only(ec->mangleFreed(size));
1642   if (size < SmallForDictionary) {
1643     lock = _indexedFreeListParLocks[size];
1644   }
1645   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1646   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1647   // record the birth under the lock since the recording involves
1648   // manipulation of the list on which the chunk lives and
1649   // if the chunk is allocated and is the last on the list,
1650   // the list can go away.
1651   coalBirth(size);
1652 }
1653 
1654 void
1655 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1656                                               size_t     size) {
1657   // check that the chunk does lie in this space!
1658   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1659   assert_locked();
1660   _bt.verify_single_block(chunk, size);
1661 
1662   FreeChunk* fc = (FreeChunk*) chunk;
1663   fc->set_size(size);
1664   debug_only(fc->mangleFreed(size));
1665   if (size < SmallForDictionary) {
1666     returnChunkToFreeList(fc);
1667   } else {
1668     returnChunkToDictionary(fc);
1669   }
1670 }
1671 
1672 void
1673 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1674   size_t size, bool coalesced) {
1675   assert_locked();
1676   assert(chunk != NULL, "null chunk");
1677   if (coalesced) {
1678     // repair BOT
1679     _bt.single_block(chunk, size);
1680   }
1681   addChunkToFreeLists(chunk, size);
1682 }
1683 
1684 // We _must_ find the purported chunk on our free lists;
1685 // we assert if we don't.
1686 void
1687 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1688   size_t size = fc->size();
1689   assert_locked();
1690   debug_only(verifyFreeLists());
1691   if (size < SmallForDictionary) {
1692     removeChunkFromIndexedFreeList(fc);
1693   } else {
1694     removeChunkFromDictionary(fc);
1695   }
1696   _bt.verify_single_block((HeapWord*)fc, size);
1697   debug_only(verifyFreeLists());
1698 }
1699 
1700 void
1701 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1702   size_t size = fc->size();
1703   assert_locked();
1704   assert(fc != NULL, "null chunk");
1705   _bt.verify_single_block((HeapWord*)fc, size);
1706   _dictionary->remove_chunk(fc);
1707   // adjust _unallocated_block upward, as necessary
1708   _bt.allocated((HeapWord*)fc, size);
1709 }
1710 
1711 void
1712 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1713   assert_locked();
1714   size_t size = fc->size();
1715   _bt.verify_single_block((HeapWord*)fc, size);
1716   NOT_PRODUCT(
1717     if (FLSVerifyIndexTable) {
1718       verifyIndexedFreeList(size);
1719     }
1720   )
1721   _indexedFreeList[size].remove_chunk(fc);
1722   NOT_PRODUCT(
1723     if (FLSVerifyIndexTable) {
1724       verifyIndexedFreeList(size);
1725     }
1726   )
1727 }
1728 
1729 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1730   /* A hint is the next larger size that has a surplus.
1731      Start search at a size large enough to guarantee that
1732      the excess is >= MinChunkSize. */
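       /* For illustration only (assumed numbers): a request for 10 words with
          MinChunkSize == 4 starts the search at size 14 (suitably aligned); if
          _indexedFreeList[14].hint() is 20 and the 20-word list has a surplus,
          a 20-word chunk is split, the 10-word piece is returned to the caller
          and the 10-word remainder goes back on the indexed free lists. */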
1733   size_t start = align_object_size(numWords + MinChunkSize);
1734   if (start < IndexSetSize) {
1735     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1736     size_t    hint = _indexedFreeList[start].hint();
1737     while (hint < IndexSetSize) {
1738       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1739       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1740       if (fl->surplus() > 0 && fl->head() != NULL) {
1741         // Found a list with surplus, reset original hint
1742         // and split out a free chunk which is returned.
1743         _indexedFreeList[start].set_hint(hint);
1744         FreeChunk* res = getFromListGreater(fl, numWords);
1745         assert(res == NULL || res->is_free(),
1746           "Should be returning a free chunk");
1747         return res;
1748       }
1749       hint = fl->hint(); /* keep looking */
1750     }
1751     /* None found. */
1752     it[start].set_hint(IndexSetSize);
1753   }
1754   return NULL;
1755 }
1756 
1757 /* Requires fl->size >= numWords + MinChunkSize */
1758 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1759   size_t numWords) {
1760   FreeChunk *curr = fl->head();
1761   assert(curr != NULL, "List is empty");
1762   size_t oldNumWords = curr->size();
1763   assert(numWords >= MinChunkSize, "Word size is too small");
1764   assert(oldNumWords >= numWords + MinChunkSize,
1765          "Size of chunks in the list is too small");
1766 
1767   fl->remove_chunk(curr);
1768   // recorded indirectly by splitChunkAndReturnRemainder -
1769   // smallSplit(oldNumWords, numWords);
1770   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1771   // Does anything have to be done for the remainder in terms of
1772   // fixing the card table?
1773   assert(new_chunk == NULL || new_chunk->is_free(),
1774     "Should be returning a free chunk");
1775   return new_chunk;
1776 }
1777 
1778 FreeChunk*
1779 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1780   size_t new_size) {
1781   assert_locked();
1782   size_t size = chunk->size();
1783   assert(size > new_size, "Split from a smaller block?");
1784   assert(is_aligned(chunk), "alignment problem");
1785   assert(size == adjustObjectSize(size), "alignment problem");
1786   size_t rem_sz = size - new_size;
1787   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1788   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1789   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1790   assert(is_aligned(ffc), "alignment problem");
1791   ffc->set_size(rem_sz);
1792   ffc->link_next(NULL);
1793   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1794   // Above must occur before BOT is updated below.
1795   OrderAccess::storestore();
1796   assert(chunk->is_free() && ffc->is_free(), "Error");
1797   // adjust block offset table
1798   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1799   if (rem_sz < SmallForDictionary) {
1800     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1801     bool is_par = Thread::current()->is_GC_task_thread();
1802     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1803     returnChunkToFreeList(ffc);
1804     split(size, rem_sz);
1805     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1806   } else {
1807     returnChunkToDictionary(ffc);
1808     split(size, rem_sz);
1809   }
1810   chunk->set_size(new_size);
1811   return chunk;
1812 }
1813 
1814 void
1815 CompactibleFreeListSpace::sweep_completed() {
1816   // Now that space is probably plentiful, refill linear
1817   // allocation blocks as needed.
1818   refillLinearAllocBlocksIfNeeded();
1819 }
1820 
1821 void
1822 CompactibleFreeListSpace::gc_prologue() {
1823   assert_locked();
1824   reportFreeListStatistics("Before GC:");
1825   refillLinearAllocBlocksIfNeeded();
1826 }
1827 
1828 void
1829 CompactibleFreeListSpace::gc_epilogue() {
1830   assert_locked();
1831   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1832   _promoInfo.stopTrackingPromotions();
1833   repairLinearAllocationBlocks();
1834   reportFreeListStatistics("After GC:");
1835 }
1836 
1837 // Iteration support, mostly delegated from a CMS generation
1838 
1839 void CompactibleFreeListSpace::save_marks() {
1840   assert(Thread::current()->is_VM_thread(),
1841          "Global variable should only be set when single-threaded");
1842   // Mark the "end" of the used space at the time of this call;
1843   // note, however, that promoted objects from this point
1844   // on are tracked in the _promoInfo below.
1845   set_saved_mark_word(unallocated_block());
1846 #ifdef ASSERT
1847   // Check the sanity of save_marks() etc.
1848   MemRegion ur    = used_region();
1849   MemRegion urasm = used_region_at_save_marks();
1850   assert(ur.contains(urasm),
1851          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1852          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1853          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1854 #endif
1855   // inform allocator that promotions should be tracked.
1856   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1857   _promoInfo.startTrackingPromotions();
1858 }
1859 
1860 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1861   assert(_promoInfo.tracking(), "No preceding save_marks?");
1862   return _promoInfo.noPromotions();
1863 }
1864 
1865 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1866                                                                             \
1867 void CompactibleFreeListSpace::                                             \
1868 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1869   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1870   /*                                                                        \
1871    * This also restores any displaced headers and removes the elements from \
1872    * the iteration set as they are processed, so that we have a clean slate \
1873    * at the end of the iteration. Note, thus, that if new objects are       \
1874    * promoted as a result of the iteration they are iterated over as well.  \
1875    */                                                                       \
1876   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1877 }
1878 
1879 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1880 
1881 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1882   return _smallLinearAllocBlock._word_size == 0;
1883 }
1884 
1885 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1886   // Fix up linear allocation blocks to look like free blocks
1887   repairLinearAllocBlock(&_smallLinearAllocBlock);
1888 }
1889 
1890 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1891   assert_locked();
1892   if (blk->_ptr != NULL) {
1893     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1894            "Minimum block size requirement");
1895     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1896     fc->set_size(blk->_word_size);
1897     fc->link_prev(NULL);   // mark as free
1898     fc->dontCoalesce();
1899     assert(fc->is_free(), "just marked it free");
1900     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1901   }
1902 }
1903 
1904 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1905   assert_locked();
1906   if (_smallLinearAllocBlock._ptr == NULL) {
1907     assert(_smallLinearAllocBlock._word_size == 0,
1908       "Size of linAB should be zero if the ptr is NULL");
1909     // Reset the linAB refill and allocation size limit.
1910     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1911   }
1912   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1913 }
1914 
1915 void
1916 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1917   assert_locked();
1918   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1919          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1920          "blk invariant");
1921   if (blk->_ptr == NULL) {
1922     refillLinearAllocBlock(blk);
1923   }
1924 }
1925 
1926 void
1927 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1928   assert_locked();
1929   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1930          "linear allocation block should be empty");
1931   FreeChunk* fc;
1932   if (blk->_refillSize < SmallForDictionary &&
1933       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1934     // A linAB's strategy might be to use small sizes to reduce
1935     // fragmentation but still get the benefits of allocation from a
1936     // linAB.
1937   } else {
1938     fc = getChunkFromDictionary(blk->_refillSize);
1939   }
1940   if (fc != NULL) {
1941     blk->_ptr  = (HeapWord*)fc;
1942     blk->_word_size = fc->size();
1943     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1944   }
1945 }
1946 
1947 // Support for compaction
1948 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1949   scan_and_forward(this, cp);
1950   // prepare_for_compaction() uses the space between live objects
1951   // so that a later phase can skip dead space quickly.  Verification
1952   // of the free lists therefore doesn't work after this point.
1953 }
1954 
1955 void CompactibleFreeListSpace::adjust_pointers() {
1956   // In other versions of adjust_pointers(), a bail out
1957   // based on the amount of live data in the generation
1958   // (i.e., if 0, bail out) may be used.
1959   // Cannot test used() == 0 here because the free lists have already
1960   // been mangled by the compaction.
1961 
1962   scan_and_adjust_pointers(this);
1963   // See note about verification in prepare_for_compaction().
1964 }
1965 
1966 void CompactibleFreeListSpace::compact() {
1967   scan_and_compact(this);
1968 }
1969 
1970 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1971 // where fbs are the free block sizes
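     // For illustration only (assumed numbers): two free blocks of 100 words
     // each give 1 - (100*100 + 100*100)/(200*200) = 0.5, while a single
     // 200-word free block gives 1 - (200*200)/(200*200) = 0.0.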
1972 double CompactibleFreeListSpace::flsFrag() const {
1973   size_t itabFree = totalSizeInIndexedFreeLists();
1974   double frag = 0.0;
1975   size_t i;
1976 
1977   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1978     double sz  = i;
1979     frag      += _indexedFreeList[i].count() * (sz * sz);
1980   }
1981 
1982   double totFree = itabFree +
1983                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1984   if (totFree > 0) {
1985     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1986             (totFree * totFree));
1987     frag = (double)1.0  - frag;
1988   } else {
1989     assert(frag == 0.0, "Follows from totFree == 0");
1990   }
1991   return frag;
1992 }
1993 
1994 void CompactibleFreeListSpace::beginSweepFLCensus(
1995   float inter_sweep_current,
1996   float inter_sweep_estimate,
1997   float intra_sweep_estimate) {
1998   assert_locked();
1999   size_t i;
2000   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2001     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2002     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2003     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2004     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2005     fl->set_before_sweep(fl->count());
2006     fl->set_bfr_surp(fl->surplus());
2007   }
2008   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2009                                     inter_sweep_current,
2010                                     inter_sweep_estimate,
2011                                     intra_sweep_estimate);
2012 }
2013 
2014 void CompactibleFreeListSpace::setFLSurplus() {
2015   assert_locked();
2016   size_t i;
2017   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2018     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2019     fl->set_surplus(fl->count() -
2020                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2021   }
2022 }
2023 
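     // Walk the indexed lists from largest to smallest, recording in each
     // list's hint the next larger size that currently has a surplus (or
     // IndexSetSize if there is none); bestFitSmall() follows this chain.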
2024 void CompactibleFreeListSpace::setFLHints() {
2025   assert_locked();
2026   size_t i;
2027   size_t h = IndexSetSize;
2028   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2029     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2030     fl->set_hint(h);
2031     if (fl->surplus() > 0) {
2032       h = i;
2033     }
2034   }
2035 }
2036 
2037 void CompactibleFreeListSpace::clearFLCensus() {
2038   assert_locked();
2039   size_t i;
2040   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2041     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2042     fl->set_prev_sweep(fl->count());
2043     fl->set_coal_births(0);
2044     fl->set_coal_deaths(0);
2045     fl->set_split_births(0);
2046     fl->set_split_deaths(0);
2047   }
2048 }
2049 
2050 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2051   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2052   setFLSurplus();
2053   setFLHints();
2054   printFLCensus(sweep_count);
2055   clearFLCensus();
2056   assert_locked();
2057   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2058 }
2059 
2060 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2061   if (size < SmallForDictionary) {
2062     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2063     return (fl->coal_desired() < 0) ||
2064            ((int)fl->count() > fl->coal_desired());
2065   } else {
2066     return dictionary()->coal_dict_over_populated(size);
2067   }
2068 }
2069 
2070 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2071   assert(size < SmallForDictionary, "Size too large for indexed list");
2072   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2073   fl->increment_coal_births();
2074   fl->increment_surplus();
2075 }
2076 
2077 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2078   assert(size < SmallForDictionary, "Size too large for indexed list");
2079   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2080   fl->increment_coal_deaths();
2081   fl->decrement_surplus();
2082 }
2083 
2084 void CompactibleFreeListSpace::coalBirth(size_t size) {
2085   if (size  < SmallForDictionary) {
2086     smallCoalBirth(size);
2087   } else {
2088     dictionary()->dict_census_update(size,
2089                                    false /* split */,
2090                                    true /* birth */);
2091   }
2092 }
2093 
2094 void CompactibleFreeListSpace::coalDeath(size_t size) {
2095   if (size  < SmallForDictionary) {
2096     smallCoalDeath(size);
2097   } else {
2098     dictionary()->dict_census_update(size,
2099                                    false /* split */,
2100                                    false /* birth */);
2101   }
2102 }
2103 
2104 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2105   assert(size < SmallForDictionary, "Size too large for indexed list");
2106   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2107   fl->increment_split_births();
2108   fl->increment_surplus();
2109 }
2110 
2111 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2112   assert(size < SmallForDictionary, "Size too large for indexed list");
2113   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2114   fl->increment_split_deaths();
2115   fl->decrement_surplus();
2116 }
2117 
2118 void CompactibleFreeListSpace::split_birth(size_t size) {
2119   if (size  < SmallForDictionary) {
2120     smallSplitBirth(size);
2121   } else {
2122     dictionary()->dict_census_update(size,
2123                                    true /* split */,
2124                                    true /* birth */);
2125   }
2126 }
2127 
2128 void CompactibleFreeListSpace::splitDeath(size_t size) {
2129   if (size  < SmallForDictionary) {
2130     smallSplitDeath(size);
2131   } else {
2132     dictionary()->dict_census_update(size,
2133                                    true /* split */,
2134                                    false /* birth */);
2135   }
2136 }
2137 
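     // Update the split census when a free chunk of size "from" is split
     // into pieces of sizes "to1" and "from - to1".  For illustration only
     // (assumed numbers): carving a 40-word piece out of a 100-word free
     // chunk records a split death at size 100 and split births at 40 and 60.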
2138 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2139   size_t to2 = from - to1;
2140   splitDeath(from);
2141   split_birth(to1);
2142   split_birth(to2);
2143 }
2144 
2145 void CompactibleFreeListSpace::print() const {
2146   print_on(tty);
2147 }
2148 
2149 void CompactibleFreeListSpace::prepare_for_verify() {
2150   assert_locked();
2151   repairLinearAllocationBlocks();
2152   // Verify that the SpoolBlocks look like free blocks of
2153   // appropriate sizes... To be done ...
2154 }
2155 
2156 class VerifyAllBlksClosure: public BlkClosure {
2157  private:
2158   const CompactibleFreeListSpace* _sp;
2159   const MemRegion                 _span;
2160   HeapWord*                       _last_addr;
2161   size_t                          _last_size;
2162   bool                            _last_was_obj;
2163   bool                            _last_was_live;
2164 
2165  public:
2166   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2167     MemRegion span) :  _sp(sp), _span(span),
2168                        _last_addr(NULL), _last_size(0),
2169                        _last_was_obj(false), _last_was_live(false) { }
2170 
2171   virtual size_t do_blk(HeapWord* addr) {
2172     size_t res;
2173     bool   was_obj  = false;
2174     bool   was_live = false;
2175     if (_sp->block_is_obj(addr)) {
2176       was_obj = true;
2177       oop p = oop(addr);
2178       guarantee(p->is_oop(), "Should be an oop");
2179       res = _sp->adjustObjectSize(p->size());
2180       if (_sp->obj_is_alive(addr)) {
2181         was_live = true;
2182         p->verify();
2183       }
2184     } else {
2185       FreeChunk* fc = (FreeChunk*)addr;
2186       res = fc->size();
2187       if (FLSVerifyLists && !fc->cantCoalesce()) {
2188         guarantee(_sp->verify_chunk_in_free_list(fc),
2189                   "Chunk should be on a free list");
2190       }
2191     }
2192     if (res == 0) {
2193       Log(gc, verify) log;
2194       log.error("Livelock: no rank reduction!");
2195       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2196                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2197         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2198         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2199       ResourceMark rm;
2200       _sp->print_on(log.error_stream());
2201       guarantee(false, "Verification failed.");
2202     }
2203     _last_addr = addr;
2204     _last_size = res;
2205     _last_was_obj  = was_obj;
2206     _last_was_live = was_live;
2207     return res;
2208   }
2209 };
2210 
2211 class VerifyAllOopsClosure: public OopClosure {
2212  private:
2213   const CMSCollector*             _collector;
2214   const CompactibleFreeListSpace* _sp;
2215   const MemRegion                 _span;
2216   const bool                      _past_remark;
2217   const CMSBitMap*                _bit_map;
2218 
2219  protected:
2220   void do_oop(void* p, oop obj) {
2221     if (_span.contains(obj)) { // the interior oop points into CMS heap
2222       if (!_span.contains(p)) { // reference from outside CMS heap
2223         // Should be a valid object; the first disjunct below allows
2224         // us to sidestep an assertion in block_is_obj() that insists
2225         // that p be in _sp. Note that several generations (and spaces)
2226         // are spanned by _span (CMS heap) above.
2227         guarantee(!_sp->is_in_reserved(obj) ||
2228                   _sp->block_is_obj((HeapWord*)obj),
2229                   "Should be an object");
2230         guarantee(obj->is_oop(), "Should be an oop");
2231         obj->verify();
2232         if (_past_remark) {
2233           // Remark has been completed, the object should be marked
2234           _bit_map->isMarked((HeapWord*)obj);
2235         }
2236       } else { // reference within CMS heap
2237         if (_past_remark) {
2238           // Remark has been completed -- so the referent should have
2239           // been marked, if referring object is.
2240           if (_bit_map->isMarked(_collector->block_start(p))) {
2241             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2242           }
2243         }
2244       }
2245     } else if (_sp->is_in_reserved(p)) {
2246       // the reference is from FLS, and points out of FLS
2247       guarantee(obj->is_oop(), "Should be an oop");
2248       obj->verify();
2249     }
2250   }
2251 
2252   template <class T> void do_oop_work(T* p) {
2253     T heap_oop = oopDesc::load_heap_oop(p);
2254     if (!oopDesc::is_null(heap_oop)) {
2255       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2256       do_oop(p, obj);
2257     }
2258   }
2259 
2260  public:
2261   VerifyAllOopsClosure(const CMSCollector* collector,
2262     const CompactibleFreeListSpace* sp, MemRegion span,
2263     bool past_remark, CMSBitMap* bit_map) :
2264     _collector(collector), _sp(sp), _span(span),
2265     _past_remark(past_remark), _bit_map(bit_map) { }
2266 
2267   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2268   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2269 };
2270 
2271 void CompactibleFreeListSpace::verify() const {
2272   assert_lock_strong(&_freelistLock);
2273   verify_objects_initialized();
2274   MemRegion span = _collector->_span;
2275   bool past_remark = (_collector->abstract_state() ==
2276                       CMSCollector::Sweeping);
2277 
2278   ResourceMark rm;
2279   HandleMark  hm;
2280 
2281   // Check integrity of CFL data structures
2282   _promoInfo.verify();
2283   _dictionary->verify();
2284   if (FLSVerifyIndexTable) {
2285     verifyIndexedFreeLists();
2286   }
2287   // Check integrity of all objects and free blocks in space
2288   {
2289     VerifyAllBlksClosure cl(this, span);
2290     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2291   }
2292   // Check that all references in the heap to FLS
2293   // are to valid objects in FLS or that references in
2294   // FLS are to valid objects elsewhere in the heap
2295   if (FLSVerifyAllHeapReferences)
2296   {
2297     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2298       _collector->markBitMap());
2299 
2300     // Iterate over all oops in the heap. Uses the _no_header version
2301     // since we are not interested in following the klass pointers.
2302     GenCollectedHeap::heap()->oop_iterate_no_header(&cl);
2303   }
2304 
2305   if (VerifyObjectStartArray) {
2306     // Verify the block offset table
2307     _bt.verify();
2308   }
2309 }
2310 
2311 #ifndef PRODUCT
2312 void CompactibleFreeListSpace::verifyFreeLists() const {
2313   if (FLSVerifyLists) {
2314     _dictionary->verify();
2315     verifyIndexedFreeLists();
2316   } else {
2317     if (FLSVerifyDictionary) {
2318       _dictionary->verify();
2319     }
2320     if (FLSVerifyIndexTable) {
2321       verifyIndexedFreeLists();
2322     }
2323   }
2324 }
2325 #endif
2326 
2327 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2328   size_t i = 0;
2329   for (; i < IndexSetStart; i++) {
2330     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2331   }
2332   for (; i < IndexSetSize; i++) {
2333     verifyIndexedFreeList(i);
2334   }
2335 }
2336 
2337 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2338   FreeChunk* fc   =  _indexedFreeList[size].head();
2339   FreeChunk* tail =  _indexedFreeList[size].tail();
2340   size_t    num = _indexedFreeList[size].count();
2341   size_t      n = 0;
2342   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2343             "Slot should have been empty");
2344   for (; fc != NULL; fc = fc->next(), n++) {
2345     guarantee(fc->size() == size, "Size inconsistency");
2346     guarantee(fc->is_free(), "!free?");
2347     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2348     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2349   }
2350   guarantee(n == num, "Incorrect count");
2351 }
2352 
2353 #ifndef PRODUCT
2354 void CompactibleFreeListSpace::check_free_list_consistency() const {
2355   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2356     "Some sizes can't be allocated without recourse to"
2357     " linear allocation buffers");
2358   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2359     "else MIN_TREE_CHUNK_SIZE is wrong");
2360   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2361   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2362 }
2363 #endif
2364 
2365 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2366   assert_lock_strong(&_freelistLock);
2367   LogTarget(Debug, gc, freelist, census) log;
2368   if (!log.is_enabled()) {
2369     return;
2370   }
2371   AdaptiveFreeList<FreeChunk> total;
2372   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2373   ResourceMark rm;
2374   outputStream* out = log.stream();
2375   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2376   size_t total_free = 0;
2377   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2378     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2379     total_free += fl->count() * fl->size();
2380     if (i % (40*IndexSetStride) == 0) {
2381       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2382     }
2383     fl->print_on(out);
2384     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2385     total.set_surplus(    total.surplus()     + fl->surplus()    );
2386     total.set_desired(    total.desired()     + fl->desired()    );
2387     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2388     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2389     total.set_count(      total.count()       + fl->count()      );
2390     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2391     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2392     total.set_split_births(total.split_births() + fl->split_births());
2393     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2394   }
2395   total.print_on(out, "TOTAL");
2396   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2397   log.print("growth: %8.5f  deficit: %8.5f",
2398             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2399                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2400             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2401   _dictionary->print_dict_census(out);
2402 }
2403 
2404 ///////////////////////////////////////////////////////////////////////////
2405 // CompactibleFreeListSpaceLAB
2406 ///////////////////////////////////////////////////////////////////////////
2407 
2408 #define VECTOR_257(x)                                                                                  \
2409   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2410   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2411      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2412      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2413      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2414      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2415      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2416      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2417      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2418      x }
2419 
2420 // Initialize with default setting for CMS, _not_
2421 // generic OldPLABSize, whose static default is different; if overridden at the
2422 // command-line, this will get reinitialized via a call to
2423 // modify_initialization() below.
2424 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2425   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2426 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2427 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2428 
2429 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2430   _cfls(cfls)
2431 {
2432   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2433   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2434        i < CompactibleFreeListSpace::IndexSetSize;
2435        i += CompactibleFreeListSpace::IndexSetStride) {
2436     _indexedFreeList[i].set_size(i);
2437     _num_blocks[i] = 0;
2438   }
2439 }
2440 
2441 static bool _CFLS_LAB_modified = false;
2442 
2443 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2444   assert(!_CFLS_LAB_modified, "Call only once");
2445   _CFLS_LAB_modified = true;
2446   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2447        i < CompactibleFreeListSpace::IndexSetSize;
2448        i += CompactibleFreeListSpace::IndexSetStride) {
2449     _blocks_to_claim[i].modify(n, wt, true /* force */);
2450   }
2451 }
2452 
2453 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2454   FreeChunk* res;
2455   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2456   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2457     // This locking manages sync with other large object allocations.
2458     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2459                     Mutex::_no_safepoint_check_flag);
2460     res = _cfls->getChunkFromDictionaryExact(word_sz);
2461     if (res == NULL) return NULL;
2462   } else {
2463     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2464     if (fl->count() == 0) {
2465       // Attempt to refill this local free list.
2466       get_from_global_pool(word_sz, fl);
2467       // If it didn't work, give up.
2468       if (fl->count() == 0) return NULL;
2469     }
2470     res = fl->get_chunk_at_head();
2471     assert(res != NULL, "Why was count non-zero?");
2472   }
2473   res->markNotFree();
2474   assert(!res->is_free(), "shouldn't be marked free");
2475   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2476   // mangle a just allocated object with a distinct pattern.
2477   debug_only(res->mangleAllocated(word_sz));
2478   return (HeapWord*)res;
2479 }
2480 
2481 // Get a chunk of blocks of the right size and update related
2482 // book-keeping stats
2483 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2484   // Get the #blocks we want to claim
2485   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2486   assert(n_blks > 0, "Error");
2487   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2488   // In some cases, when the application has a phase change,
2489   // there may be a sudden and sharp shift in the object survival
2490   // profile, and updating the counts at the end of a scavenge
2491   // may not be quick enough, giving rise to large scavenge pauses
2492   // during these phase changes. It is beneficial to detect such
2493   // changes on-the-fly during a scavenge and avoid such a phase-change
2494   // pothole. The following code is a heuristic attempt to do that.
2495   // It is protected by a product flag until we have gained
2496   // enough experience with this heuristic and fine-tuned its behavior.
2497   // WARNING: This might increase fragmentation if we overreact to
2498   // small spikes, so some kind of historical smoothing based on
2499   // previous experience with the greater reactivity might be useful.
2500   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2501   // default.
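       // For illustration only (assumed flag values): with
       // CMSOldPLABToleranceFactor, CMSOldPLABNumRefills and
       // CMSOldPLABReactivityFactor all equal to 4, and n_blks initially 50,
       // a worker that has already obtained 1600 blocks of this size during
       // the current scavenge gets multiple = 1600/(4*4*50) = 2, so n_blks
       // grows by 4*2*50 = 400 blocks (subject to the CMSOldPLABMax cap).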
2502   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2503     //
2504     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2505     // which is why there is a cast to double.
2506     //
2507     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2508     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2509     n_blks = MIN2(n_blks, CMSOldPLABMax);
2510   }
2511   assert(n_blks > 0, "Error");
2512   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2513   // Update stats table entry for this block size
2514   _num_blocks[word_sz] += fl->count();
2515 }
2516 
2517 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2518   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2519        i < CompactibleFreeListSpace::IndexSetSize;
2520        i += CompactibleFreeListSpace::IndexSetStride) {
2521     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2522            "Counter inconsistency");
2523     if (_global_num_workers[i] > 0) {
2524       // Need to smooth wrt historical average
2525       if (ResizeOldPLAB) {
2526         _blocks_to_claim[i].sample(
2527           MAX2(CMSOldPLABMin,
2528           MIN2(CMSOldPLABMax,
2529                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2530       }
2531       // Reset counters for next round
2532       _global_num_workers[i] = 0;
2533       _global_num_blocks[i] = 0;
2534       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2535     }
2536   }
2537 }
2538 
2539 // If this is changed in the future to allow parallel
2540 // access, one would need to take the FL locks and,
2541 // depending on how it is used, stagger access from
2542 // parallel threads to reduce contention.
2543 void CompactibleFreeListSpaceLAB::retire(int tid) {
2544   // We run this single-threaded with the world stopped;
2545   // so no need for locks and such.
2546   NOT_PRODUCT(Thread* t = Thread::current();)
2547   assert(Thread::current()->is_VM_thread(), "Error");
2548   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2549        i < CompactibleFreeListSpace::IndexSetSize;
2550        i += CompactibleFreeListSpace::IndexSetStride) {
2551     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2552            "Can't retire more than what we obtained");
2553     if (_num_blocks[i] > 0) {
2554       size_t num_retire =  _indexedFreeList[i].count();
2555       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2556       {
2557         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2558         //                Mutex::_no_safepoint_check_flag);
2559 
2560         // Update globals stats for num_blocks used
2561         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2562         _global_num_workers[i]++;
2563         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2564         if (num_retire > 0) {
2565           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2566           // Reset this list.
2567           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2568           _indexedFreeList[i].set_size(i);
2569         }
2570       }
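      // Logged as tid[size]: chunks returned unused / chunks obtained this
      // round / current smoothed refill target.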
2571       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2572                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2573       // Reset stats for next round
2574       _num_blocks[i]         = 0;
2575     }
2576   }
2577 }
2578 
2579 // Used by par_get_chunk_of_blocks() for the chunks from the
2580 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2581 // of "word_sz" and, if found, splits it into "word_sz"-sized chunks and
2582 // adds them to the free list "fl".  "n" is the maximum number of chunks to
2583 // be added to "fl".
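// For example (hypothetical sizes): with word_sz == 8 and
// CMSSplitIndexedFreeListBlocks enabled, the 8-word list is tried first, then
// the 16-word list (each chunk taken from it split in two), then the 24-word
// list (split three ways), and so on while k * word_sz stays below
// IndexSetSize.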
2584 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2585 
2586   // We'll try all multiples of word_sz in the indexed set, starting with
2587   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, larger multiples.
2588   // If that fails, the caller falls back to splitting a big chunk from the dictionary.
2589   {
2590     bool found;
2591     int  k;
2592     size_t cur_sz;
2593     for (k = 1, cur_sz = k * word_sz, found = false;
2594          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2595          (CMSSplitIndexedFreeListBlocks || k <= 1);
2596          k++, cur_sz = k * word_sz) {
2597       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2598       fl_for_cur_sz.set_size(cur_sz);
2599       {
2600         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2601                         Mutex::_no_safepoint_check_flag);
2602         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2603         if (gfl->count() != 0) {
2604           // nn is the number of chunks of size cur_sz that
2605           // we'd need to split k-ways each, in order to create
2606           // "n" chunks of size word_sz each.
2607           const size_t nn = MAX2(n/k, (size_t)1);
2608           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2609           found = true;
2610           if (k > 1) {
2611             // Update split death stats for the cur_sz-size blocks list:
2612             // we increment the split death count by the number of blocks
2613             // we just took from the cur_sz-size blocks list and which
2614             // we will be splitting below.
2615             ssize_t deaths = gfl->split_deaths() +
2616                              fl_for_cur_sz.count();
2617             gfl->set_split_deaths(deaths);
2618           }
2619         }
2620       }
2621       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2622       if (found) {
2623         if (k == 1) {
2624           fl->prepend(&fl_for_cur_sz);
2625         } else {
2626           // Divide each block on fl_for_cur_sz up k ways.
2627           FreeChunk* fc;
2628           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2629             // Must do this in reverse order, so that anybody attempting to
2630             // access the main chunk sees it as a single free block until we
2631             // change it.
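            // For example, with k == 3 the pieces at offsets 2 * word_sz and
            // 1 * word_sz are carved off and published first; fc itself is
            // only shrunk to word_sz on the final (i == 0) iteration, so a
            // concurrent observer sees the original chunk as a single free
            // block until then.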
2632             size_t fc_size = fc->size();
2633             assert(fc->is_free(), "Error");
2634             for (int i = k-1; i >= 0; i--) {
2635               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2636               assert((i != 0) ||
2637                         ((fc == ffc) && ffc->is_free() &&
2638                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2639                         "Counting error");
2640               ffc->set_size(word_sz);
2641               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2642               ffc->link_next(NULL);
2643               // Above must occur before BOT is updated below.
2644               OrderAccess::storestore();
2645               // splitting from the right, fc_size == i * word_sz
2646               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2647               fc_size -= word_sz;
2648               assert(fc_size == i*word_sz, "Error");
2649               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2650               _bt.verify_single_block((HeapWord*)fc, fc_size);
2651               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2652               // Push this on "fl".
2653               fl->return_chunk_at_head(ffc);
2654             }
2655             // Sanity check: the rebuilt list must be properly terminated.
2656             assert(fl->tail()->next() == NULL, "List invariant.");
2657           }
2658         }
2659         // Update birth stats for this block size.
2660         size_t num = fl->count();
2661         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2662                         Mutex::_no_safepoint_check_flag);
2663         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2664         _indexedFreeList[word_sz].set_split_births(births);
2665         return true;
2666       }
2667     }
2668     return found;
2669   }
2670 }
2671 
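// Removes from the dictionary a single chunk that can be carved into (up to)
// "n" blocks of "word_sz" words each, shrinks it to an exact multiple of
// word_sz (returning any viable remainder to the appropriate indexed free list
// or back to the dictionary), and returns it; returns NULL if no suitable
// chunk is available.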
2672 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2673 
2674   FreeChunk* fc = NULL;
2675   FreeChunk* rem_fc = NULL;
2676   size_t rem;
2677   {
2678     MutexLockerEx x(parDictionaryAllocLock(),
2679                     Mutex::_no_safepoint_check_flag);
2680     while (n > 0) {
2681       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()),
2682                                   FreeBlockDictionary<FreeChunk>::atLeast);
2683       if (fc != NULL) {
2684         break;
2685       } else {
2686         n--;
2687       }
2688     }
2689     if (fc == NULL) return NULL;
2690     // Otherwise, split up that block.
2691     assert((ssize_t)n >= 1, "Control point invariant");
2692     assert(fc->is_free(), "Error: should be a free block");
2693     _bt.verify_single_block((HeapWord*)fc, fc->size());
2694     const size_t nn = fc->size() / word_sz;
2695     n = MIN2(nn, n);
2696     assert((ssize_t)n >= 1, "Control point invariant");
2697     rem = fc->size() - n * word_sz;
2698     // If there is a remainder, and it's too small, allocate one fewer.
2699     if (rem > 0 && rem < MinChunkSize) {
2700       n--; rem += word_sz;
2701     }
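    // For example (hypothetical sizes): word_sz == 10, fc->size() == 95 and
    // MinChunkSize == 7 give n == 9 and rem == 5 < MinChunkSize, so we back
    // off to n == 8, leaving a usable remainder of 15 words.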
2702     // Note that at this point we may have n == 0.
2703     assert((ssize_t)n >= 0, "Control point invariant");
2704 
2705     // If n is 0, the chunk fc that was found is not large
2706     // enough to leave a viable remainder.  We are unable to
2707     // allocate even one block.  Return fc to the
2708     // dictionary and return, leaving "fl" empty.
2709     if (n == 0) {
2710       returnChunkToDictionary(fc);
2711       return NULL;
2712     }
2713 
2714     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2715     dictionary()->dict_census_update(fc->size(),
2716                                      true /*split*/,
2717                                      false /*birth*/);
2718 
2719     // First return the remainder, if any.
2720     // Note that we hold the lock until we decide if we're going to give
2721     // back the remainder to the dictionary, since a concurrent allocation
2722     // may otherwise see the heap as empty.  (We're willing to take that
2723     // hit if the block is a small block.)
2724     if (rem > 0) {
2725       size_t prefix_size = n * word_sz;
2726       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2727       rem_fc->set_size(rem);
2728       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2729       rem_fc->link_next(NULL);
2730       // Above must occur before BOT is updated below.
2731       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2732       OrderAccess::storestore();
2733       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2734       assert(fc->is_free(), "Error");
2735       fc->set_size(prefix_size);
2736       if (rem >= IndexSetSize) {
2737         returnChunkToDictionary(rem_fc);
2738         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2739         rem_fc = NULL;
2740       }
2741       // Otherwise, return it to the small list below.
2742     }
2743   }
2744   if (rem_fc != NULL) {
2745     MutexLockerEx x(_indexedFreeListParLocks[rem],
2746                     Mutex::_no_safepoint_check_flag);
2747     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2748     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2749     smallSplitBirth(rem);
2750   }
2751   assert(n * word_sz == fc->size(),
2752          "Chunk size " SIZE_FORMAT " is not exactly splittable into "
2753          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2754          fc->size(), n, word_sz);
2755   return fc;
2756 }
2757 
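// Used by par_get_chunk_of_blocks() when the indexed free lists cannot satisfy
// the request: takes one large chunk from the dictionary via
// get_n_way_chunk_to_split(), carves it into blocks of "word_sz" words each,
// adds them all to "fl", and updates the split-birth statistics.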
2758 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targetted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2759 
2760   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targetted_number_of_chunks);
2761 
2762   if (fc == NULL) {
2763     return;
2764   }
2765 
2766   size_t n = fc->size() / word_sz;
2767 
2768   assert((ssize_t)n > 0, "Consistency");
2769   // Now do the splitting up.
2770   // Must do this in reverse order, so that anybody attempting to
2771   // access the main chunk sees it as a single free block until we
2772   // change it.
2773   size_t fc_size = n * word_sz;
2774   // All but first chunk in this loop
2775   for (ssize_t i = n-1; i > 0; i--) {
2776     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2777     ffc->set_size(word_sz);
2778     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2779     ffc->link_next(NULL);
2780     // Above must occur before BOT is updated below.
2781     OrderAccess::storestore();
2782     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2783     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2784     fc_size -= word_sz;
2785     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2786     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2787     _bt.verify_single_block((HeapWord*)fc, fc_size);
2788     // Push this on "fl".
2789     fl->return_chunk_at_head(ffc);
2790   }
2791   // First chunk
2792   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2793   // The blocks above should show their new sizes before the first block below
2794   fc->set_size(word_sz);
2795   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2796   fc->link_next(NULL);
2797   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2798   _bt.verify_single_block((HeapWord*)fc, fc->size());
2799   fl->return_chunk_at_head(fc);
2800 
2801   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2802   {
2803     // Update the stats for this block size.
2804     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2805                     Mutex::_no_safepoint_check_flag);
2806     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2807     _indexedFreeList[word_sz].set_split_births(births);
2808     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2809     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2810   }
2811 
2812   // Sanity check: the rebuilt list must be properly terminated.
2813   assert(fl->tail()->next() == NULL, "List invariant.");
2814 }
2815 
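// Refill entry point used by the per-worker LAB code above: first try to
// assemble the blocks from the indexed free lists; if that yields nothing,
// fall back to splitting a single large chunk from the dictionary.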
2816 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2817   assert(fl->count() == 0, "Precondition.");
2818   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2819          "Precondition");
2820 
2821   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2822     // Got it
2823     return;
2824   }
2825 
2826   // Otherwise, we'll split a block from the dictionary.
2827   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2828 }
2829 
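// Ergonomic upper bound for the card-multiple task-size flags used by the
// initialize_sequential_subtasks_* methods below (presumably so that the old
// generation can always be carved into at least BitsPerWord tasks).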
2830 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
2831   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
2832   return ergo_max;
2833 }
2834 
2835 // Set up the space's par_seq_tasks structure for work claiming
2836 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2837 // XXX Need to suitably abstract and generalize this and the next
2838 // method into one.
2839 void
2840 CompactibleFreeListSpace::
2841 initialize_sequential_subtasks_for_rescan(int n_threads) {
2842   // The "size" of each task is fixed according to rescan_task_size.
2843   assert(n_threads > 0, "Unexpected n_threads argument");
2844   const size_t task_size = rescan_task_size();
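  // Round up so that the last, possibly partial, task covers the tail of the
  // used region.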
2845   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2846   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2847   assert(n_tasks == 0 ||
2848          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2849           (used_region().start() + n_tasks*task_size >= used_region().end())),
2850          "n_tasks calculation incorrect");
2851   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2852   assert(!pst->valid(), "Clobbering existing data?");
2853   // Sets the condition for completion of the subtask (how many threads
2854   // need to finish in order to be done).
2855   pst->set_n_threads(n_threads);
2856   pst->set_n_tasks((int)n_tasks);
2857 }
2858 
2859 // Set up the space's par_seq_tasks structure for work claiming
2860 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2861 void
2862 CompactibleFreeListSpace::
2863 initialize_sequential_subtasks_for_marking(int n_threads,
2864                                            HeapWord* low) {
2865   // The "size" of each task is fixed according to rescan_task_size.
2866   assert(n_threads > 0, "Unexpected n_threads argument");
2867   const size_t task_size = marking_task_size();
2868   assert(task_size > CardTable::card_size_in_words &&
2869          (task_size %  CardTable::card_size_in_words == 0),
2870          "Otherwise arithmetic below would be incorrect");
2871   MemRegion span = _old_gen->reserved();
2872   if (low != NULL) {
2873     if (span.contains(low)) {
2874       // Align low down to a card boundary so that
2875       // we can use block_offset_careful() on span boundaries.
2876       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2877                                  CardTable::card_size);
2878       // Clip span prefix at aligned_low
2879       span = span.intersection(MemRegion(aligned_low, span.end()));
2880     } else if (low > span.end()) {
2881       span = MemRegion(low, low);  // Null region
2882     } // else use entire span
2883   }
2884   assert(span.is_empty() ||
2885          ((uintptr_t)span.start() %  CardTable::card_size == 0),
2886         "span should start at a card boundary");
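  // As for rescanning above: round up so that the last, possibly partial,
  // task covers the tail of the (possibly clipped) span.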
2887   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2888   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2889   assert(n_tasks == 0 ||
2890          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2891           (span.start() + n_tasks*task_size >= span.end())),
2892          "n_tasks calculation incorrect");
2893   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2894   assert(!pst->valid(), "Clobbering existing data?");
2895   // Sets the condition for completion of the subtask (how many threads
2896   // need to finish in order to be done).
2897   pst->set_n_threads(n_threads);
2898   pst->set_n_tasks((int)n_tasks);
2899 }