1 /*
   2  * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/cms/cmsHeap.hpp"
  27 #include "gc/cms/cmsLockVerifier.hpp"
  28 #include "gc/cms/compactibleFreeListSpace.hpp"
  29 #include "gc/cms/concurrentMarkSweepGeneration.inline.hpp"
  30 #include "gc/cms/concurrentMarkSweepThread.hpp"
  31 #include "gc/shared/blockOffsetTable.inline.hpp"
  32 #include "gc/shared/collectedHeap.inline.hpp"
  33 #include "gc/shared/space.inline.hpp"
  34 #include "gc/shared/spaceDecorator.hpp"
  35 #include "logging/log.hpp"
  36 #include "logging/logStream.hpp"
  37 #include "memory/allocation.inline.hpp"
  38 #include "memory/resourceArea.hpp"
  39 #include "memory/universe.hpp"
  40 #include "oops/access.inline.hpp"
  41 #include "oops/compressedOops.inline.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "runtime/globals.hpp"
  44 #include "runtime/handles.inline.hpp"
  45 #include "runtime/init.hpp"
  46 #include "runtime/java.hpp"
  47 #include "runtime/orderAccess.inline.hpp"
  48 #include "runtime/vmThread.hpp"
  49 #include "utilities/align.hpp"
  50 #include "utilities/copy.hpp"
  51 
  52 /////////////////////////////////////////////////////////////////////////
  53 //// CompactibleFreeListSpace
  54 /////////////////////////////////////////////////////////////////////////
  55 
// The highest of the lock ranks used by the free list locks.
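// Each CompactibleFreeListSpace constructor consumes the current value and
// post-decrements it (see the _freelistLock initializer below), so spaces
// constructed later receive lower lock ranks.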
  57 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  58 
  59 // Defaults are 0 so things will break badly if incorrectly initialized.
  60 size_t CompactibleFreeListSpace::IndexSetStart  = 0;
  61 size_t CompactibleFreeListSpace::IndexSetStride = 0;
  62 size_t CompactibleFreeListSpace::_min_chunk_size_in_bytes = 0;
  63 
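// Global minimum chunk size in heap words; set once by set_cms_values() below,
// zero until then.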
  64 size_t MinChunkSize = 0;
  65 
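// Called exactly once (the asserts below enforce this) and before the first
// CompactibleFreeListSpace is constructed: the constructor checks
// sizeof(FreeChunk) against MinChunkSize and walks the indexed free lists
// using IndexSetStart/IndexSetStride.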
  66 void CompactibleFreeListSpace::set_cms_values() {
  67   // Set CMS global values
  68   assert(MinChunkSize == 0, "already set");
  69 
  70   // MinChunkSize should be a multiple of MinObjAlignment and be large enough
  71   // for chunks to contain a FreeChunk.
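  // For example, if sizeof(FreeChunk) were 24 bytes with 8-byte object
  // alignment (illustrative numbers only), _min_chunk_size_in_bytes would be
  // 24 and MinChunkSize 3 words.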
  72   _min_chunk_size_in_bytes = align_up(sizeof(FreeChunk), MinObjAlignmentInBytes);
  73   MinChunkSize = _min_chunk_size_in_bytes / BytesPerWord;
  74 
  75   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  76   IndexSetStart  = MinChunkSize;
  77   IndexSetStride = MinObjAlignment;
  78 }
  79 
  80 // Constructor
  81 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs, MemRegion mr) :
  82   _bt(bs, mr),
  // Free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks
  // are acquired in the program text. This is true today.
  88   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true,
  89                 Monitor::_safepoint_check_sometimes),
  90   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  91                           "CompactibleFreeListSpace._dict_par_lock", true,
  92                           Monitor::_safepoint_check_never),
  93   _rescan_task_size(CardTable::card_size_in_words * BitsPerWord *
  94                     CMSRescanMultiple),
  95   _marking_task_size(CardTable::card_size_in_words * BitsPerWord *
  96                     CMSConcMarkMultiple),
  97   _collector(NULL),
  98   _preconsumptionDirtyCardClosure(NULL)
  99 {
 100   assert(sizeof(FreeChunk) / BytesPerWord <= MinChunkSize,
 101          "FreeChunk is larger than expected");
 102   _bt.set_space(this);
 103   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
 104 
 105   _dictionary = new AFLBinaryTreeDictionary(mr);
 106 
 107   assert(_dictionary != NULL, "CMS dictionary initialization");
 108   // The indexed free lists are initially all empty and are lazily
 109   // filled in on demand. Initialize the array elements to NULL.
 110   initializeIndexedFreeListArray();
 111 
 112   _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 113                              SmallForLinearAlloc);
 114 
 115   // CMSIndexedFreeListReplenish should be at least 1
 116   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 117   _promoInfo.setSpace(this);
 118   if (UseCMSBestFit) {
 119     _fitStrategy = FreeBlockBestFitFirst;
 120   } else {
 121     _fitStrategy = FreeBlockStrategyNone;
 122   }
 123   check_free_list_consistency();
 124 
 125   // Initialize locks for parallel case.
 126   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 128                                             "a freelist par lock", true, Mutex::_safepoint_check_sometimes);
 129     DEBUG_ONLY(
 130       _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 131     )
 132   }
 133   _dictionary->set_par_lock(&_parDictionaryAllocLock);
 134 }
 135 
 136 // Like CompactibleSpace forward() but always calls cross_threshold() to
 137 // update the block offset table.  Removed initialize_threshold call because
 138 // CFLS does not use a block offset array for contiguous spaces.
 139 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 140                                     CompactPoint* cp, HeapWord* compact_top) {
 141   // q is alive
 142   // First check if we should switch compaction space
 143   assert(this == cp->space, "'this' should be current compaction space.");
 144   size_t compaction_max_size = pointer_delta(end(), compact_top);
 145   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 146     "virtual adjustObjectSize_v() method is not correct");
 147   size_t adjusted_size = adjustObjectSize(size);
 148   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 149          "no small fragments allowed");
 150   assert(minimum_free_block_size() == MinChunkSize,
 151          "for de-virtualized reference below");
  // We cannot leave a residual fragment that is nonzero but smaller than MinChunkSize.
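  // (An exact fit, adjusted_size == compaction_max_size, leaves no residual
  // and is the one exception permitted by the test below.)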
 153   if (adjusted_size + MinChunkSize > compaction_max_size &&
 154       adjusted_size != compaction_max_size) {
 155     do {
 156       // switch to next compaction space
 157       cp->space->set_compaction_top(compact_top);
 158       cp->space = cp->space->next_compaction_space();
 159       if (cp->space == NULL) {
 160         cp->gen = CMSHeap::heap()->young_gen();
 161         assert(cp->gen != NULL, "compaction must succeed");
 162         cp->space = cp->gen->first_compaction_space();
 163         assert(cp->space != NULL, "generation must have a first compaction space");
 164       }
 165       compact_top = cp->space->bottom();
 166       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here; it was not used above in order to save
      // the virtual dispatch.
 171       adjusted_size = cp->space->adjust_object_size_v(size);
 172       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 173       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 174     } while (adjusted_size > compaction_max_size);
 175   }
 176 
 177   // store the forwarding pointer into the mark word
 178   if ((HeapWord*)q != compact_top) {
 179     q->forward_to(oop(compact_top));
 180     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 181   } else {
 182     // if the object isn't moving we can just set the mark to the default
 183     // mark and handle it specially later on.
 184     q->init_mark();
 185     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 186   }
 187 
 188   compact_top += adjusted_size;
 189 
 190   // we need to update the offset table so that the beginnings of objects can be
 191   // found during scavenge.  Note that we are updating the offset table based on
 192   // where the object will be once the compaction phase finishes.
 193 
  // Always call cross_threshold().  A contiguous space calls it only when
  // compaction_top exceeds the current threshold; that shortcut does not
  // apply to a non-contiguous space.
 197   cp->threshold =
 198     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 199   return compact_top;
 200 }
 201 
 202 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 203 // and use of single_block instead of alloc_block.  The name here is not really
 204 // appropriate - maybe a more general name could be invented for both the
 205 // contiguous and noncontiguous spaces.
 206 
 207 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 208   _bt.single_block(start, the_end);
 209   return end();
 210 }
 211 
// Initialize the indexed free lists to their empty state.
 213 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 214   for (size_t i = 0; i < IndexSetSize; i++) {
 215     // Note that on platforms where objects are double word aligned,
 216     // the odd array elements are not used.  It is convenient, however,
 217     // to map directly from the object size to the array element.
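    // (Concretely: with double word alignment IndexSetStride is 2 and only the
    // even-indexed lists are ever populated; with word alignment every element
    // is used.)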
 218     _indexedFreeList[i].reset(IndexSetSize);
 219     _indexedFreeList[i].set_size(i);
 220     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 221     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 222     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 223     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 224   }
 225 }
 226 
 227 size_t CompactibleFreeListSpace::obj_size(const HeapWord* addr) const {
 228   return adjustObjectSize(oop(addr)->size());
 229 }
 230 
 231 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 232   for (size_t i = 1; i < IndexSetSize; i++) {
 233     assert(_indexedFreeList[i].size() == (size_t) i,
 234       "Indexed free list sizes are incorrect");
 235     _indexedFreeList[i].reset(IndexSetSize);
 236     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 237     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 238     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 239     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 240   }
 241 }
 242 
 243 void CompactibleFreeListSpace::reset(MemRegion mr) {
 244   resetIndexedFreeListArray();
 245   dictionary()->reset();
 246   if (BlockOffsetArrayUseUnallocatedBlock) {
 247     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 248     // Everything's allocated until proven otherwise.
 249     _bt.set_unallocated_block(end());
 250   }
 251   if (!mr.is_empty()) {
 252     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 253     _bt.single_block(mr.start(), mr.word_size());
 254     FreeChunk* fc = (FreeChunk*) mr.start();
 255     fc->set_size(mr.word_size());
 256     if (mr.word_size() >= IndexSetSize ) {
 257       returnChunkToDictionary(fc);
 258     } else {
 259       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 260       _indexedFreeList[mr.word_size()].return_chunk_at_head(fc);
 261     }
 262     coalBirth(mr.word_size());
 263   }
 264   _promoInfo.reset();
 265   _smallLinearAllocBlock._ptr = NULL;
 266   _smallLinearAllocBlock._word_size = 0;
 267 }
 268 
 269 void CompactibleFreeListSpace::reset_after_compaction() {
 270   // Reset the space to the new reality - one free chunk.
 271   MemRegion mr(compaction_top(), end());
 272   reset(mr);
 273   // Now refill the linear allocation block(s) if possible.
 274   refillLinearAllocBlocksIfNeeded();
 275 }
 276 
 277 // Walks the entire dictionary, returning a coterminal
 278 // chunk, if it exists. Use with caution since it involves
 279 // a potentially complete walk of a potentially large tree.
 280 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 281 
 282   assert_lock_strong(&_freelistLock);
 283 
 284   return dictionary()->find_chunk_ends_at(end());
 285 }
 286 
 287 
 288 #ifndef PRODUCT
 289 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 290   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 291     _indexedFreeList[i].allocation_stats()->set_returned_bytes(0);
 292   }
 293 }
 294 
 295 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 296   size_t sum = 0;
 297   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 298     sum += _indexedFreeList[i].allocation_stats()->returned_bytes();
 299   }
 300   return sum;
 301 }
 302 
 303 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 304   size_t count = 0;
 305   for (size_t i = IndexSetStart; i < IndexSetSize; i++) {
 306     debug_only(
 307       ssize_t total_list_count = 0;
 308       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 309          fc = fc->next()) {
 310         total_list_count++;
 311       }
 312       assert(total_list_count ==  _indexedFreeList[i].count(),
 313         "Count in list is incorrect");
 314     )
 315     count += _indexedFreeList[i].count();
 316   }
 317   return count;
 318 }
 319 
 320 size_t CompactibleFreeListSpace::totalCount() {
 321   size_t num = totalCountInIndexedFreeLists();
 322   num +=  dictionary()->total_count();
 323   if (_smallLinearAllocBlock._word_size != 0) {
 324     num++;
 325   }
 326   return num;
 327 }
 328 #endif
 329 
 330 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 331   FreeChunk* fc = (FreeChunk*) p;
 332   return fc->is_free();
 333 }
 334 
 335 size_t CompactibleFreeListSpace::used() const {
 336   return capacity() - free();
 337 }
 338 
 339 size_t CompactibleFreeListSpace::free() const {
 340   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 341   // if you do this while the structures are in flux you
 342   // may get an approximate answer only; for instance
 343   // because there is concurrent allocation either
 344   // directly by mutators or for promotion during a GC.
 345   // It's "MT-safe", however, in the sense that you are guaranteed
 346   // not to crash and burn, for instance, because of walking
 347   // pointers that could disappear as you were walking them.
 348   // The approximation is because the various components
  // that are read below are not read atomically (and
  // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 352   // this is during a resize operation at the end of GC
 353   // and at that time you are guaranteed to get the
 354   // correct actual value. However, for instance, this is
 355   // also read completely asynchronously by the "perf-sampler"
 356   // that supports jvmstat, and you are apt to see the values
 357   // flicker in such cases.
 358   assert(_dictionary != NULL, "No _dictionary?");
 359   return (_dictionary->total_chunk_size(DEBUG_ONLY(freelistLock())) +
 360           totalSizeInIndexedFreeLists() +
 361           _smallLinearAllocBlock._word_size) * HeapWordSize;
 362 }
 363 
 364 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 365   assert(_dictionary != NULL, "No _dictionary?");
 366   assert_locked();
 367   size_t res = _dictionary->max_chunk_size();
 368   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 369                        (size_t) SmallForLinearAlloc - 1));
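  // res is now the larger of the biggest chunk in the dictionary and the
  // space still available in the small linear allocation block (the latter
  // capped at SmallForLinearAlloc - 1 words).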
 370   // XXX the following could potentially be pretty slow;
 371   // should one, pessimistically for the rare cases when res
 372   // calculated above is less than IndexSetSize,
 373   // just return res calculated above? My reasoning was that
 374   // those cases will be so rare that the extra time spent doesn't
 375   // really matter....
 376   // Note: do not change the loop test i >= res + IndexSetStride
 377   // to i > res below, because i is unsigned and res may be zero.
 378   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 379        i -= IndexSetStride) {
 380     if (_indexedFreeList[i].head() != NULL) {
 381       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 382       return i;
 383     }
 384   }
 385   return res;
 386 }
 387 
 388 void LinearAllocBlock::print_on(outputStream* st) const {
 389   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 390             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 391             p2i(_ptr), _word_size, _refillSize, _allocation_size_limit);
 392 }
 393 
 394 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 395   st->print_cr("COMPACTIBLE FREELIST SPACE");
 396   st->print_cr(" Space:");
 397   Space::print_on(st);
 398 
 399   st->print_cr("promoInfo:");
 400   _promoInfo.print_on(st);
 401 
 402   st->print_cr("_smallLinearAllocBlock");
 403   _smallLinearAllocBlock.print_on(st);
 404 
 405   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 406 
 407   st->print_cr(" _fitStrategy = %s", BOOL_TO_STR(_fitStrategy));
 408 }
 409 
 410 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 411 const {
 412   reportIndexedFreeListStatistics(st);
 413   st->print_cr("Layout of Indexed Freelists");
 414   st->print_cr("---------------------------");
 415   AdaptiveFreeList<FreeChunk>::print_labels_on(st, "size");
 416   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 417     _indexedFreeList[i].print_on(st);
 418     for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL; fc = fc->next()) {
 419       st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
 420                    p2i(fc), p2i((HeapWord*)fc + i),
 421                    fc->cantCoalesce() ? "\t CC" : "");
 422     }
 423   }
 424 }
 425 
 426 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 427 const {
 428   _promoInfo.print_on(st);
 429 }
 430 
 431 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 432 const {
 433   _dictionary->report_statistics(st);
 434   st->print_cr("Layout of Freelists in Tree");
 435   st->print_cr("---------------------------");
 436   _dictionary->print_free_lists(st);
 437 }
 438 
 439 class BlkPrintingClosure: public BlkClosure {
 440   const CMSCollector*             _collector;
 441   const CompactibleFreeListSpace* _sp;
 442   const CMSBitMap*                _live_bit_map;
 443   const bool                      _post_remark;
 444   outputStream*                   _st;
 445 public:
 446   BlkPrintingClosure(const CMSCollector* collector,
 447                      const CompactibleFreeListSpace* sp,
 448                      const CMSBitMap* live_bit_map,
 449                      outputStream* st):
 450     _collector(collector),
 451     _sp(sp),
 452     _live_bit_map(live_bit_map),
 453     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 454     _st(st) { }
 455   size_t do_blk(HeapWord* addr);
 456 };
 457 
 458 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 459   size_t sz = _sp->block_size_no_stall(addr, _collector);
 460   assert(sz != 0, "Should always be able to compute a size");
 461   if (_sp->block_is_obj(addr)) {
 462     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 463     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 464       p2i(addr),
 465       dead ? "dead" : "live",
 466       sz,
 467       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 468     if (CMSPrintObjectsInDump && !dead) {
 469       oop(addr)->print_on(_st);
 470       _st->print_cr("--------------------------------------");
 471     }
 472   } else { // free block
 473     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 474       p2i(addr), sz, CMSPrintChunksInDump ? ":" : ".");
 475     if (CMSPrintChunksInDump) {
 476       ((FreeChunk*)addr)->print_on(_st);
 477       _st->print_cr("--------------------------------------");
 478     }
 479   }
 480   return sz;
 481 }
 482 
 483 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c, outputStream* st) {
 484   st->print_cr("=========================");
 485   st->print_cr("Block layout in CMS Heap:");
 486   st->print_cr("=========================");
 487   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 488   blk_iterate(&bpcl);
 489 
 490   st->print_cr("=======================================");
 491   st->print_cr("Order & Layout of Promotion Info Blocks");
 492   st->print_cr("=======================================");
 493   print_promo_info_blocks(st);
 494 
 495   st->print_cr("===========================");
 496   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 498   print_indexed_free_lists(st);
 499 
 500   st->print_cr("=================================");
 501   st->print_cr("Order of Free Lists in Dictionary");
 502   st->print_cr("=================================");
 503   print_dictionary_free_lists(st);
 504 }
 505 
 506 
 507 void CompactibleFreeListSpace::reportFreeListStatistics(const char* title) const {
 508   assert_lock_strong(&_freelistLock);
 509   Log(gc, freelist, stats) log;
 510   if (!log.is_debug()) {
 511     return;
 512   }
 513   log.debug("%s", title);
 514 
 515   LogStream out(log.debug());
 516   _dictionary->report_statistics(&out);
 517 
 518   if (log.is_trace()) {
 519     LogStream trace_out(log.trace());
 520     reportIndexedFreeListStatistics(&trace_out);
 521     size_t total_size = totalSizeInIndexedFreeLists() +
 522                        _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
 523     log.trace(" free=" SIZE_FORMAT " frag=%1.4f", total_size, flsFrag());
 524   }
 525 }
 526 
 527 void CompactibleFreeListSpace::reportIndexedFreeListStatistics(outputStream* st) const {
 528   assert_lock_strong(&_freelistLock);
 529   st->print_cr("Statistics for IndexedFreeLists:");
 530   st->print_cr("--------------------------------");
 531   size_t total_size = totalSizeInIndexedFreeLists();
 532   size_t free_blocks = numFreeBlocksInIndexedFreeLists();
 533   st->print_cr("Total Free Space: " SIZE_FORMAT, total_size);
 534   st->print_cr("Max   Chunk Size: " SIZE_FORMAT, maxChunkSizeInIndexedFreeLists());
 535   st->print_cr("Number of Blocks: " SIZE_FORMAT, free_blocks);
 536   if (free_blocks != 0) {
 537     st->print_cr("Av.  Block  Size: " SIZE_FORMAT, total_size/free_blocks);
 538   }
 539 }
 540 
 541 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 542   size_t res = 0;
 543   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 544     debug_only(
 545       ssize_t recount = 0;
 546       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 547          fc = fc->next()) {
 548         recount += 1;
 549       }
 550       assert(recount == _indexedFreeList[i].count(),
 551         "Incorrect count in list");
 552     )
 553     res += _indexedFreeList[i].count();
 554   }
 555   return res;
 556 }
 557 
 558 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 559   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 560     if (_indexedFreeList[i].head() != NULL) {
 561       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 562       return (size_t)i;
 563     }
 564   }
 565   return 0;
 566 }
 567 
 568 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 569   HeapWord* prevEnd = end();
 570   assert(prevEnd != value, "unnecessary set_end call");
 571   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 572         "New end is below unallocated block");
 573   _end = value;
 574   if (prevEnd != NULL) {
 575     // Resize the underlying block offset table.
 576     _bt.resize(pointer_delta(value, bottom()));
 577     if (value <= prevEnd) {
 578       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 579              "New end is below unallocated block");
 580     } else {
 581       // Now, take this new chunk and add it to the free blocks.
 582       // Note that the BOT has not yet been updated for this block.
 583       size_t newFcSize = pointer_delta(value, prevEnd);
 584       // Add the block to the free lists, if possible coalescing it
 585       // with the last free block, and update the BOT and census data.
 586       addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 587     }
 588   }
 589 }
 590 
 591 class FreeListSpaceDCTOC : public FilteringDCTOC {
 592   CompactibleFreeListSpace* _cfls;
 593   CMSCollector* _collector;
 594   bool _parallel;
 595 protected:
 596   // Override.
 597 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 598   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 599                                        HeapWord* bottom, HeapWord* top, \
 600                                        ClosureType* cl);                \
 601       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 602                                        HeapWord* bottom, HeapWord* top, \
 603                                        ClosureType* cl);                \
 604     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 605                                        HeapWord* bottom, HeapWord* top, \
 606                                        ClosureType* cl)
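  // For each closure type this declares the virtual dispatching entry point
  // plus its parallel and non-parallel implementations.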
 607   walk_mem_region_with_cl_DECL(ExtendedOopClosure);
 608   walk_mem_region_with_cl_DECL(FilteringClosure);
 609 
 610 public:
 611   FreeListSpaceDCTOC(CompactibleFreeListSpace* sp,
 612                      CMSCollector* collector,
 613                      ExtendedOopClosure* cl,
 614                      CardTable::PrecisionStyle precision,
 615                      HeapWord* boundary,
 616                      bool parallel) :
 617     FilteringDCTOC(sp, cl, precision, boundary),
 618     _cfls(sp), _collector(collector), _parallel(parallel) {}
 619 };
 620 
 621 // We de-virtualize the block-related calls below, since we know that our
 622 // space is a CompactibleFreeListSpace.
 623 
 624 #define FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ClosureType)           \
 625 void FreeListSpaceDCTOC::walk_mem_region_with_cl(MemRegion mr,                  \
 626                                                  HeapWord* bottom,              \
 627                                                  HeapWord* top,                 \
 628                                                  ClosureType* cl) {             \
 629    if (_parallel) {                                                             \
 630      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 631    } else {                                                                     \
 632      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 633    }                                                                            \
 634 }                                                                               \
 635 void FreeListSpaceDCTOC::walk_mem_region_with_cl_par(MemRegion mr,              \
 636                                                      HeapWord* bottom,          \
 637                                                      HeapWord* top,             \
 638                                                      ClosureType* cl) {         \
 639   /* Skip parts that are before "mr", in case "block_start" sent us             \
 640      back too far. */                                                           \
 641   HeapWord* mr_start = mr.start();                                              \
 642   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 643   HeapWord* next = bottom + bot_size;                                           \
 644   while (next < mr_start) {                                                     \
 645     bottom = next;                                                              \
 646     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 647     next = bottom + bot_size;                                                   \
 648   }                                                                             \
 649                                                                                 \
 650   while (bottom < top) {                                                        \
 651     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 652         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 653                     oop(bottom)) &&                                             \
 654         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 655       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 656       bottom += _cfls->adjustObjectSize(word_sz);                               \
 657     } else {                                                                    \
 658       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 659     }                                                                           \
 660   }                                                                             \
 661 }                                                                               \
 662 void FreeListSpaceDCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,            \
 663                                                        HeapWord* bottom,        \
 664                                                        HeapWord* top,           \
 665                                                        ClosureType* cl) {       \
 666   /* Skip parts that are before "mr", in case "block_start" sent us             \
 667      back too far. */                                                           \
 668   HeapWord* mr_start = mr.start();                                              \
 669   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 670   HeapWord* next = bottom + bot_size;                                           \
 671   while (next < mr_start) {                                                     \
 672     bottom = next;                                                              \
 673     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 674     next = bottom + bot_size;                                                   \
 675   }                                                                             \
 676                                                                                 \
 677   while (bottom < top) {                                                        \
 678     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 679         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 680                     oop(bottom)) &&                                             \
 681         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 682       size_t word_sz = oop(bottom)->oop_iterate_size(cl, mr);                   \
 683       bottom += _cfls->adjustObjectSize(word_sz);                               \
 684     } else {                                                                    \
 685       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 686     }                                                                           \
 687   }                                                                             \
 688 }
 689 
 690 // (There are only two of these, rather than N, because the split is due
 691 // only to the introduction of the FilteringClosure, a local part of the
 692 // impl of this abstraction.)
 693 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(ExtendedOopClosure)
 694 FreeListSpaceDCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 695 
 696 DirtyCardToOopClosure*
 697 CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
 698                                       CardTable::PrecisionStyle precision,
 699                                       HeapWord* boundary,
 700                                       bool parallel) {
 701   return new FreeListSpaceDCTOC(this, _collector, cl, precision, boundary, parallel);
 702 }
 703 
 704 
 705 // Note on locking for the space iteration functions:
 706 // since the collector's iteration activities are concurrent with
 707 // allocation activities by mutators, absent a suitable mutual exclusion
 708 // mechanism the iterators may go awry. For instance a block being iterated
 709 // may suddenly be allocated or divided up and part of it allocated and
 710 // so on.
 711 
 712 // Apply the given closure to each block in the space.
 713 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 714   assert_lock_strong(freelistLock());
 715   HeapWord *cur, *limit;
 716   for (cur = bottom(), limit = end(); cur < limit;
 717        cur += cl->do_blk_careful(cur));
 718 }
 719 
 720 // Apply the given closure to each block in the space.
 721 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 722   assert_lock_strong(freelistLock());
 723   HeapWord *cur, *limit;
 724   for (cur = bottom(), limit = end(); cur < limit;
 725        cur += cl->do_blk(cur));
 726 }
 727 
 728 // Apply the given closure to each oop in the space.
 729 void CompactibleFreeListSpace::oop_iterate(ExtendedOopClosure* cl) {
 730   assert_lock_strong(freelistLock());
 731   HeapWord *cur, *limit;
 732   size_t curSize;
 733   for (cur = bottom(), limit = end(); cur < limit;
 734        cur += curSize) {
 735     curSize = block_size(cur);
 736     if (block_is_obj(cur)) {
 737       oop(cur)->oop_iterate(cl);
 738     }
 739   }
 740 }
 741 
 742 // NOTE: In the following methods, in order to safely be able to
 743 // apply the closure to an object, we need to be sure that the
 744 // object has been initialized. We are guaranteed that an object
 745 // is initialized if we are holding the Heap_lock with the
 746 // world stopped.
 747 void CompactibleFreeListSpace::verify_objects_initialized() const {
 748   if (is_init_completed()) {
 749     assert_locked_or_safepoint(Heap_lock);
 750     if (Universe::is_fully_initialized()) {
 751       guarantee(SafepointSynchronize::is_at_safepoint(),
 752                 "Required for objects to be initialized");
 753     }
 754   } // else make a concession at vm start-up
 755 }
 756 
 757 // Apply the given closure to each object in the space
 758 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 759   assert_lock_strong(freelistLock());
 760   NOT_PRODUCT(verify_objects_initialized());
 761   HeapWord *cur, *limit;
 762   size_t curSize;
 763   for (cur = bottom(), limit = end(); cur < limit;
 764        cur += curSize) {
 765     curSize = block_size(cur);
 766     if (block_is_obj(cur)) {
 767       blk->do_object(oop(cur));
 768     }
 769   }
 770 }
 771 
 772 // Apply the given closure to each live object in the space
 773 //   The usage of CompactibleFreeListSpace
// by the ConcurrentMarkSweepGeneration for concurrent GCs allows
// objects in the space with references to objects that are no longer
// valid.  For example, an object may reference another object
// that has already been swept up (collected).  This method uses
 778 // obj_is_alive() to determine whether it is safe to apply the closure to
 779 // an object.  See obj_is_alive() for details on how liveness of an
 780 // object is decided.
 781 
 782 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 783   assert_lock_strong(freelistLock());
 784   NOT_PRODUCT(verify_objects_initialized());
 785   HeapWord *cur, *limit;
 786   size_t curSize;
 787   for (cur = bottom(), limit = end(); cur < limit;
 788        cur += curSize) {
 789     curSize = block_size(cur);
 790     if (block_is_obj(cur) && obj_is_alive(cur)) {
 791       blk->do_object(oop(cur));
 792     }
 793   }
 794 }
 795 
 796 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 797                                                   UpwardsObjectClosure* cl) {
 798   assert_locked(freelistLock());
 799   NOT_PRODUCT(verify_objects_initialized());
 800   assert(!mr.is_empty(), "Should be non-empty");
 801   // We use MemRegion(bottom(), end()) rather than used_region() below
 802   // because the two are not necessarily equal for some kinds of
 803   // spaces, in particular, certain kinds of free list spaces.
 804   // We could use the more complicated but more precise:
 805   // MemRegion(used_region().start(), align_up(used_region().end(), CardSize))
 806   // but the slight imprecision seems acceptable in the assertion check.
 807   assert(MemRegion(bottom(), end()).contains(mr),
 808          "Should be within used space");
 809   HeapWord* prev = cl->previous();   // max address from last time
 810   if (prev >= mr.end()) { // nothing to do
 811     return;
 812   }
 813   // This assert will not work when we go from cms space to perm
 814   // space, and use same closure. Easy fix deferred for later. XXX YSR
 815   // assert(prev == NULL || contains(prev), "Should be within space");
 816 
 817   bool last_was_obj_array = false;
 818   HeapWord *blk_start_addr, *region_start_addr;
 819   if (prev > mr.start()) {
 820     region_start_addr = prev;
 821     blk_start_addr    = prev;
    // The previous invocation may have pushed "prev" beyond the
    // last allocated block, yet there may still be blocks
    // in this region due to a particular coalescing policy.
 825     // Relax the assertion so that the case where the unallocated
 826     // block is maintained and "prev" is beyond the unallocated
 827     // block does not cause the assertion to fire.
 828     assert((BlockOffsetArrayUseUnallocatedBlock &&
 829             (!is_in(prev))) ||
 830            (blk_start_addr == block_start(region_start_addr)), "invariant");
 831   } else {
 832     region_start_addr = mr.start();
 833     blk_start_addr    = block_start(region_start_addr);
 834   }
 835   HeapWord* region_end_addr = mr.end();
 836   MemRegion derived_mr(region_start_addr, region_end_addr);
 837   while (blk_start_addr < region_end_addr) {
 838     const size_t size = block_size(blk_start_addr);
 839     if (block_is_obj(blk_start_addr)) {
 840       last_was_obj_array = cl->do_object_bm(oop(blk_start_addr), derived_mr);
 841     } else {
 842       last_was_obj_array = false;
 843     }
 844     blk_start_addr += size;
 845   }
 846   if (!last_was_obj_array) {
 847     assert((bottom() <= blk_start_addr) && (blk_start_addr <= end()),
 848            "Should be within (closed) used space");
 849     assert(blk_start_addr > prev, "Invariant");
 850     cl->set_previous(blk_start_addr); // min address for next time
 851   }
 852 }
 853 
 854 // Callers of this iterator beware: The closure application should
 855 // be robust in the face of uninitialized objects and should (always)
 856 // return a correct size so that the next addr + size below gives us a
 857 // valid block boundary. [See for instance,
 858 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 859 // in ConcurrentMarkSweepGeneration.cpp.]
 860 HeapWord*
 861 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 862   ObjectClosureCareful* cl) {
 863   assert_lock_strong(freelistLock());
 864   // Can't use used_region() below because it may not necessarily
 865   // be the same as [bottom(),end()); although we could
 866   // use [used_region().start(),align_up(used_region().end(),CardSize)),
 867   // that appears too cumbersome, so we just do the simpler check
 868   // in the assertion below.
 869   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 870          "mr should be non-empty and within used space");
 871   HeapWord *addr, *end;
 872   size_t size;
 873   for (addr = block_start_careful(mr.start()), end  = mr.end();
 874        addr < end; addr += size) {
 875     FreeChunk* fc = (FreeChunk*)addr;
 876     if (fc->is_free()) {
 877       // Since we hold the free list lock, which protects direct
 878       // allocation in this generation by mutators, a free object
 879       // will remain free throughout this iteration code.
 880       size = fc->size();
 881     } else {
 882       // Note that the object need not necessarily be initialized,
 883       // because (for instance) the free list lock does NOT protect
 884       // object initialization. The closure application below must
 885       // therefore be correct in the face of uninitialized objects.
 886       size = cl->do_object_careful_m(oop(addr), mr);
 887       if (size == 0) {
 888         // An unparsable object found. Signal early termination.
 889         return addr;
 890       }
 891     }
 892   }
 893   return NULL;
 894 }
 895 
 896 
 897 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 898   NOT_PRODUCT(verify_objects_initialized());
 899   return _bt.block_start(p);
 900 }
 901 
 902 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 903   return _bt.block_start_careful(p);
 904 }
 905 
 906 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 907   NOT_PRODUCT(verify_objects_initialized());
 908   // This must be volatile, or else there is a danger that the compiler
 909   // will compile the code below into a sometimes-infinite loop, by keeping
 910   // the value read the first time in a register.
 911   while (true) {
 912     // We must do this until we get a consistent view of the object.
 913     if (FreeChunk::indicatesFreeChunk(p)) {
 914       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 915       size_t res = fc->size();
 916 
 917       // Bugfix for systems with weak memory model (PPC64/IA64). The
 918       // block's free bit was set and we have read the size of the
 919       // block. Acquire and check the free bit again. If the block is
 920       // still free, the read size is correct.
 921       OrderAccess::acquire();
 922 
 923       // If the object is still a free chunk, return the size, else it
 924       // has been allocated so try again.
 925       if (FreeChunk::indicatesFreeChunk(p)) {
 926         assert(res != 0, "Block size should not be 0");
 927         return res;
 928       }
 929     } else {
 930       // Ensure klass read before size.
 931       Klass* k = oop(p)->klass_or_null_acquire();
 932       if (k != NULL) {
 933         assert(k->is_klass(), "Should really be klass oop.");
 934         oop o = (oop)p;
 935         assert(oopDesc::is_oop(o, true /* ignore mark word */), "Should be an oop.");
 936 
 937         size_t res = o->size_given_klass(k);
 938         res = adjustObjectSize(res);
 939         assert(res != 0, "Block size should not be 0");
 940         return res;
 941       }
 942     }
 943   }
 944 }
 945 
 946 // TODO: Now that is_parsable is gone, we should combine these two functions.
 947 // A variant of the above that uses the Printezis bits for
 948 // unparsable but allocated objects. This avoids any possible
 949 // stalls waiting for mutators to initialize objects, and is
 950 // thus potentially faster than the variant above. However,
 951 // this variant may return a zero size for a block that is
 952 // under mutation and for which a consistent size cannot be
 953 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 954 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 955                                                      const CMSCollector* c)
 956 const {
 957   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 958   // This must be volatile, or else there is a danger that the compiler
 959   // will compile the code below into a sometimes-infinite loop, by keeping
 960   // the value read the first time in a register.
 961   DEBUG_ONLY(uint loops = 0;)
 962   while (true) {
 963     // We must do this until we get a consistent view of the object.
 964     if (FreeChunk::indicatesFreeChunk(p)) {
 965       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 966       size_t res = fc->size();
 967 
 968       // Bugfix for systems with weak memory model (PPC64/IA64). The
 969       // free bit of the block was set and we have read the size of
 970       // the block. Acquire and check the free bit again. If the
 971       // block is still free, the read size is correct.
 972       OrderAccess::acquire();
 973 
 974       if (FreeChunk::indicatesFreeChunk(p)) {
 975         assert(res != 0, "Block size should not be 0");
 976         assert(loops == 0, "Should be 0");
 977         return res;
 978       }
 979     } else {
 980       // Ensure klass read before size.
 981       Klass* k = oop(p)->klass_or_null_acquire();
 982       if (k != NULL) {
 983         assert(k->is_klass(), "Should really be klass oop.");
 984         oop o = (oop)p;
 985         assert(oopDesc::is_oop(o), "Should be an oop");
 986 
 987         size_t res = o->size_given_klass(k);
 988         res = adjustObjectSize(res);
 989         assert(res != 0, "Block size should not be 0");
 990         return res;
 991       } else {
 992         // May return 0 if P-bits not present.
 993         return c->block_size_if_printezis_bits(p);
 994       }
 995     }
 996     assert(loops == 0, "Can loop at most once");
 997     DEBUG_ONLY(loops++;)
 998   }
 999 }
1000 
1001 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1002   NOT_PRODUCT(verify_objects_initialized());
1003   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1004   FreeChunk* fc = (FreeChunk*)p;
1005   if (fc->is_free()) {
1006     return fc->size();
1007   } else {
1008     // Ignore mark word because this may be a recently promoted
1009     // object whose mark word is used to chain together grey
1010     // objects (the last one would have a null value).
1011     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1012     return adjustObjectSize(oop(p)->size());
1013   }
1014 }
1015 
1016 // This implementation assumes that the property of "being an object" is
1017 // stable.  But being a free chunk may not be (because of parallel
// promotion).
1019 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1020   FreeChunk* fc = (FreeChunk*)p;
1021   assert(is_in_reserved(p), "Should be in space");
1022   if (FreeChunk::indicatesFreeChunk(p)) return false;
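  // Not marked as a free chunk: p is a parsable object only if its klass has
  // already been published; the acquire below pairs with the store that sets
  // the klass once the object is fully initialized.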
1023   Klass* k = oop(p)->klass_or_null_acquire();
1024   if (k != NULL) {
1025     // Ignore mark word because it may have been used to
1026     // chain together promoted objects (the last one
1027     // would have a null value).
1028     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1029     return true;
1030   } else {
1031     return false;  // Was not an object at the start of collection.
1032   }
1033 }
1034 
1035 // Check if the object is alive. This fact is checked either by consulting
1036 // the main marking bitmap in the sweeping phase or, if it's a permanent
1037 // generation and we're not in the sweeping phase, by checking the
1038 // perm_gen_verify_bit_map where we store the "deadness" information if
1039 // we did not sweep the perm gen in the most recent previous GC cycle.
1040 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1041   assert(SafepointSynchronize::is_at_safepoint() || !is_init_completed(),
1042          "Else races are possible");
1043   assert(block_is_obj(p), "The address should point to an object");
1044 
1045   // If we're sweeping, we use object liveness information from the main bit map
1046   // for both perm gen and old gen.
1047   // We don't need to lock the bitmap (live_map or dead_map below), because
1048   // EITHER we are in the middle of the sweeping phase, and the
1049   // main marking bit map (live_map below) is locked,
1050   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1051   // is stable, because it's mutated only in the sweeping phase.
1052   // NOTE: This method is also used by jmap where, if class unloading is
1053   // off, the results can return "false" for legitimate perm objects,
1054   // when we are not in the midst of a sweeping phase, which can result
1055   // in jmap not reporting certain perm gen objects. This will be moot
1056   // if/when the perm gen goes away in the future.
1057   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1058     CMSBitMap* live_map = _collector->markBitMap();
1059     return live_map->par_isMarked((HeapWord*) p);
1060   }
1061   return true;
1062 }
1063 
1064 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1065   FreeChunk* fc = (FreeChunk*)p;
1066   assert(is_in_reserved(p), "Should be in space");
1067   assert(_bt.block_start(p) == p, "Should be a block boundary");
1068   if (!fc->is_free()) {
1069     // Ignore mark word because it may have been used to
1070     // chain together promoted objects (the last one
1071     // would have a null value).
1072     assert(oopDesc::is_oop(oop(p), true), "Should be an oop");
1073     return true;
1074   }
1075   return false;
1076 }
1077 
1078 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistLock when you call this.
1080 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1081   size_t size = 0;
1082   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1083     debug_only(
1084       // We may be calling here without the lock in which case we
1085       // won't do this modest sanity check.
1086       if (freelistLock()->owned_by_self()) {
1087         size_t total_list_size = 0;
1088         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1089           fc = fc->next()) {
1090           total_list_size += i;
1091         }
1092         assert(total_list_size == i * _indexedFreeList[i].count(),
1093                "Count in list is incorrect");
1094       }
1095     )
1096     size += i * _indexedFreeList[i].count();
1097   }
1098   return size;
1099 }
1100 
1101 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1102   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1103   return allocate(size);
1104 }
1105 
1106 HeapWord*
1107 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1108   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1109 }
1110 
1111 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1112   assert_lock_strong(freelistLock());
1113   HeapWord* res = NULL;
1114   assert(size == adjustObjectSize(size),
1115          "use adjustObjectSize() before calling into allocate()");
1116 
1117   res = allocate_adaptive_freelists(size);
1118 
1119   if (res != NULL) {
1120     // check that res does lie in this space!
1121     assert(is_in_reserved(res), "Not in this space!");
1122     assert(is_aligned((void*)res), "alignment check");
1123 
1124     FreeChunk* fc = (FreeChunk*)res;
1125     fc->markNotFree();
1126     assert(!fc->is_free(), "shouldn't be marked free");
1127     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1128     // Verify that the block offset table shows this to
1129     // be a single block, but not one which is unallocated.
1130     _bt.verify_single_block(res, size);
1131     _bt.verify_not_unallocated(res, size);
1132     // mangle a just allocated object with a distinct pattern.
1133     debug_only(fc->mangleAllocated(size));
1134   }
1135 
1136   return res;
1137 }
1138 
1139 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1140   assert_lock_strong(freelistLock());
1141   HeapWord* res = NULL;
1142   assert(size == adjustObjectSize(size),
1143          "use adjustObjectSize() before calling into allocate()");
1144 
1145   // Strategy
1146   //   if small
  //     exact size from the small-object indexed list
1148   //     small or large linear allocation block (linAB) as appropriate
1149   //     take from lists of greater sized chunks
1150   //   else
1151   //     dictionary
1152   //     small or large linear allocation block if it has the space
1153   // Try allocating exact size from indexTable first
1154   if (size < IndexSetSize) {
1155     res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
1157       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1158         "Not removed from free list");
1159       // no block offset table adjustment is necessary on blocks in
1160       // the indexed lists.
1161 
1162     // Try allocating from the small LinAB
1163     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1164         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1165         // if successful, the above also adjusts block offset table
1166         // Note that this call will refill the LinAB to
        // satisfy the request.  This is different than
1168         // evm.
1169         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1170     } else {
1171       // Raid the exact free lists larger than size, even if they are not
1172       // overpopulated.
1173       res = (HeapWord*) getChunkFromGreater(size);
1174     }
1175   } else {
1176     // Big objects get allocated directly from the dictionary.
1177     res = (HeapWord*) getChunkFromDictionaryExact(size);
1178     if (res == NULL) {
1179       // Try hard not to fail since an allocation failure will likely
1180       // trigger a synchronous GC.  Try to get the space from the
1181       // allocation blocks.
1182       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1183     }
1184   }
1185 
1186   return res;
1187 }
1188 
1189 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1190 // when promoting obj.
1191 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1192   // Depending on the object size, expansion may require refilling either a
1193   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1194   // is added because the dictionary may over-allocate to avoid fragmentation.
1195   size_t space = obj_size;
1196   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1197   return space;
1198 }
1199 
1200 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1201   FreeChunk* ret;
1202 
1203   assert(numWords >= MinChunkSize, "Size is less than minimum");
1204   assert(linearAllocationWouldFail() || bestFitFirst(),
1205     "Should not be here");
1206 
1207   size_t i;
1208   size_t currSize = numWords + MinChunkSize;
1209   assert(is_object_aligned(currSize), "currSize should be aligned");
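  // Search the indexed lists starting at numWords + MinChunkSize: after numWords
  // are carved out of a chunk, the remainder returned to the free lists must
  // itself be at least MinChunkSize (see the assert on the dictionary path below).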
1210   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1211     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[i];
1212     if (fl->head()) {
1213       ret = getFromListGreater(fl, numWords);
1214       assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1215       return ret;
1216     }
1217   }
1218 
1219   currSize = MAX2((size_t)SmallForDictionary,
1220                   (size_t)(numWords + MinChunkSize));
1221 
1222   /* Try to get a chunk that satisfies request, while avoiding
1223      fragmentation that can't be handled. */
1224   {
1225     ret =  dictionary()->get_chunk(currSize);
1226     if (ret != NULL) {
1227       assert(ret->size() - numWords >= MinChunkSize,
1228              "Chunk is too small");
1229       _bt.allocated((HeapWord*)ret, ret->size());
1230       /* Carve returned chunk. */
1231       (void) splitChunkAndReturnRemainder(ret, numWords);
1232       /* Label this as no longer a free chunk. */
1233       assert(ret->is_free(), "This chunk should be free");
1234       ret->link_prev(NULL);
1235     }
1236     assert(ret == NULL || ret->is_free(), "Should be returning a free chunk");
1237     return ret;
1238   }
1239   ShouldNotReachHere();
1240 }
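
     // An illustrative trace of getChunkFromGreater() above, using assumed
     // values (MinChunkSize == 4, IndexSetStride == 1): a request for
     // numWords == 20 first scans the indexed lists for sizes 24, 25, ...,
     // IndexSetSize - 1; the first non-empty list supplies a chunk that is
     // split so the caller receives exactly 20 words and the remainder
     // (>= MinChunkSize) goes back onto a free list.  If all those lists are
     // empty, the dictionary is asked for at least
     // MAX2(SmallForDictionary, 24) words instead.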
1241 
1242 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) const {
1243   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1244   return _indexedFreeList[fc->size()].verify_chunk_in_free_list(fc);
1245 }
1246 
1247 bool CompactibleFreeListSpace::verify_chunk_is_linear_alloc_block(FreeChunk* fc) const {
1248   assert((_smallLinearAllocBlock._ptr != (HeapWord*)fc) ||
1249          (_smallLinearAllocBlock._word_size == fc->size()),
1250          "Linear allocation block shows incorrect size");
1251   return ((_smallLinearAllocBlock._ptr == (HeapWord*)fc) &&
1252           (_smallLinearAllocBlock._word_size == fc->size()));
1253 }
1254 
1255 // Check if the purported free chunk is present either as a linear
1256 // allocation block, the size-indexed table of (smaller) free blocks,
1257 // or the larger free blocks kept in the binary tree dictionary.
1258 bool CompactibleFreeListSpace::verify_chunk_in_free_list(FreeChunk* fc) const {
1259   if (verify_chunk_is_linear_alloc_block(fc)) {
1260     return true;
1261   } else if (fc->size() < IndexSetSize) {
1262     return verifyChunkInIndexedFreeLists(fc);
1263   } else {
1264     return dictionary()->verify_chunk_in_free_list(fc);
1265   }
1266 }
1267 
1268 #ifndef PRODUCT
1269 void CompactibleFreeListSpace::assert_locked() const {
1270   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1271 }
1272 
1273 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1274   CMSLockVerifier::assert_locked(lock);
1275 }
1276 #endif
1277 
1278 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1279   // In the parallel case, the main thread holds the free list lock
1280   // on behalf of the parallel threads.
1281   FreeChunk* fc;
1282   {
1283     // If GC is parallel, this might be called by several threads.
1284     // This should be rare enough that the locking overhead won't affect
1285     // the sequential code.
1286     MutexLockerEx x(parDictionaryAllocLock(),
1287                     Mutex::_no_safepoint_check_flag);
1288     fc = getChunkFromDictionary(size);
1289   }
1290   if (fc != NULL) {
1291     fc->dontCoalesce();
1292     assert(fc->is_free(), "Should be free, but not coalescable");
1293     // Verify that the block offset table shows this to
1294     // be a single block, but not one which is unallocated.
1295     _bt.verify_single_block((HeapWord*)fc, fc->size());
1296     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1297   }
1298   return fc;
1299 }
1300 
1301 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1302   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1303   assert_locked();
1304 
1305   // If we are tracking promotions, then first ensure space for the
1306   // promotion (including spooling space for saving the header if necessary),
1307   // then allocate and copy, and finally track the promoted info if needed.
1308   // When tracking (see PromotionInfo::track()), the mark word may
1309   // be displaced and in this case restoration of the mark word
1310   // occurs in the (oop_since_save_marks_)iterate phase.
1311   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1312     return NULL;
1313   }
1314   // Call the allocate(size_t, bool) form directly to avoid the
1315   // additional call through the allocate(size_t) form.  Having
1316   // the compiler inline the call is problematic because allocate(size_t)
1317   // is a virtual method.
1318   HeapWord* res = allocate(adjustObjectSize(obj_size));
1319   if (res != NULL) {
1320     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1321     // if we should be tracking promotions, do so.
1322     if (_promoInfo.tracking()) {
1323         _promoInfo.track((PromotedObject*)res);
1324     }
1325   }
1326   return oop(res);
1327 }
1328 
1329 HeapWord*
1330 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1331   assert_locked();
1332   assert(size >= MinChunkSize, "minimum chunk size");
1333   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1334     "maximum from smallLinearAllocBlock");
1335   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1336 }
1337 
1338 HeapWord*
1339 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1340                                                        size_t size) {
1341   assert_locked();
1342   assert(size >= MinChunkSize, "too small");
1343   HeapWord* res = NULL;
1344   // Try to do linear allocation from blk, making sure first that the block is non-empty.
1345   if (blk->_word_size == 0) {
1346     // We have probably been unable to fill this either in the prologue or
1347     // when it was exhausted at the last linear allocation. Bail out until
1348     // next time.
1349     assert(blk->_ptr == NULL, "consistency check");
1350     return NULL;
1351   }
1352   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1353   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1354   if (res != NULL) return res;
1355 
1356   // about to exhaust this linear allocation block
1357   if (blk->_word_size == size) { // exactly satisfied
1358     res = blk->_ptr;
1359     _bt.allocated(res, blk->_word_size);
1360   } else if (size + MinChunkSize <= blk->_refillSize) {
1361     size_t sz = blk->_word_size;
1362     // Update _unallocated_block if the size is such that the chunk would be
1363     // returned to the indexed free list.  All other chunks in the indexed
1364     // free lists are allocated from the dictionary so that _unallocated_block
1365     // has already been adjusted for them.  Do it here so that the adjustment
1366     // is made for all chunks added back to the indexed free lists.
1367     if (sz < SmallForDictionary) {
1368       _bt.allocated(blk->_ptr, sz);
1369     }
1370     // Return the chunk that isn't big enough, and then refill below.
1371     addChunkToFreeLists(blk->_ptr, sz);
1372     split_birth(sz);
1373     // Don't keep statistics on adding back chunk from a LinAB.
1374   } else {
1375     // A refilled block would not satisfy the request.
1376     return NULL;
1377   }
1378 
1379   blk->_ptr = NULL; blk->_word_size = 0;
1380   refillLinearAllocBlock(blk);
1381   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1382          "block was replenished");
1383   if (res != NULL) {
1384     split_birth(size);
1385     repairLinearAllocBlock(blk);
1386   } else if (blk->_ptr != NULL) {
1387     res = blk->_ptr;
1388     size_t blk_size = blk->_word_size;
1389     blk->_word_size -= size;
1390     blk->_ptr  += size;
1391     split_birth(size);
1392     repairLinearAllocBlock(blk);
1393     // Update BOT last so that other (parallel) GC threads see a consistent
1394     // view of the BOT and free blocks.
1395     // Above must occur before BOT is updated below.
1396     OrderAccess::storestore();
1397     _bt.split_block(res, blk_size, size);  // adjust block offset table
1398   }
1399   return res;
1400 }
1401 
1402 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1403                                         LinearAllocBlock* blk,
1404                                         size_t size) {
1405   assert_locked();
1406   assert(size >= MinChunkSize, "too small");
1407 
1408   HeapWord* res = NULL;
1409   // This is the common case.  Keep it simple.
1410   if (blk->_word_size >= size + MinChunkSize) {
1411     assert(blk->_ptr != NULL, "consistency check");
1412     res = blk->_ptr;
1413     // Note that the BOT is up-to-date for the linAB before allocation.  It
1414     // indicates the start of the linAB.  The split_block() updates the
1415     // BOT for the linAB after the allocation (indicates the start of the
1416     // next chunk to be allocated).
1417     size_t blk_size = blk->_word_size;
1418     blk->_word_size -= size;
1419     blk->_ptr  += size;
1420     split_birth(size);
1421     repairLinearAllocBlock(blk);
1422     // Update BOT last so that other (parallel) GC threads see a consistent
1423     // view of the BOT and free blocks.
1424     // Above must occur before BOT is updated below.
1425     OrderAccess::storestore();
1426     _bt.split_block(res, blk_size, size);  // adjust block offset table
1427     _bt.allocated(res, size);
1428   }
1429   return res;
1430 }
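
     // A small worked example for the remainder path above, with assumed
     // sizes: if the linAB currently spans 100 words and a 30-word request
     // arrives, the first 30 words are handed out (res is the old _ptr), the
     // block is advanced to _ptr + 30 with _word_size == 70, and the BOT is
     // only updated after the free-block fields are consistent, as the
     // comments above require.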
1431 
1432 FreeChunk*
1433 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1434   assert_locked();
1435   assert(size < SmallForDictionary, "just checking");
1436   FreeChunk* res;
1437   res = _indexedFreeList[size].get_chunk_at_head();
1438   if (res == NULL) {
1439     res = getChunkFromIndexedFreeListHelper(size);
1440   }
1441   _bt.verify_not_unallocated((HeapWord*) res, size);
1442   assert(res == NULL || res->size() == size, "Incorrect block size");
1443   return res;
1444 }
1445 
1446 FreeChunk*
1447 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1448   bool replenish) {
1449   assert_locked();
1450   FreeChunk* fc = NULL;
1451   if (size < SmallForDictionary) {
1452     assert(_indexedFreeList[size].head() == NULL ||
1453       _indexedFreeList[size].surplus() <= 0,
1454       "List for this size should be empty or under populated");
1455     // Try best fit in exact lists before replenishing the list
1456     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1457       // Replenish list.
1458       //
1459       // Things tried that failed.
1460       //   Tried allocating out of the two LinAB's first before
1461       // replenishing lists.
1462       //   Tried small linAB of size 256 (size in indexed list)
1463       // and replenishing indexed lists from the small linAB.
1464       //
1465       FreeChunk* newFc = NULL;
1466       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1467       if (replenish_size < SmallForDictionary) {
1468         // Do not replenish from an underpopulated size.
1469         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1470             _indexedFreeList[replenish_size].head() != NULL) {
1471           newFc = _indexedFreeList[replenish_size].get_chunk_at_head();
1472         } else if (bestFitFirst()) {
1473           newFc = bestFitSmall(replenish_size);
1474         }
1475       }
1476       if (newFc == NULL && replenish_size > size) {
1477         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1478         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1479       }
1480       // Note: The split-death stats update for the block obtained above
1481       // will be recorded below, precisely when we know we are actually
1482       // going to split it into more than one piece.
1483       if (newFc != NULL) {
1484         if  (replenish || CMSReplenishIntermediate) {
1485           // Replenish this list and return one block to caller.
1486           size_t i;
1487           FreeChunk *curFc, *nextFc;
1488           size_t num_blk = newFc->size() / size;
1489           assert(num_blk >= 1, "Smaller than requested?");
1490           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1491           if (num_blk > 1) {
1492             // we are sure we will be splitting the block just obtained
1493             // into multiple pieces; record the split-death of the original
1494             splitDeath(replenish_size);
1495           }
1496           // carve up and link blocks 0, ..., num_blk - 2
1497           // The last chunk is not added to the lists but is returned as the
1498           // free chunk.
1499           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1500                i = 0;
1501                i < (num_blk - 1);
1502                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1503                i++) {
1504             curFc->set_size(size);
1505             // Don't record this as a return in order to try and
1506             // determine the "returns" from a GC.
1507             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1508             _indexedFreeList[size].return_chunk_at_tail(curFc, false);
1509             _bt.mark_block((HeapWord*)curFc, size);
1510             split_birth(size);
1511             // Don't record the initial population of the indexed list
1512             // as a split birth.
1513           }
1514 
1515           // check that the arithmetic was OK above
1516           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1517             "inconsistency in carving newFc");
1518           curFc->set_size(size);
1519           _bt.mark_block((HeapWord*)curFc, size);
1520           split_birth(size);
1521           fc = curFc;
1522         } else {
1523           // Return entire block to caller
1524           fc = newFc;
1525         }
1526       }
1527     }
1528   } else {
1529     // Get a free chunk from the free chunk dictionary to be returned to
1530     // replenish the indexed free list.
1531     fc = getChunkFromDictionaryExact(size);
1532   }
1533   // assert(fc == NULL || fc->is_free(), "Should be returning a free chunk");
1534   return fc;
1535 }
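
     // An illustrative replenish for the helper above, with assumed flag
     // values: for size == 8 words and CMSIndexedFreeListReplenish == 4,
     // replenish_size is 32 words.  A 32-word chunk obtained above is carved
     // into num_blk == 4 pieces of 8 words each; the first three are appended
     // to _indexedFreeList[8] and the last one is returned to the caller.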
1536 
1537 FreeChunk*
1538 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1539   assert_locked();
1540   FreeChunk* fc = _dictionary->get_chunk(size);
1541   if (fc == NULL) {
1542     return NULL;
1543   }
1544   _bt.allocated((HeapWord*)fc, fc->size());
1545   if (fc->size() >= size + MinChunkSize) {
1546     fc = splitChunkAndReturnRemainder(fc, size);
1547   }
1548   assert(fc->size() >= size, "chunk too small");
1549   assert(fc->size() < size + MinChunkSize, "chunk too big");
1550   _bt.verify_single_block((HeapWord*)fc, fc->size());
1551   return fc;
1552 }
1553 
1554 FreeChunk*
1555 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1556   assert_locked();
1557   FreeChunk* fc = _dictionary->get_chunk(size);
1558   if (fc == NULL) {
1559     return fc;
1560   }
1561   _bt.allocated((HeapWord*)fc, fc->size());
1562   if (fc->size() == size) {
1563     _bt.verify_single_block((HeapWord*)fc, size);
1564     return fc;
1565   }
1566   assert(fc->size() > size, "get_chunk() guarantee");
1567   if (fc->size() < size + MinChunkSize) {
1568     // Return the chunk to the dictionary and go get a bigger one.
1569     returnChunkToDictionary(fc);
1570     fc = _dictionary->get_chunk(size + MinChunkSize);
1571     if (fc == NULL) {
1572       return NULL;
1573     }
1574     _bt.allocated((HeapWord*)fc, fc->size());
1575   }
1576   assert(fc->size() >= size + MinChunkSize, "tautology");
1577   fc = splitChunkAndReturnRemainder(fc, size);
1578   assert(fc->size() == size, "chunk is wrong size");
1579   _bt.verify_single_block((HeapWord*)fc, size);
1580   return fc;
1581 }
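
     // A worked example for getChunkFromDictionaryExact() above, with an
     // assumed MinChunkSize of 4 words: a request for 300 words answered by a
     // 302-word chunk cannot simply be trimmed, because the 2-word tail would
     // be smaller than MinChunkSize.  The 302-word chunk is therefore returned
     // to the dictionary and a chunk of at least 304 words is requested
     // instead, which can then be split into an exact 300-word chunk plus a
     // remainder of at least MinChunkSize.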
1582 
1583 void
1584 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1585   assert_locked();
1586 
1587   size_t size = chunk->size();
1588   _bt.verify_single_block((HeapWord*)chunk, size);
1589   // adjust _unallocated_block downward, as necessary
1590   _bt.freed((HeapWord*)chunk, size);
1591   _dictionary->return_chunk(chunk);
1592 #ifndef PRODUCT
1593   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1594     TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >* tc = TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::as_TreeChunk(chunk);
1595     TreeList<FreeChunk, AdaptiveFreeList<FreeChunk> >* tl = tc->list();
1596     tl->verify_stats();
1597   }
1598 #endif // PRODUCT
1599 }
1600 
1601 void
1602 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1603   assert_locked();
1604   size_t size = fc->size();
1605   _bt.verify_single_block((HeapWord*) fc, size);
1606   _bt.verify_not_unallocated((HeapWord*) fc, size);
1607   _indexedFreeList[size].return_chunk_at_tail(fc);
1608 #ifndef PRODUCT
1609   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1610      _indexedFreeList[size].verify_stats();
1611   }
1612 #endif // PRODUCT
1613 }
1614 
1615 // Add the chunk at the end of the space, coalescing it with the largest
1616 // block if that block is coterminal, and update BOT and census data. We would
1617 // of course have preferred to coalesce it with the
1618 // last block, but it's currently less expensive to find the
1619 // largest block than it is to find the last.
1620 void
1621 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1622   HeapWord* chunk, size_t     size) {
1623   // check that the chunk does lie in this space!
1624   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1625   // One of the parallel gc task threads may be here
1626   // whilst others are allocating.
1627   Mutex* lock = &_parDictionaryAllocLock;
1628   FreeChunk* ec;
1629   {
1630     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1631     ec = dictionary()->find_largest_dict();  // get largest block
1632     if (ec != NULL && ec->end() == (uintptr_t*) chunk) {
1633       // It's a coterminal block - we can coalesce.
1634       size_t old_size = ec->size();
1635       coalDeath(old_size);
1636       removeChunkFromDictionary(ec);
1637       size += old_size;
1638     } else {
1639       ec = (FreeChunk*)chunk;
1640     }
1641   }
1642   ec->set_size(size);
1643   debug_only(ec->mangleFreed(size));
1644   if (size < SmallForDictionary) {
1645     lock = _indexedFreeListParLocks[size];
1646   }
1647   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1648   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1649   // record the birth under the lock since the recording involves
1650   // manipulation of the list on which the chunk lives and
1651   // if the chunk is allocated and is the last on the list,
1652   // the list can go away.
1653   coalBirth(size);
1654 }
1655 
1656 void
1657 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1658                                               size_t     size) {
1659   // check that the chunk does lie in this space!
1660   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1661   assert_locked();
1662   _bt.verify_single_block(chunk, size);
1663 
1664   FreeChunk* fc = (FreeChunk*) chunk;
1665   fc->set_size(size);
1666   debug_only(fc->mangleFreed(size));
1667   if (size < SmallForDictionary) {
1668     returnChunkToFreeList(fc);
1669   } else {
1670     returnChunkToDictionary(fc);
1671   }
1672 }
1673 
1674 void
1675 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1676   size_t size, bool coalesced) {
1677   assert_locked();
1678   assert(chunk != NULL, "null chunk");
1679   if (coalesced) {
1680     // repair BOT
1681     _bt.single_block(chunk, size);
1682   }
1683   addChunkToFreeLists(chunk, size);
1684 }
1685 
1686 // We _must_ find the purported chunk on our free lists;
1687 // we assert if we don't.
1688 void
1689 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1690   size_t size = fc->size();
1691   assert_locked();
1692   debug_only(verifyFreeLists());
1693   if (size < SmallForDictionary) {
1694     removeChunkFromIndexedFreeList(fc);
1695   } else {
1696     removeChunkFromDictionary(fc);
1697   }
1698   _bt.verify_single_block((HeapWord*)fc, size);
1699   debug_only(verifyFreeLists());
1700 }
1701 
1702 void
1703 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1704   size_t size = fc->size();
1705   assert_locked();
1706   assert(fc != NULL, "null chunk");
1707   _bt.verify_single_block((HeapWord*)fc, size);
1708   _dictionary->remove_chunk(fc);
1709   // adjust _unallocated_block upward, as necessary
1710   _bt.allocated((HeapWord*)fc, size);
1711 }
1712 
1713 void
1714 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1715   assert_locked();
1716   size_t size = fc->size();
1717   _bt.verify_single_block((HeapWord*)fc, size);
1718   NOT_PRODUCT(
1719     if (FLSVerifyIndexTable) {
1720       verifyIndexedFreeList(size);
1721     }
1722   )
1723   _indexedFreeList[size].remove_chunk(fc);
1724   NOT_PRODUCT(
1725     if (FLSVerifyIndexTable) {
1726       verifyIndexedFreeList(size);
1727     }
1728   )
1729 }
1730 
1731 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1732   /* A hint is the next larger size that has a surplus.
1733      Start search at a size large enough to guarantee that
1734      the excess is >= MinChunkSize. */
1735   size_t start = align_object_size(numWords + MinChunkSize);
1736   if (start < IndexSetSize) {
1737     AdaptiveFreeList<FreeChunk>* it   = _indexedFreeList;
1738     size_t    hint = _indexedFreeList[start].hint();
1739     while (hint < IndexSetSize) {
1740       assert(is_object_aligned(hint), "hint should be aligned");
1741       AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[hint];
1742       if (fl->surplus() > 0 && fl->head() != NULL) {
1743         // Found a list with surplus, reset original hint
1744         // and split out a free chunk which is returned.
1745         _indexedFreeList[start].set_hint(hint);
1746         FreeChunk* res = getFromListGreater(fl, numWords);
1747         assert(res == NULL || res->is_free(),
1748           "Should be returning a free chunk");
1749         return res;
1750       }
1751       hint = fl->hint(); /* keep looking */
1752     }
1753     /* None found. */
1754     it[start].set_hint(IndexSetSize);
1755   }
1756   return NULL;
1757 }
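
     // The hint chain above, illustrated with assumed numbers (MinChunkSize
     // == 4, object alignment of one word): for numWords == 10 the search
     // starts at list 14.  If list 14's hint is 20, list 20 has no surplus
     // but points at 32, and list 32 does have a surplus, then list 14's hint
     // is reset to 32 and a 32-word chunk is split so that 10 words are
     // returned and the 22-word excess goes back onto a free list.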
1758 
1759 /* Requires fl->size >= numWords + MinChunkSize */
1760 FreeChunk* CompactibleFreeListSpace::getFromListGreater(AdaptiveFreeList<FreeChunk>* fl,
1761   size_t numWords) {
1762   FreeChunk *curr = fl->head();
1763   assert(curr != NULL, "List is empty");
1764   assert(numWords >= MinChunkSize, "Word size is too small");
1765   size_t oldNumWords = curr->size();
1766   assert(oldNumWords >= numWords + MinChunkSize,
1767         "Size of chunks in the list is too small");
1768 
1769   fl->remove_chunk(curr);
1770   // recorded indirectly by splitChunkAndReturnRemainder -
1771   // smallSplit(oldNumWords, numWords);
1772   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1773   // Does anything have to be done for the remainder in terms of
1774   // fixing the card table?
1775   assert(new_chunk == NULL || new_chunk->is_free(),
1776     "Should be returning a free chunk");
1777   return new_chunk;
1778 }
1779 
1780 FreeChunk*
1781 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1782   size_t new_size) {
1783   assert_locked();
1784   size_t size = chunk->size();
1785   assert(size > new_size, "Split from a smaller block?");
1786   assert(is_aligned(chunk), "alignment problem");
1787   assert(size == adjustObjectSize(size), "alignment problem");
1788   size_t rem_sz = size - new_size;
1789   assert(rem_sz == adjustObjectSize(rem_sz), "alignment problem");
1790   assert(rem_sz >= MinChunkSize, "Free chunk smaller than minimum");
1791   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1792   assert(is_aligned(ffc), "alignment problem");
1793   ffc->set_size(rem_sz);
1794   ffc->link_next(NULL);
1795   ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
1796   // Above must occur before BOT is updated below.
1797   // adjust block offset table
1798   OrderAccess::storestore();
1799   assert(chunk->is_free() && ffc->is_free(), "Error");
1800   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1801   if (rem_sz < SmallForDictionary) {
1802     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
1803     bool is_par = Thread::current()->is_GC_task_thread();
1804     if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
1805     returnChunkToFreeList(ffc);
1806     split(size, rem_sz);
1807     if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
1808   } else {
1809     returnChunkToDictionary(ffc);
1810     split(size, rem_sz);
1811   }
1812   chunk->set_size(new_size);
1813   return chunk;
1814 }
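
     // Split arithmetic for the function above, with assumed sizes: splitting
     // a 100-word free chunk at new_size == 60 places the 40-word remainder
     // header at chunk + 60, publishes it as free before the BOT is updated,
     // and returns the original chunk trimmed to 60 words.  The remainder
     // goes to an indexed free list or to the dictionary depending on whether
     // it is below SmallForDictionary.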
1815 
1816 void
1817 CompactibleFreeListSpace::sweep_completed() {
1818   // Now that space is probably plentiful, refill linear
1819   // allocation blocks as needed.
1820   refillLinearAllocBlocksIfNeeded();
1821 }
1822 
1823 void
1824 CompactibleFreeListSpace::gc_prologue() {
1825   assert_locked();
1826   reportFreeListStatistics("Before GC:");
1827   refillLinearAllocBlocksIfNeeded();
1828 }
1829 
1830 void
1831 CompactibleFreeListSpace::gc_epilogue() {
1832   assert_locked();
1833   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1834   _promoInfo.stopTrackingPromotions();
1835   repairLinearAllocationBlocks();
1836   reportFreeListStatistics("After GC:");
1837 }
1838 
1839 // Iteration support, mostly delegated from a CMS generation
1840 
1841 void CompactibleFreeListSpace::save_marks() {
1842   assert(Thread::current()->is_VM_thread(),
1843          "Global variable should only be set when single-threaded");
1844   // Mark the "end" of the used space at the time of this call;
1845   // note, however, that promoted objects from this point
1846   // on are tracked in the _promoInfo below.
1847   set_saved_mark_word(unallocated_block());
1848 #ifdef ASSERT
1849   // Check the sanity of save_marks() etc.
1850   MemRegion ur    = used_region();
1851   MemRegion urasm = used_region_at_save_marks();
1852   assert(ur.contains(urasm),
1853          " Error at save_marks(): [" PTR_FORMAT "," PTR_FORMAT ")"
1854          " should contain [" PTR_FORMAT "," PTR_FORMAT ")",
1855          p2i(ur.start()), p2i(ur.end()), p2i(urasm.start()), p2i(urasm.end()));
1856 #endif
1857   // inform allocator that promotions should be tracked.
1858   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1859   _promoInfo.startTrackingPromotions();
1860 }
1861 
1862 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1863   assert(_promoInfo.tracking(), "No preceding save_marks?");
1864   return _promoInfo.noPromotions();
1865 }
1866 
1867 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1868                                                                             \
1869 void CompactibleFreeListSpace::                                             \
1870 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1871   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1872   /*                                                                        \
1873    * This also restores any displaced headers and removes the elements from \
1874    * the iteration set as they are processed, so that we have a clean slate \
1875    * at the end of the iteration. Note, thus, that if new objects are       \
1876    * promoted as a result of the iteration they are iterated over as well.  \
1877    */                                                                       \
1878   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1879 }
1880 
1881 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1882 
1883 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1884   return _smallLinearAllocBlock._word_size == 0;
1885 }
1886 
1887 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1888   // Fix up linear allocation blocks to look like free blocks
1889   repairLinearAllocBlock(&_smallLinearAllocBlock);
1890 }
1891 
1892 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1893   assert_locked();
1894   if (blk->_ptr != NULL) {
1895     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1896            "Minimum block size requirement");
1897     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1898     fc->set_size(blk->_word_size);
1899     fc->link_prev(NULL);   // mark as free
1900     fc->dontCoalesce();
1901     assert(fc->is_free(), "just marked it free");
1902     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1903   }
1904 }
1905 
1906 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1907   assert_locked();
1908   if (_smallLinearAllocBlock._ptr == NULL) {
1909     assert(_smallLinearAllocBlock._word_size == 0,
1910       "Size of linAB should be zero if the ptr is NULL");
1911     // Reset the linAB refill and allocation size limit.
1912     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1913   }
1914   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1915 }
1916 
1917 void
1918 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1919   assert_locked();
1920   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1921          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1922          "blk invariant");
1923   if (blk->_ptr == NULL) {
1924     refillLinearAllocBlock(blk);
1925   }
1926 }
1927 
1928 void
1929 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1930   assert_locked();
1931   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1932          "linear allocation block should be empty");
1933   FreeChunk* fc;
1934   if (blk->_refillSize < SmallForDictionary &&
1935       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1936     // A linAB's strategy might be to use small sizes to reduce
1937     // fragmentation but still get the benefits of allocation from a
1938     // linAB.
1939   } else {
1940     fc = getChunkFromDictionary(blk->_refillSize);
1941   }
1942   if (fc != NULL) {
1943     blk->_ptr  = (HeapWord*)fc;
1944     blk->_word_size = fc->size();
1945     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1946   }
1947 }
1948 
1949 // Support for compaction
1950 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1951   scan_and_forward(this, cp);
1952   // Prepare_for_compaction() uses the space between live objects
1953   // so that later phases can skip dead space quickly.  So verification
1954   // of the free lists doesn't work after this point.
1955 }
1956 
1957 void CompactibleFreeListSpace::adjust_pointers() {
1958   // In other versions of adjust_pointers(), a bail out
1959   // based on the amount of live data in the generation
1960   // (i.e., if 0, bail out) may be used.
1961   // Cannot test used() == 0 here because the free lists have already
1962   // been mangled by the compaction.
1963 
1964   scan_and_adjust_pointers(this);
1965   // See note about verification in prepare_for_compaction().
1966 }
1967 
1968 void CompactibleFreeListSpace::compact() {
1969   scan_and_compact(this);
1970 }
1971 
1972 // Fragmentation metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1973 // where fbs is free block sizes
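     // For example, three free blocks of sizes {2, 2, 4} words give
     //   1 - (2*2 + 2*2 + 4*4) / (2 + 2 + 4)^2  =  1 - 24/64  =  0.625,
     // whereas a single 8-word block gives 1 - 64/64 = 0 (no fragmentation).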
1974 double CompactibleFreeListSpace::flsFrag() const {
1975   size_t itabFree = totalSizeInIndexedFreeLists();
1976   double frag = 0.0;
1977   size_t i;
1978 
1979   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1980     double sz  = i;
1981     frag      += _indexedFreeList[i].count() * (sz * sz);
1982   }
1983 
1984   double totFree = itabFree +
1985                    _dictionary->total_chunk_size(DEBUG_ONLY(freelistLock()));
1986   if (totFree > 0) {
1987     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
1988             (totFree * totFree));
1989     frag = (double)1.0  - frag;
1990   } else {
1991     assert(frag == 0.0, "Follows from totFree == 0");
1992   }
1993   return frag;
1994 }
1995 
1996 void CompactibleFreeListSpace::beginSweepFLCensus(
1997   float inter_sweep_current,
1998   float inter_sweep_estimate,
1999   float intra_sweep_estimate) {
2000   assert_locked();
2001   size_t i;
2002   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2003     AdaptiveFreeList<FreeChunk>* fl    = &_indexedFreeList[i];
2004     log_trace(gc, freelist)("size[" SIZE_FORMAT "] : ", i);
2005     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2006     fl->set_coal_desired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2007     fl->set_before_sweep(fl->count());
2008     fl->set_bfr_surp(fl->surplus());
2009   }
2010   _dictionary->begin_sweep_dict_census(CMSLargeCoalSurplusPercent,
2011                                     inter_sweep_current,
2012                                     inter_sweep_estimate,
2013                                     intra_sweep_estimate);
2014 }
2015 
2016 void CompactibleFreeListSpace::setFLSurplus() {
2017   assert_locked();
2018   size_t i;
2019   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2020     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2021     fl->set_surplus(fl->count() -
2022                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2023   }
2024 }
2025 
2026 void CompactibleFreeListSpace::setFLHints() {
2027   assert_locked();
2028   size_t i;
2029   size_t h = IndexSetSize;
2030   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2031     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2032     fl->set_hint(h);
2033     if (fl->surplus() > 0) {
2034       h = i;
2035     }
2036   }
2037 }
2038 
2039 void CompactibleFreeListSpace::clearFLCensus() {
2040   assert_locked();
2041   size_t i;
2042   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2043     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2044     fl->set_prev_sweep(fl->count());
2045     fl->set_coal_births(0);
2046     fl->set_coal_deaths(0);
2047     fl->set_split_births(0);
2048     fl->set_split_deaths(0);
2049   }
2050 }
2051 
2052 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2053   log_debug(gc, freelist)("CMS: Large block " PTR_FORMAT, p2i(dictionary()->find_largest_dict()));
2054   setFLSurplus();
2055   setFLHints();
2056   printFLCensus(sweep_count);
2057   clearFLCensus();
2058   assert_locked();
2059   _dictionary->end_sweep_dict_census(CMSLargeSplitSurplusPercent);
2060 }
2061 
2062 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2063   if (size < SmallForDictionary) {
2064     AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2065     return (fl->coal_desired() < 0) ||
2066            ((int)fl->count() > fl->coal_desired());
2067   } else {
2068     return dictionary()->coal_dict_over_populated(size);
2069   }
2070 }
2071 
2072 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2073   assert(size < SmallForDictionary, "Size too large for indexed list");
2074   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2075   fl->increment_coal_births();
2076   fl->increment_surplus();
2077 }
2078 
2079 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2080   assert(size < SmallForDictionary, "Size too large for indexed list");
2081   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2082   fl->increment_coal_deaths();
2083   fl->decrement_surplus();
2084 }
2085 
2086 void CompactibleFreeListSpace::coalBirth(size_t size) {
2087   if (size  < SmallForDictionary) {
2088     smallCoalBirth(size);
2089   } else {
2090     dictionary()->dict_census_update(size,
2091                                    false /* split */,
2092                                    true /* birth */);
2093   }
2094 }
2095 
2096 void CompactibleFreeListSpace::coalDeath(size_t size) {
2097   if (size < SmallForDictionary) {
2098     smallCoalDeath(size);
2099   } else {
2100     dictionary()->dict_census_update(size,
2101                                    false /* split */,
2102                                    false /* birth */);
2103   }
2104 }
2105 
2106 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2107   assert(size < SmallForDictionary, "Size too large for indexed list");
2108   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2109   fl->increment_split_births();
2110   fl->increment_surplus();
2111 }
2112 
2113 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2114   assert(size < SmallForDictionary, "Size too large for indexed list");
2115   AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[size];
2116   fl->increment_split_deaths();
2117   fl->decrement_surplus();
2118 }
2119 
2120 void CompactibleFreeListSpace::split_birth(size_t size) {
2121   if (size  < SmallForDictionary) {
2122     smallSplitBirth(size);
2123   } else {
2124     dictionary()->dict_census_update(size,
2125                                    true /* split */,
2126                                    true /* birth */);
2127   }
2128 }
2129 
2130 void CompactibleFreeListSpace::splitDeath(size_t size) {
2131   if (size  < SmallForDictionary) {
2132     smallSplitDeath(size);
2133   } else {
2134     dictionary()->dict_census_update(size,
2135                                    true /* split */,
2136                                    false /* birth */);
2137   }
2138 }
2139 
2140 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2141   size_t to2 = from - to1;
2142   splitDeath(from);
2143   split_birth(to1);
2144   split_birth(to2);
2145 }
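
     // Census bookkeeping example for split() above: split(100, 60) records
     // one split death for size 100 and one split birth each for sizes 60
     // and 40 (to2 == 100 - 60).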
2146 
2147 void CompactibleFreeListSpace::print() const {
2148   print_on(tty);
2149 }
2150 
2151 void CompactibleFreeListSpace::prepare_for_verify() {
2152   assert_locked();
2153   repairLinearAllocationBlocks();
2154   // Verify that the SpoolBlocks look like free blocks of
2155   // appropriate sizes... To be done ...
2156 }
2157 
2158 class VerifyAllBlksClosure: public BlkClosure {
2159  private:
2160   const CompactibleFreeListSpace* _sp;
2161   const MemRegion                 _span;
2162   HeapWord*                       _last_addr;
2163   size_t                          _last_size;
2164   bool                            _last_was_obj;
2165   bool                            _last_was_live;
2166 
2167  public:
2168   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2169     MemRegion span) :  _sp(sp), _span(span),
2170                        _last_addr(NULL), _last_size(0),
2171                        _last_was_obj(false), _last_was_live(false) { }
2172 
2173   virtual size_t do_blk(HeapWord* addr) {
2174     size_t res;
2175     bool   was_obj  = false;
2176     bool   was_live = false;
2177     if (_sp->block_is_obj(addr)) {
2178       was_obj = true;
2179       oop p = oop(addr);
2180       guarantee(oopDesc::is_oop(p), "Should be an oop");
2181       res = _sp->adjustObjectSize(p->size());
2182       if (_sp->obj_is_alive(addr)) {
2183         was_live = true;
2184         p->verify();
2185       }
2186     } else {
2187       FreeChunk* fc = (FreeChunk*)addr;
2188       res = fc->size();
2189       if (FLSVerifyLists && !fc->cantCoalesce()) {
2190         guarantee(_sp->verify_chunk_in_free_list(fc),
2191                   "Chunk should be on a free list");
2192       }
2193     }
2194     if (res == 0) {
2195       Log(gc, verify) log;
2196       log.error("Livelock: no rank reduction!");
2197       log.error(" Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2198                 " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2199         p2i(addr),       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2200         p2i(_last_addr), _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2201       LogStream ls(log.error());
2202       _sp->print_on(&ls);
2203       guarantee(false, "Verification failed.");
2204     }
2205     _last_addr = addr;
2206     _last_size = res;
2207     _last_was_obj  = was_obj;
2208     _last_was_live = was_live;
2209     return res;
2210   }
2211 };
2212 
2213 class VerifyAllOopsClosure: public OopClosure {
2214  private:
2215   const CMSCollector*             _collector;
2216   const CompactibleFreeListSpace* _sp;
2217   const MemRegion                 _span;
2218   const bool                      _past_remark;
2219   const CMSBitMap*                _bit_map;
2220 
2221  protected:
2222   void do_oop(void* p, oop obj) {
2223     if (_span.contains(obj)) { // the interior oop points into CMS heap
2224       if (!_span.contains(p)) { // reference from outside CMS heap
2225         // Should be a valid object; the first disjunct below allows
2226         // us to sidestep an assertion in block_is_obj() that insists
2227         // that p be in _sp. Note that several generations (and spaces)
2228         // are spanned by _span (CMS heap) above.
2229         guarantee(!_sp->is_in_reserved(obj) ||
2230                   _sp->block_is_obj((HeapWord*)obj),
2231                   "Should be an object");
2232         guarantee(oopDesc::is_oop(obj), "Should be an oop");
2233         obj->verify();
2234         if (_past_remark) {
2235           // Remark has been completed, so the object should be marked.
2236           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2237         }
2238       } else { // reference within CMS heap
2239         if (_past_remark) {
2240           // Remark has been completed -- so the referent should have
2241           // been marked, if the referring object is.
2242           if (_bit_map->isMarked(_collector->block_start(p))) {
2243             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2244           }
2245         }
2246       }
2247     } else if (_sp->is_in_reserved(p)) {
2248       // the reference is from FLS, and points out of FLS
2249       guarantee(oopDesc::is_oop(obj), "Should be an oop");
2250       obj->verify();
2251     }
2252   }
2253 
2254   template <class T> void do_oop_work(T* p) {
2255     T heap_oop = RawAccess<>::oop_load(p);
2256     if (!CompressedOops::is_null(heap_oop)) {
2257       oop obj = CompressedOops::decode_not_null(heap_oop);
2258       do_oop(p, obj);
2259     }
2260   }
2261 
2262  public:
2263   VerifyAllOopsClosure(const CMSCollector* collector,
2264     const CompactibleFreeListSpace* sp, MemRegion span,
2265     bool past_remark, CMSBitMap* bit_map) :
2266     _collector(collector), _sp(sp), _span(span),
2267     _past_remark(past_remark), _bit_map(bit_map) { }
2268 
2269   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2270   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2271 };
2272 
2273 void CompactibleFreeListSpace::verify() const {
2274   assert_lock_strong(&_freelistLock);
2275   verify_objects_initialized();
2276   MemRegion span = _collector->_span;
2277   bool past_remark = (_collector->abstract_state() ==
2278                       CMSCollector::Sweeping);
2279 
2280   ResourceMark rm;
2281   HandleMark  hm;
2282 
2283   // Check integrity of CFL data structures
2284   _promoInfo.verify();
2285   _dictionary->verify();
2286   if (FLSVerifyIndexTable) {
2287     verifyIndexedFreeLists();
2288   }
2289   // Check integrity of all objects and free blocks in space
2290   {
2291     VerifyAllBlksClosure cl(this, span);
2292     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2293   }
2294   // Check that all references in the heap to FLS
2295   // are to valid objects in FLS or that references in
2296   // FLS are to valid objects elsewhere in the heap
2297   if (FLSVerifyAllHeapReferences)
2298   {
2299     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2300       _collector->markBitMap());
2301 
2302     // Iterate over all oops in the heap. Uses the _no_header version
2303     // since we are not interested in following the klass pointers.
2304     CMSHeap::heap()->oop_iterate_no_header(&cl);
2305   }
2306 
2307   if (VerifyObjectStartArray) {
2308     // Verify the block offset table
2309     _bt.verify();
2310   }
2311 }
2312 
2313 #ifndef PRODUCT
2314 void CompactibleFreeListSpace::verifyFreeLists() const {
2315   if (FLSVerifyLists) {
2316     _dictionary->verify();
2317     verifyIndexedFreeLists();
2318   } else {
2319     if (FLSVerifyDictionary) {
2320       _dictionary->verify();
2321     }
2322     if (FLSVerifyIndexTable) {
2323       verifyIndexedFreeLists();
2324     }
2325   }
2326 }
2327 #endif
2328 
2329 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2330   size_t i = 0;
2331   for (; i < IndexSetStart; i++) {
2332     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2333   }
2334   for (; i < IndexSetSize; i++) {
2335     verifyIndexedFreeList(i);
2336   }
2337 }
2338 
2339 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2340   FreeChunk* fc   =  _indexedFreeList[size].head();
2341   FreeChunk* tail =  _indexedFreeList[size].tail();
2342   size_t    num = _indexedFreeList[size].count();
2343   size_t      n = 0;
2344   guarantee(((size >= IndexSetStart) && (size % IndexSetStride == 0)) || fc == NULL,
2345             "Slot should have been empty");
2346   for (; fc != NULL; fc = fc->next(), n++) {
2347     guarantee(fc->size() == size, "Size inconsistency");
2348     guarantee(fc->is_free(), "!free?");
2349     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2350     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2351   }
2352   guarantee(n == num, "Incorrect count");
2353 }
2354 
2355 #ifndef PRODUCT
2356 void CompactibleFreeListSpace::check_free_list_consistency() const {
2357   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size() <= IndexSetSize),
2358     "Some sizes can't be allocated without recourse to"
2359     " linear allocation buffers");
2360   assert((TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >::min_size()*HeapWordSize == sizeof(TreeChunk<FreeChunk, AdaptiveFreeList<FreeChunk> >)),
2361     "else MIN_TREE_CHUNK_SIZE is wrong");
2362   assert(IndexSetStart != 0, "IndexSetStart not initialized");
2363   assert(IndexSetStride != 0, "IndexSetStride not initialized");
2364 }
2365 #endif
2366 
2367 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2368   assert_lock_strong(&_freelistLock);
2369   LogTarget(Debug, gc, freelist, census) log;
2370   if (!log.is_enabled()) {
2371     return;
2372   }
2373   AdaptiveFreeList<FreeChunk> total;
2374   log.print("end sweep# " SIZE_FORMAT, sweep_count);
2375   ResourceMark rm;
2376   LogStream ls(log);
2377   outputStream* out = &ls;
2378   AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2379   size_t total_free = 0;
2380   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2381     const AdaptiveFreeList<FreeChunk> *fl = &_indexedFreeList[i];
2382     total_free += fl->count() * fl->size();
2383     if (i % (40*IndexSetStride) == 0) {
2384       AdaptiveFreeList<FreeChunk>::print_labels_on(out, "size");
2385     }
2386     fl->print_on(out);
2387     total.set_bfr_surp(    total.bfr_surp()     + fl->bfr_surp()    );
2388     total.set_surplus(    total.surplus()     + fl->surplus()    );
2389     total.set_desired(    total.desired()     + fl->desired()    );
2390     total.set_prev_sweep(  total.prev_sweep()   + fl->prev_sweep()  );
2391     total.set_before_sweep(total.before_sweep() + fl->before_sweep());
2392     total.set_count(      total.count()       + fl->count()      );
2393     total.set_coal_births( total.coal_births()  + fl->coal_births() );
2394     total.set_coal_deaths( total.coal_deaths()  + fl->coal_deaths() );
2395     total.set_split_births(total.split_births() + fl->split_births());
2396     total.set_split_deaths(total.split_deaths() + fl->split_deaths());
2397   }
2398   total.print_on(out, "TOTAL");
2399   log.print("Total free in indexed lists " SIZE_FORMAT " words", total_free);
2400   log.print("growth: %8.5f  deficit: %8.5f",
2401             (double)(total.split_births()+total.coal_births()-total.split_deaths()-total.coal_deaths())/
2402                     (total.prev_sweep() != 0 ? (double)total.prev_sweep() : 1.0),
2403             (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2404   _dictionary->print_dict_census(out);
2405 }
2406 
2407 ///////////////////////////////////////////////////////////////////////////
2408 // CompactibleFreeListSpaceLAB
2409 ///////////////////////////////////////////////////////////////////////////
2410 
2411 #define VECTOR_257(x)                                                                                  \
2412   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2413   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2414      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2415      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2416      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2417      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2418      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2419      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2420      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2421      x }
2422 
2423 // Initialize with default setting for CMS, _not_
2424 // generic OldPLABSize, whose static default is different; if overridden on the
2425 // command line, this will get reinitialized via a call to
2426 // modify_initialization() below.
2427 AdaptiveWeightedAverage CompactibleFreeListSpaceLAB::_blocks_to_claim[]    =
2428   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CompactibleFreeListSpaceLAB::_default_dynamic_old_plab_size));
2429 size_t CompactibleFreeListSpaceLAB::_global_num_blocks[]  = VECTOR_257(0);
2430 uint   CompactibleFreeListSpaceLAB::_global_num_workers[] = VECTOR_257(0);
2431 
2432 CompactibleFreeListSpaceLAB::CompactibleFreeListSpaceLAB(CompactibleFreeListSpace* cfls) :
2433   _cfls(cfls)
2434 {
2435   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2436   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2437        i < CompactibleFreeListSpace::IndexSetSize;
2438        i += CompactibleFreeListSpace::IndexSetStride) {
2439     _indexedFreeList[i].set_size(i);
2440     _num_blocks[i] = 0;
2441   }
2442 }
2443 
2444 static bool _CFLS_LAB_modified = false;
2445 
2446 void CompactibleFreeListSpaceLAB::modify_initialization(size_t n, unsigned wt) {
2447   assert(!_CFLS_LAB_modified, "Call only once");
2448   _CFLS_LAB_modified = true;
2449   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2450        i < CompactibleFreeListSpace::IndexSetSize;
2451        i += CompactibleFreeListSpace::IndexSetStride) {
2452     _blocks_to_claim[i].modify(n, wt, true /* force */);
2453   }
2454 }
2455 
2456 HeapWord* CompactibleFreeListSpaceLAB::alloc(size_t word_sz) {
2457   FreeChunk* res;
2458   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2459   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2460     // This locking manages sync with other large object allocations.
2461     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2462                     Mutex::_no_safepoint_check_flag);
2463     res = _cfls->getChunkFromDictionaryExact(word_sz);
2464     if (res == NULL) return NULL;
2465   } else {
2466     AdaptiveFreeList<FreeChunk>* fl = &_indexedFreeList[word_sz];
2467     if (fl->count() == 0) {
2468       // Attempt to refill this local free list.
2469       get_from_global_pool(word_sz, fl);
2470       // If it didn't work, give up.
2471       if (fl->count() == 0) return NULL;
2472     }
2473     res = fl->get_chunk_at_head();
2474     assert(res != NULL, "Why was count non-zero?");
2475   }
2476   res->markNotFree();
2477   assert(!res->is_free(), "shouldn't be marked free");
2478   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2479   // mangle a just allocated object with a distinct pattern.
2480   debug_only(res->mangleAllocated(word_sz));
2481   return (HeapWord*)res;
2482 }
2483 
2484 // Get a chunk of blocks of the right size and update related
2485 // book-keeping stats
2486 void CompactibleFreeListSpaceLAB::get_from_global_pool(size_t word_sz, AdaptiveFreeList<FreeChunk>* fl) {
2487   // Get the #blocks we want to claim
2488   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2489   assert(n_blks > 0, "Error");
2490   assert(ResizeOldPLAB || n_blks == OldPLABSize, "Error");
2491   // In some cases, when the application has a phase change,
2492   // there may be a sudden and sharp shift in the object survival
2493   // profile, and updating the counts at the end of a scavenge
2494   // may not be quick enough, giving rise to large scavenge pauses
2495   // during these phase changes. It is beneficial to detect such
2496   // changes on-the-fly during a scavenge and avoid such a phase-change
2497   // pothole. The following code is a heuristic attempt to do that.
2498   // It is protected by a product flag until we have gained
2499   // enough experience with this heuristic and fine-tuned its behavior.
2500   // WARNING: This might increase fragmentation if we overreact to
2501   // small spikes, so some kind of historical smoothing based on
2502   // previous experience with the greater reactivity might be useful.
2503   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2504   // default.
2505   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2506     //
2507     // On a 32-bit VM, the denominator can become zero because of integer overflow,
2508     // which is why there is a cast to double.
2509     //
2510     size_t multiple = (size_t) (_num_blocks[word_sz]/(((double)CMSOldPLABToleranceFactor)*CMSOldPLABNumRefills*n_blks));
2511     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2512     n_blks = MIN2(n_blks, CMSOldPLABMax);
2513   }
2514   assert(n_blks > 0, "Error");
2515   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2516   // Update stats table entry for this block size
2517   _num_blocks[word_sz] += fl->count();
2518 }
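
     // The reactivity arithmetic above, with assumed flag values: if n_blks
     // starts at 50, this worker has already obtained _num_blocks[word_sz] ==
     // 2000 blocks of this size, and CMSOldPLABToleranceFactor == 4,
     // CMSOldPLABNumRefills == 4 and CMSOldPLABReactivityFactor == 2, then
     //   multiple = (size_t)(2000 / (4.0 * 4 * 50)) == 2
     // and n_blks grows to 50 + 2 * 2 * 50 == 250, still subject to the
     // CMSOldPLABMax clamp.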
2519 
2520 void CompactibleFreeListSpaceLAB::compute_desired_plab_size() {
2521   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2522        i < CompactibleFreeListSpace::IndexSetSize;
2523        i += CompactibleFreeListSpace::IndexSetStride) {
2524     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2525            "Counter inconsistency");
2526     if (_global_num_workers[i] > 0) {
2527       // Need to smooth wrt historical average
2528       if (ResizeOldPLAB) {
2529         _blocks_to_claim[i].sample(
2530           MAX2(CMSOldPLABMin,
2531           MIN2(CMSOldPLABMax,
2532                _global_num_blocks[i]/_global_num_workers[i]/CMSOldPLABNumRefills)));
2533       }
2534       // Reset counters for next round
2535       _global_num_workers[i] = 0;
2536       _global_num_blocks[i] = 0;
2537       log_trace(gc, plab)("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2538     }
2539   }
2540 }
2541 
2542 // If this is changed in the future to allow parallel
2543 // access, one would need to take the FL locks and,
2544 // depending on how it is used, stagger access from
2545 // parallel threads to reduce contention.
2546 void CompactibleFreeListSpaceLAB::retire(int tid) {
2547   // We run this single threaded with the world stopped;
2548   // so no need for locks and such.
2549   NOT_PRODUCT(Thread* t = Thread::current();)
2550   assert(Thread::current()->is_VM_thread(), "Error");
2551   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2552        i < CompactibleFreeListSpace::IndexSetSize;
2553        i += CompactibleFreeListSpace::IndexSetStride) {
2554     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2555            "Can't retire more than what we obtained");
2556     if (_num_blocks[i] > 0) {
2557       size_t num_retire =  _indexedFreeList[i].count();
2558       assert(_num_blocks[i] > num_retire, "Should have used at least one");
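           // num_retire is the count of blocks this thread obtained but never
           // allocated from, so (_num_blocks[i] - num_retire) is its actual
           // consumption of size-i blocks; that consumption feeds the global
           // demand statistics below, while the unused blocks are handed back
           // to the space's indexed free list.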
2559       {
2560         // MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2561         //                Mutex::_no_safepoint_check_flag);
2562 
2563         // Update globals stats for num_blocks used
2564         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2565         _global_num_workers[i]++;
2566         assert(_global_num_workers[i] <= ParallelGCThreads, "Too big");
2567         if (num_retire > 0) {
2568           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2569           // Reset this list.
2570           _indexedFreeList[i] = AdaptiveFreeList<FreeChunk>();
2571           _indexedFreeList[i].set_size(i);
2572         }
2573       }
2574       log_trace(gc, plab)("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2575                           tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2576       // Reset stats for next round
2577       _num_blocks[i]         = 0;
2578     }
2579   }
2580 }
2581 
2582 // Used by par_get_chunk_of_blocks() to take chunks from the
2583 // indexed_free_lists.  Looks for a chunk whose size is a multiple
2584 // of "word_sz" and, if one is found, splits it into "word_sz"-sized
2585 // chunks which are added to the free list "fl".  "n" is the maximum
2586 // number of chunks to be added to "fl".
2587 bool CompactibleFreeListSpace::par_get_chunk_of_blocks_IFL(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2588 
2589   // We'll try all multiples of word_sz in the indexed set: word_sz itself
2590   // first and, if CMSSplitIndexedFreeListBlocks is set, larger multiples.
2591   // If that fails, the caller splits a big chunk from the dictionary instead.
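       // A purely illustrative trace (sizes hypothetical): with word_sz = 8 and
       // n = 100, we first look at the size-8 list; if that is empty and
       // CMSSplitIndexedFreeListBlocks is set we move on to cur_sz = 16 (k = 2),
       // take nn = MAX2(100/2, 1) = 50 chunks from that list, and split each of
       // them into two 8-word blocks for "fl".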
2592   {
2593     bool found;
2594     int  k;
2595     size_t cur_sz;
2596     for (k = 1, cur_sz = k * word_sz, found = false;
2597          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2598          (CMSSplitIndexedFreeListBlocks || k <= 1);
2599          k++, cur_sz = k * word_sz) {
2600       AdaptiveFreeList<FreeChunk> fl_for_cur_sz;  // Empty.
2601       fl_for_cur_sz.set_size(cur_sz);
2602       {
2603         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2604                         Mutex::_no_safepoint_check_flag);
2605         AdaptiveFreeList<FreeChunk>* gfl = &_indexedFreeList[cur_sz];
2606         if (gfl->count() != 0) {
2607           // nn is the number of chunks of size cur_sz that
2608           // we'd need to split k-ways each, in order to create
2609           // "n" chunks of size word_sz each.
2610           const size_t nn = MAX2(n/k, (size_t)1);
2611           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2612           found = true;
2613           if (k > 1) {
2614             // Update split death stats for the cur_sz-size blocks list:
2615             // we increment the split death count by the number of blocks
2616             // we just took from the cur_sz-size blocks list and which
2617             // we will be splitting below.
2618             ssize_t deaths = gfl->split_deaths() +
2619                              fl_for_cur_sz.count();
2620             gfl->set_split_deaths(deaths);
2621           }
2622         }
2623       }
2624       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2625       if (found) {
2626         if (k == 1) {
2627           fl->prepend(&fl_for_cur_sz);
2628         } else {
2629           // Divide each block on fl_for_cur_sz up k ways.
2630           FreeChunk* fc;
2631           while ((fc = fl_for_cur_sz.get_chunk_at_head()) != NULL) {
2632             // Must do this in reverse order, so that anybody attempting to
2633             // access the main chunk sees it as a single free block until we
2634             // change it.
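                 // Concretely: the k-1 trailing word_sz-sized pieces are carved
                 // off the high end first, while fc keeps its original size and
                 // free-block marking; only the final iteration (i == 0, where
                 // ffc == fc) shrinks fc itself down to word_sz.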
2635             size_t fc_size = fc->size();
2636             assert(fc->is_free(), "Error");
2637             for (int i = k-1; i >= 0; i--) {
2638               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2639               assert((i != 0) ||
2640                         ((fc == ffc) && ffc->is_free() &&
2641                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2642                         "Counting error");
2643               ffc->set_size(word_sz);
2644               ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2645               ffc->link_next(NULL);
2646               // Above must occur before BOT is updated below.
2647               OrderAccess::storestore();
2648               // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2649               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2650               fc_size -= word_sz;
2651               assert(fc_size == i*word_sz, "Error");
2652               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2653               _bt.verify_single_block((HeapWord*)fc, fc_size);
2654               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2655               // Push this on "fl".
2656               fl->return_chunk_at_head(ffc);
2657             }
2658             // TRAP
2659             assert(fl->tail()->next() == NULL, "List invariant.");
2660           }
2661         }
2662         // Update birth stats for this block size.
2663         size_t num = fl->count();
2664         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2665                         Mutex::_no_safepoint_check_flag);
2666         ssize_t births = _indexedFreeList[word_sz].split_births() + num;
2667         _indexedFreeList[word_sz].set_split_births(births);
2668         return true;
2669       }
2670     }
2671     return found;
2672   }
2673 }
2674 
2675 FreeChunk* CompactibleFreeListSpace::get_n_way_chunk_to_split(size_t word_sz, size_t n) {
2676 
2677   FreeChunk* fc = NULL;
2678   FreeChunk* rem_fc = NULL;
2679   size_t rem;
2680   {
2681     MutexLockerEx x(parDictionaryAllocLock(),
2682                     Mutex::_no_safepoint_check_flag);
2683     while (n > 0) {
2684       fc = dictionary()->get_chunk(MAX2(n * word_sz, _dictionary->min_size()));
2685       if (fc != NULL) {
2686         break;
2687       } else {
2688         n--;
2689       }
2690     }
2691     if (fc == NULL) return NULL;
2692     // Otherwise, split up that block.
2693     assert((ssize_t)n >= 1, "Control point invariant");
2694     assert(fc->is_free(), "Error: should be a free block");
2695     _bt.verify_single_block((HeapWord*)fc, fc->size());
2696     const size_t nn = fc->size() / word_sz;
2697     n = MIN2(nn, n);
2698     assert((ssize_t)n >= 1, "Control point invariant");
2699     rem = fc->size() - n * word_sz;
2700     // If there is a remainder, and it's too small, allocate one fewer.
2701     if (rem > 0 && rem < MinChunkSize) {
2702       n--; rem += word_sz;
2703     }
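         // For instance (hypothetical sizes): with word_sz = 10, n = 20 and a
         // 203-word chunk from the dictionary, rem would be 3; if that is below
         // MinChunkSize we give up one block, leaving n = 19 and a 13-word
         // remainder that is big enough to stand on its own as a free chunk.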
2704     // Note that at this point we may have n == 0.
2705     assert((ssize_t)n >= 0, "Control point invariant");
2706 
2707     // If n is 0, the chunk fc that was found is not large
2708     // enough to leave a viable remainder.  We are unable to
2709     // allocate even one block.  Return fc to the
2710     // dictionary and return, leaving "fl" empty.
2711     if (n == 0) {
2712       returnChunkToDictionary(fc);
2713       return NULL;
2714     }
2715 
2716     _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2717     dictionary()->dict_census_update(fc->size(),
2718                                      true /*split*/,
2719                                      false /*birth*/);
2720 
2721     // First return the remainder, if any.
2722     // Note that we hold the lock until we decide if we're going to give
2723     // back the remainder to the dictionary, since a concurrent allocation
2724     // may otherwise see the heap as empty.  (We're willing to take that
2725     // hit if the block is a small block.)
2726     if (rem > 0) {
2727       size_t prefix_size = n * word_sz;
2728       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2729       rem_fc->set_size(rem);
2730       rem_fc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2731       rem_fc->link_next(NULL);
2732       // Above must occur before BOT is updated below.
2733       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2734       OrderAccess::storestore();
2735       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2736       assert(fc->is_free(), "Error");
2737       fc->set_size(prefix_size);
2738       if (rem >= IndexSetSize) {
2739         returnChunkToDictionary(rem_fc);
2740         dictionary()->dict_census_update(rem, true /*split*/, true /*birth*/);
2741         rem_fc = NULL;
2742       }
2743       // Otherwise, return it to the small list below.
2744     }
2745   }
2746   if (rem_fc != NULL) {
2747     MutexLockerEx x(_indexedFreeListParLocks[rem],
2748                     Mutex::_no_safepoint_check_flag);
2749     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2750     _indexedFreeList[rem].return_chunk_at_head(rem_fc);
2751     smallSplitBirth(rem);
2752   }
2753   assert(n * word_sz == fc->size(),
2754          "Chunk size " SIZE_FORMAT " is not exactly splittable into "
2755          SIZE_FORMAT " chunks of size " SIZE_FORMAT,
2756          fc->size(), n, word_sz);
2757   return fc;
2758 }
2759 
2760 void CompactibleFreeListSpace::par_get_chunk_of_blocks_dictionary(size_t word_sz, size_t targeted_number_of_chunks, AdaptiveFreeList<FreeChunk>* fl) {
2761 
2762   FreeChunk* fc = get_n_way_chunk_to_split(word_sz, targeted_number_of_chunks);
2763 
2764   if (fc == NULL) {
2765     return;
2766   }
2767 
2768   size_t n = fc->size() / word_sz;
2769 
2770   assert((ssize_t)n > 0, "Consistency");
2771   // Now do the splitting up.
2772   // Must do this in reverse order, so that anybody attempting to
2773   // access the main chunk sees it as a single free block until we
2774   // change it.
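       // For example, with n = 3 the loop below carves the piece at
       // fc + 2*word_sz first, then the one at fc + word_sz, and only after the
       // loop is fc itself shrunk to word_sz and pushed on "fl".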
2775   size_t fc_size = n * word_sz;
2776   // All but first chunk in this loop
2777   for (ssize_t i = n-1; i > 0; i--) {
2778     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2779     ffc->set_size(word_sz);
2780     ffc->link_prev(NULL); // Mark as a free block for other (parallel) GC threads.
2781     ffc->link_next(NULL);
2782     // Above must occur before BOT is updated below.
2783     OrderAccess::storestore();
2784     // Splitting from the right; after the decrement below, fc_size == i * word_sz.
2785     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2786     fc_size -= word_sz;
2787     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2788     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2789     _bt.verify_single_block((HeapWord*)fc, fc_size);
2790     // Push this on "fl".
2791     fl->return_chunk_at_head(ffc);
2792   }
2793   // First chunk
2794   assert(fc->is_free() && fc->size() == n*word_sz, "Error: should still be a free block");
2795   // The blocks above should show their new sizes before the first block below
2796   fc->set_size(word_sz);
2797   fc->link_prev(NULL);    // idempotent wrt free-ness, see assert above
2798   fc->link_next(NULL);
2799   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2800   _bt.verify_single_block((HeapWord*)fc, fc->size());
2801   fl->return_chunk_at_head(fc);
2802 
2803   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2804   {
2805     // Update the stats for this block size.
2806     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2807                     Mutex::_no_safepoint_check_flag);
2808     const ssize_t births = _indexedFreeList[word_sz].split_births() + n;
2809     _indexedFreeList[word_sz].set_split_births(births);
2810     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2811     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2812   }
2813 
2814   // TRAP
2815   assert(fl->tail()->next() == NULL, "List invariant.");
2816 }
2817 
2818 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, AdaptiveFreeList<FreeChunk>* fl) {
2819   assert(fl->count() == 0, "Precondition.");
2820   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2821          "Precondition");
2822 
2823   if (par_get_chunk_of_blocks_IFL(word_sz, n, fl)) {
2824     // Got it
2825     return;
2826   }
2827 
2828   // Otherwise, we'll split a block from the dictionary.
2829   par_get_chunk_of_blocks_dictionary(word_sz, n, fl);
2830 }
2831 
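     // Ergonomic upper bound for the task-size multiplier flags.  Assuming the
     // task sizes scale as <flag> * CardTable::card_size_in_words * BitsPerWord
     // (as rescan_task_size() and marking_task_size() are expected to), a flag
     // at this bound makes a single task span the entire reserved old generation.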
2832 const size_t CompactibleFreeListSpace::max_flag_size_for_task_size() const {
2833   const size_t ergo_max = _old_gen->reserved().word_size() / (CardTable::card_size_in_words * BitsPerWord);
2834   return ergo_max;
2835 }
2836 
2837 // Set up the space's par_seq_tasks structure for work claiming
2838 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2839 // XXX Need to suitably abstract and generalize this and the next
2840 // method into one.
2841 void
2842 CompactibleFreeListSpace::
2843 initialize_sequential_subtasks_for_rescan(int n_threads) {
2844   // The "size" of each task is fixed according to rescan_task_size.
2845   assert(n_threads > 0, "Unexpected n_threads argument");
2846   const size_t task_size = rescan_task_size();
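       // n_tasks is the ceiling of the used words over the task size.  As a
       // purely illustrative example, a 1,000,000-word used region with a
       // 4096-word task size yields (1000000 + 4095) / 4096 = 245 tasks, the
       // last of them covering less than a full task_size.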
2847   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2848   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2849   assert(n_tasks == 0 ||
2850          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2851           (used_region().start() + n_tasks*task_size >= used_region().end())),
2852          "n_tasks calculation incorrect");
2853   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2854   assert(!pst->valid(), "Clobbering existing data?");
2855   // Sets the condition for completion of the subtask (how many threads
2856   // need to finish in order to be done).
2857   pst->set_n_threads(n_threads);
2858   pst->set_n_tasks((int)n_tasks);
2859 }
2860 
2861 // Set up the space's par_seq_tasks structure for work claiming
2862 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2863 void
2864 CompactibleFreeListSpace::
2865 initialize_sequential_subtasks_for_marking(int n_threads,
2866                                            HeapWord* low) {
2867   // The "size" of each task is fixed according to marking_task_size.
2868   assert(n_threads > 0, "Unexpected n_threads argument");
2869   const size_t task_size = marking_task_size();
2870   assert(task_size > CardTable::card_size_in_words &&
2871          (task_size %  CardTable::card_size_in_words == 0),
2872          "Otherwise arithmetic below would be incorrect");
2873   MemRegion span = _old_gen->reserved();
2874   if (low != NULL) {
2875     if (span.contains(low)) {
2876       // Align low down to a card boundary so that
2877       // we can use block_offset_careful() on span boundaries.
2878       HeapWord* aligned_low = align_down(low, CardTable::card_size);
2879       // Clip span prefix at aligned_low
2880       span = span.intersection(MemRegion(aligned_low, span.end()));
2881     } else if (low > span.end()) {
2882       span = MemRegion(low, low);  // Null region
2883     } // else use entire span
2884   }
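       // At this point span is either empty, the entire old generation, or the
       // suffix of it starting at "low" rounded down to a card boundary.  For
       // example, with a (hypothetical) 512-byte card, a "low" that is 1000
       // bytes past a card-aligned start is rounded back by 488 bytes so that
       // block_offset_careful() can be used at the span's boundaries.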
2885   assert(span.is_empty() ||
2886          ((uintptr_t)span.start() %  CardTable::card_size == 0),
2887         "span should start at a card boundary");
2888   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2889   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2890   assert(n_tasks == 0 ||
2891          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2892           (span.start() + n_tasks*task_size >= span.end())),
2893          "n_tasks calculation incorrect");
2894   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2895   assert(!pst->valid(), "Clobbering existing data?");
2896   // Sets the condition for completion of the subtask (how many threads
2897   // need to finish in order to be done).
2898   pst->set_n_threads(n_threads);
2899   pst->set_n_tasks((int)n_tasks);
2900 }