1 /*
   2  * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 # include "incls/_precompiled.incl"
  26 # include "incls/_compactibleFreeListSpace.cpp.incl"
  27 
  28 /////////////////////////////////////////////////////////////////////////
  29 //// CompactibleFreeListSpace
  30 /////////////////////////////////////////////////////////////////////////
  31 
// The highest lock rank used by the free list locks.
  33 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  34 
  35 // Defaults are 0 so things will break badly if incorrectly initialized.
  36 int CompactibleFreeListSpace::IndexSetStart  = 0;
  37 int CompactibleFreeListSpace::IndexSetStride = 0;
  38 
  39 size_t MinChunkSize = 0;
  40 
  41 void CompactibleFreeListSpace::set_cms_values() {
  42   // Set CMS global values
  43   assert(MinChunkSize == 0, "already set");
  #define numQuanta(x,y) (((x)+(y)-1)/(y))
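  // The minimum chunk size is the size of a FreeChunk header rounded up to
  // the minimum object alignment, expressed in heap words.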
  45   MinChunkSize = numQuanta(sizeof(FreeChunk), MinObjAlignmentInBytes) * MinObjAlignment;
  46 
  47   assert(IndexSetStart == 0 && IndexSetStride == 0, "already set");
  48   IndexSetStart  = MinObjAlignment;
  49   IndexSetStride = MinObjAlignment;
  50 }
  51 
  52 // Constructor
  53 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  54   MemRegion mr, bool use_adaptive_freelists,
  55   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  56   _dictionaryChoice(dictionaryChoice),
  57   _adaptive_freelists(use_adaptive_freelists),
  58   _bt(bs, mr),
  59   // free list locks are in the range of values taken by _lockRank
  60   // This range currently is [_leaf+2, _leaf+3]
  61   // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks
  // are acquired in the program text. This is true today.
  64   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  65   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  66                           "CompactibleFreeListSpace._dict_par_lock", true),
  67   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  68                     CMSRescanMultiple),
  69   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  70                     CMSConcMarkMultiple),
  71   _collector(NULL)
  72 {
  73   _bt.set_space(this);
  74   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  75   // We have all of "mr", all of which we place in the dictionary
  76   // as one big chunk. We'll need to decide here which of several
  77   // possible alternative dictionary implementations to use. For
  78   // now the choice is easy, since we have only one working
  79   // implementation, namely, the simple binary tree (splaying
  80   // temporarily disabled).
  81   switch (dictionaryChoice) {
  82     case FreeBlockDictionary::dictionarySplayTree:
  83     case FreeBlockDictionary::dictionarySkipList:
  84     default:
  85       warning("dictionaryChoice: selected option not understood; using"
  86               " default BinaryTreeDictionary implementation instead.");
  87     case FreeBlockDictionary::dictionaryBinaryTree:
  88       _dictionary = new BinaryTreeDictionary(mr);
  89       break;
  90   }
  91   assert(_dictionary != NULL, "CMS dictionary initialization");
  92   // The indexed free lists are initially all empty and are lazily
  93   // filled in on demand. Initialize the array elements to NULL.
  94   initializeIndexedFreeListArray();
  95 
  96   // Not using adaptive free lists assumes that allocation is first
  // from the linAB's.  Also, a CMS perm gen that can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass, so that the klassKlass is
 100   // moved to its new location before the klass is moved.
 101   // Set the _refillSize for the linear allocation blocks
 102   if (!use_adaptive_freelists) {
 103     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
 104     // The small linAB initially has all the space and will allocate
 105     // a chunk of any size.
 106     HeapWord* addr = (HeapWord*) fc;
 107     _smallLinearAllocBlock.set(addr, fc->size() ,
 108       1024*SmallForLinearAlloc, fc->size());
 109     // Note that _unallocated_block is not updated here.
 110     // Allocations from the linear allocation block should
 111     // update it.
 112   } else {
 113     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc,
 114                                SmallForLinearAlloc);
 115   }
 116   // CMSIndexedFreeListReplenish should be at least 1
 117   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 118   _promoInfo.setSpace(this);
 119   if (UseCMSBestFit) {
 120     _fitStrategy = FreeBlockBestFitFirst;
 121   } else {
 122     _fitStrategy = FreeBlockStrategyNone;
 123   }
 124   checkFreeListConsistency();
 125 
 126   // Initialize locks for parallel case.
 127 
 128   if (CollectedHeap::use_parallel_gc_threads()) {
 129     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
      _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == rank(ExpandHeap_lock) - 1
 131                                               "a freelist par lock",
 132                                               true);
 133       if (_indexedFreeListParLocks[i] == NULL)
 134         vm_exit_during_initialization("Could not allocate a par lock");
 135       DEBUG_ONLY(
 136         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 137       )
 138     }
 139     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 140   }
 141 }
 142 
 143 // Like CompactibleSpace forward() but always calls cross_threshold() to
 144 // update the block offset table.  Removed initialize_threshold call because
 145 // CFLS does not use a block offset array for contiguous spaces.
 146 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size,
 147                                     CompactPoint* cp, HeapWord* compact_top) {
 148   // q is alive
 149   // First check if we should switch compaction space
 150   assert(this == cp->space, "'this' should be current compaction space.");
 151   size_t compaction_max_size = pointer_delta(end(), compact_top);
 152   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
    "virtual adjust_object_size_v() method is not correct");
 154   size_t adjusted_size = adjustObjectSize(size);
 155   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 156          "no small fragments allowed");
 157   assert(minimum_free_block_size() == MinChunkSize,
 158          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 160   if (adjusted_size + MinChunkSize > compaction_max_size &&
 161       adjusted_size != compaction_max_size) {
 162     do {
 163       // switch to next compaction space
 164       cp->space->set_compaction_top(compact_top);
 165       cp->space = cp->space->next_compaction_space();
 166       if (cp->space == NULL) {
 167         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 168         assert(cp->gen != NULL, "compaction must succeed");
 169         cp->space = cp->gen->first_compaction_space();
 170         assert(cp->space != NULL, "generation must have a first compaction space");
 171       }
 172       compact_top = cp->space->bottom();
 173       cp->space->set_compaction_top(compact_top);
 174       // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method, which was avoided above to save the virtual
      // dispatch.
 178       adjusted_size = cp->space->adjust_object_size_v(size);
 179       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 180       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 181     } while (adjusted_size > compaction_max_size);
 182   }
 183 
 184   // store the forwarding pointer into the mark word
 185   if ((HeapWord*)q != compact_top) {
 186     q->forward_to(oop(compact_top));
 187     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 188   } else {
 189     // if the object isn't moving we can just set the mark to the default
 190     // mark and handle it specially later on.
 191     q->init_mark();
 192     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 193   }
 194 
 195   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
 196   compact_top += adjusted_size;
 197 
 198   // we need to update the offset table so that the beginnings of objects can be
 199   // found during scavenge.  Note that we are updating the offset table based on
 200   // where the object will be once the compaction phase finishes.
 201 
  // Always call cross_threshold().  A contiguous space calls it only when
  // the compaction_top exceeds the current threshold, but that shortcut does
  // not apply to a non-contiguous space.
 205   cp->threshold =
 206     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 207   return compact_top;
 208 }
 209 
 210 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 211 // and use of single_block instead of alloc_block.  The name here is not really
 212 // appropriate - maybe a more general name could be invented for both the
 213 // contiguous and noncontiguous spaces.
 214 
 215 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 216   _bt.single_block(start, the_end);
 217   return end();
 218 }
 219 
// Initialize the indexed free lists so that they are all empty.
 221 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 222   for (size_t i = 0; i < IndexSetSize; i++) {
 223     // Note that on platforms where objects are double word aligned,
 224     // the odd array elements are not used.  It is convenient, however,
 225     // to map directly from the object size to the array element.
 226     _indexedFreeList[i].reset(IndexSetSize);
 227     _indexedFreeList[i].set_size(i);
 228     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 229     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 230     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 231     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 232   }
 233 }
 234 
 235 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 236   for (int i = 1; i < IndexSetSize; i++) {
 237     assert(_indexedFreeList[i].size() == (size_t) i,
 238       "Indexed free list sizes are incorrect");
 239     _indexedFreeList[i].reset(IndexSetSize);
 240     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 241     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 242     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 243     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 244   }
 245 }
 246 
 247 void CompactibleFreeListSpace::reset(MemRegion mr) {
 248   resetIndexedFreeListArray();
 249   dictionary()->reset();
 250   if (BlockOffsetArrayUseUnallocatedBlock) {
 251     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 252     // Everything's allocated until proven otherwise.
 253     _bt.set_unallocated_block(end());
 254   }
 255   if (!mr.is_empty()) {
 256     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 257     _bt.single_block(mr.start(), mr.word_size());
 258     FreeChunk* fc = (FreeChunk*) mr.start();
 259     fc->setSize(mr.word_size());
 260     if (mr.word_size() >= IndexSetSize ) {
 261       returnChunkToDictionary(fc);
 262     } else {
 263       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 264       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
 265     }
 266   }
 267   _promoInfo.reset();
 268   _smallLinearAllocBlock._ptr = NULL;
 269   _smallLinearAllocBlock._word_size = 0;
 270 }
 271 
 272 void CompactibleFreeListSpace::reset_after_compaction() {
 273   // Reset the space to the new reality - one free chunk.
 274   MemRegion mr(compaction_top(), end());
 275   reset(mr);
 276   // Now refill the linear allocation block(s) if possible.
 277   if (_adaptive_freelists) {
 278     refillLinearAllocBlocksIfNeeded();
 279   } else {
 280     // Place as much of mr in the linAB as we can get,
 281     // provided it was big enough to go into the dictionary.
 282     FreeChunk* fc = dictionary()->findLargestDict();
 283     if (fc != NULL) {
 284       assert(fc->size() == mr.word_size(),
 285              "Why was the chunk broken up?");
 286       removeChunkFromDictionary(fc);
 287       HeapWord* addr = (HeapWord*) fc;
 288       _smallLinearAllocBlock.set(addr, fc->size() ,
 289         1024*SmallForLinearAlloc, fc->size());
 290       // Note that _unallocated_block is not updated here.
 291     }
 292   }
 293 }
 294 
 295 // Walks the entire dictionary, returning a coterminal
 296 // chunk, if it exists. Use with caution since it involves
 297 // a potentially complete walk of a potentially large tree.
 298 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 299 
 300   assert_lock_strong(&_freelistLock);
 301 
 302   return dictionary()->find_chunk_ends_at(end());
 303 }
 304 
 305 
 306 #ifndef PRODUCT
 307 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 308   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 309     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
 310   }
 311 }
 312 
 313 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 314   size_t sum = 0;
 315   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 316     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
 317   }
 318   return sum;
 319 }
 320 
 321 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 322   size_t count = 0;
 323   for (int i = (int)MinChunkSize; i < IndexSetSize; i++) {
 324     debug_only(
 325       ssize_t total_list_count = 0;
 326       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 327          fc = fc->next()) {
 328         total_list_count++;
 329       }
 330       assert(total_list_count ==  _indexedFreeList[i].count(),
 331         "Count in list is incorrect");
 332     )
 333     count += _indexedFreeList[i].count();
 334   }
 335   return count;
 336 }
 337 
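// The total number of free chunks in the space: those on the indexed free
// lists, those in the dictionary, and the linear allocation block if it
// currently holds any space.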
 338 size_t CompactibleFreeListSpace::totalCount() {
 339   size_t num = totalCountInIndexedFreeLists();
 340   num +=  dictionary()->totalCount();
 341   if (_smallLinearAllocBlock._word_size != 0) {
 342     num++;
 343   }
 344   return num;
 345 }
 346 #endif
 347 
 348 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 349   FreeChunk* fc = (FreeChunk*) p;
 350   return fc->isFree();
 351 }
 352 
 353 size_t CompactibleFreeListSpace::used() const {
 354   return capacity() - free();
 355 }
 356 
 357 size_t CompactibleFreeListSpace::free() const {
 358   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 359   // if you do this while the structures are in flux you
 360   // may get an approximate answer only; for instance
 361   // because there is concurrent allocation either
 362   // directly by mutators or for promotion during a GC.
 363   // It's "MT-safe", however, in the sense that you are guaranteed
 364   // not to crash and burn, for instance, because of walking
 365   // pointers that could disappear as you were walking them.
 366   // The approximation is because the various components
 367   // that are read below are not read atomically (and
 368   // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 370   // this is during a resize operation at the end of GC
 371   // and at that time you are guaranteed to get the
 372   // correct actual value. However, for instance, this is
 373   // also read completely asynchronously by the "perf-sampler"
 374   // that supports jvmstat, and you are apt to see the values
 375   // flicker in such cases.
 376   assert(_dictionary != NULL, "No _dictionary?");
 377   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
 378           totalSizeInIndexedFreeLists() +
 379           _smallLinearAllocBlock._word_size) * HeapWordSize;
 380 }
 381 
 382 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 383   assert(_dictionary != NULL, "No _dictionary?");
 384   assert_locked();
 385   size_t res = _dictionary->maxChunkSize();
 386   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 387                        (size_t) SmallForLinearAlloc - 1));
 388   // XXX the following could potentially be pretty slow;
  // should one, pessimally for the rare cases when the res
  // calculated above is less than IndexSetSize,
 391   // just return res calculated above? My reasoning was that
 392   // those cases will be so rare that the extra time spent doesn't
 393   // really matter....
 394   // Note: do not change the loop test i >= res + IndexSetStride
 395   // to i > res below, because i is unsigned and res may be zero.
 396   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 397        i -= IndexSetStride) {
 398     if (_indexedFreeList[i].head() != NULL) {
 399       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 400       return i;
 401     }
 402   }
 403   return res;
 404 }
 405 
 406 void LinearAllocBlock::print_on(outputStream* st) const {
 407   st->print_cr(" LinearAllocBlock: ptr = " PTR_FORMAT ", word_size = " SIZE_FORMAT
 408             ", refillsize = " SIZE_FORMAT ", allocation_size_limit = " SIZE_FORMAT,
 409             _ptr, _word_size, _refillSize, _allocation_size_limit);
 410 }
 411 
 412 void CompactibleFreeListSpace::print_on(outputStream* st) const {
 413   st->print_cr("COMPACTIBLE FREELIST SPACE");
 414   st->print_cr(" Space:");
 415   Space::print_on(st);
 416 
 417   st->print_cr("promoInfo:");
 418   _promoInfo.print_on(st);
 419 
 420   st->print_cr("_smallLinearAllocBlock");
 421   _smallLinearAllocBlock.print_on(st);
 422 
 423   // dump_memory_block(_smallLinearAllocBlock->_ptr, 128);
 424 
 425   st->print_cr(" _fitStrategy = %s, _adaptive_freelists = %s",
 426                _fitStrategy?"true":"false", _adaptive_freelists?"true":"false");
 427 }
 428 
 429 void CompactibleFreeListSpace::print_indexed_free_lists(outputStream* st)
 430 const {
 431   reportIndexedFreeListStatistics();
  st->print_cr("Layout of Indexed Freelists");
  st->print_cr("---------------------------");
  FreeList::print_labels_on(st, "size");
  for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    _indexedFreeList[i].print_on(st);
    for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
         fc = fc->next()) {
      st->print_cr("\t[" PTR_FORMAT "," PTR_FORMAT ")  %s",
                   fc, (HeapWord*)fc + i,
                   fc->cantCoalesce() ? "\t CC" : "");
 442     }
 443   }
 444 }
 445 
 446 void CompactibleFreeListSpace::print_promo_info_blocks(outputStream* st)
 447 const {
 448   _promoInfo.print_on(st);
 449 }
 450 
 451 void CompactibleFreeListSpace::print_dictionary_free_lists(outputStream* st)
 452 const {
 453   _dictionary->reportStatistics();
 454   st->print_cr("Layout of Freelists in Tree");
 455   st->print_cr("---------------------------");
 456   _dictionary->print_free_lists(st);
 457 }
 458 
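// A block closure that prints one line per block in the space,
// distinguishing live objects, dead objects (after the remark phase) and
// free chunks; used by dump_at_safepoint_with_locks() below.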
 459 class BlkPrintingClosure: public BlkClosure {
 460   const CMSCollector*             _collector;
 461   const CompactibleFreeListSpace* _sp;
 462   const CMSBitMap*                _live_bit_map;
 463   const bool                      _post_remark;
 464   outputStream*                   _st;
 465 public:
 466   BlkPrintingClosure(const CMSCollector* collector,
 467                      const CompactibleFreeListSpace* sp,
 468                      const CMSBitMap* live_bit_map,
 469                      outputStream* st):
 470     _collector(collector),
 471     _sp(sp),
 472     _live_bit_map(live_bit_map),
 473     _post_remark(collector->abstract_state() > CMSCollector::FinalMarking),
 474     _st(st) { }
 475   size_t do_blk(HeapWord* addr);
 476 };
 477 
 478 size_t BlkPrintingClosure::do_blk(HeapWord* addr) {
 479   size_t sz = _sp->block_size_no_stall(addr, _collector);
 480   assert(sz != 0, "Should always be able to compute a size");
 481   if (_sp->block_is_obj(addr)) {
 482     const bool dead = _post_remark && !_live_bit_map->isMarked(addr);
 483     _st->print_cr(PTR_FORMAT ": %s object of size " SIZE_FORMAT "%s",
 484       addr,
 485       dead ? "dead" : "live",
 486       sz,
 487       (!dead && CMSPrintObjectsInDump) ? ":" : ".");
 488     if (CMSPrintObjectsInDump && !dead) {
 489       oop(addr)->print_on(_st);
 490       _st->print_cr("--------------------------------------");
 491     }
 492   } else { // free block
 493     _st->print_cr(PTR_FORMAT ": free block of size " SIZE_FORMAT "%s",
 494       addr, sz, CMSPrintChunksInDump ? ":" : ".");
 495     if (CMSPrintChunksInDump) {
 496       ((FreeChunk*)addr)->print_on(_st);
 497       _st->print_cr("--------------------------------------");
 498     }
 499   }
 500   return sz;
 501 }
 502 
 503 void CompactibleFreeListSpace::dump_at_safepoint_with_locks(CMSCollector* c,
 504   outputStream* st) {
 505   st->print_cr("\n=========================");
 506   st->print_cr("Block layout in CMS Heap:");
 507   st->print_cr("=========================");
 508   BlkPrintingClosure  bpcl(c, this, c->markBitMap(), st);
 509   blk_iterate(&bpcl);
 510 
 511   st->print_cr("\n=======================================");
 512   st->print_cr("Order & Layout of Promotion Info Blocks");
 513   st->print_cr("=======================================");
 514   print_promo_info_blocks(st);
 515 
 516   st->print_cr("\n===========================");
 517   st->print_cr("Order of Indexed Free Lists");
  st->print_cr("===========================");
 519   print_indexed_free_lists(st);
 520 
 521   st->print_cr("\n=================================");
 522   st->print_cr("Order of Free Lists in Dictionary");
 523   st->print_cr("=================================");
 524   print_dictionary_free_lists(st);
 525 }
 526 
 527 
 528 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 529   assert_lock_strong(&_freelistLock);
 530   assert(PrintFLSStatistics != 0, "Reporting error");
 531   _dictionary->reportStatistics();
 532   if (PrintFLSStatistics > 1) {
 533     reportIndexedFreeListStatistics();
 534     size_t totalSize = totalSizeInIndexedFreeLists() +
 535                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
 537   }
 538 }
 539 
 540 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 541   assert_lock_strong(&_freelistLock);
 542   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 543                       "--------------------------------\n");
 544   size_t totalSize = totalSizeInIndexedFreeLists();
 545   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
 551   }
 552 }
 553 
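// Count the free chunks on all of the indexed free lists, cross-checking
// each list's cached count against its actual length in debug builds.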
 554 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 555   size_t res = 0;
 556   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 557     debug_only(
 558       ssize_t recount = 0;
 559       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 560          fc = fc->next()) {
 561         recount += 1;
 562       }
 563       assert(recount == _indexedFreeList[i].count(),
 564         "Incorrect count in list");
 565     )
 566     res += _indexedFreeList[i].count();
 567   }
 568   return res;
 569 }
 570 
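// Return the size of the largest non-empty indexed free list, or 0 if all
// of the indexed free lists are empty.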
 571 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 572   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 573     if (_indexedFreeList[i].head() != NULL) {
 574       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 575       return (size_t)i;
 576     }
 577   }
 578   return 0;
 579 }
 580 
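// Change the end of the space. When the space grows, the newly covered
// words either seed the small linear allocation block (in the non-adaptive
// case, when the linAB is empty) or are added to the free lists, coalescing
// with a preceding free block when possible.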
 581 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 582   HeapWord* prevEnd = end();
 583   assert(prevEnd != value, "unnecessary set_end call");
 584   assert(prevEnd == NULL || !BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 585         "New end is below unallocated block");
 586   _end = value;
 587   if (prevEnd != NULL) {
 588     // Resize the underlying block offset table.
 589     _bt.resize(pointer_delta(value, bottom()));
 590     if (value <= prevEnd) {
 591       assert(!BlockOffsetArrayUseUnallocatedBlock || value >= unallocated_block(),
 592              "New end is below unallocated block");
 593     } else {
 594       // Now, take this new chunk and add it to the free blocks.
 595       // Note that the BOT has not yet been updated for this block.
 596       size_t newFcSize = pointer_delta(value, prevEnd);
 597       // XXX This is REALLY UGLY and should be fixed up. XXX
 598       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 599         // Mark the boundary of the new block in BOT
 600         _bt.mark_block(prevEnd, value);
 601         // put it all in the linAB
 602         if (ParallelGCThreads == 0) {
 603           _smallLinearAllocBlock._ptr = prevEnd;
 604           _smallLinearAllocBlock._word_size = newFcSize;
 605           repairLinearAllocBlock(&_smallLinearAllocBlock);
 606         } else { // ParallelGCThreads > 0
 607           MutexLockerEx x(parDictionaryAllocLock(),
 608                           Mutex::_no_safepoint_check_flag);
 609           _smallLinearAllocBlock._ptr = prevEnd;
 610           _smallLinearAllocBlock._word_size = newFcSize;
 611           repairLinearAllocBlock(&_smallLinearAllocBlock);
 612         }
 613         // Births of chunks put into a LinAB are not recorded.  Births
 614         // of chunks as they are allocated out of a LinAB are.
 615       } else {
 616         // Add the block to the free lists, if possible coalescing it
 617         // with the last free block, and update the BOT and census data.
 618         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 619       }
 620     }
 621   }
 622 }
 623 
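// A dirty-card-to-oop closure specialized for CompactibleFreeListSpace.
// The walk_mem_region_with_cl() overrides below dispatch to either a
// parallel or a serial walker, depending on whether parallel GC threads
// are in use.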
 624 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 625   CompactibleFreeListSpace* _cfls;
 626   CMSCollector* _collector;
 627 protected:
 628   // Override.
 629 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 630   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 631                                        HeapWord* bottom, HeapWord* top, \
 632                                        ClosureType* cl);                \
 633       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 634                                        HeapWord* bottom, HeapWord* top, \
 635                                        ClosureType* cl);                \
 636     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 637                                        HeapWord* bottom, HeapWord* top, \
 638                                        ClosureType* cl)
 639   walk_mem_region_with_cl_DECL(OopClosure);
 640   walk_mem_region_with_cl_DECL(FilteringClosure);
 641 
 642 public:
 643   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 644                       CMSCollector* collector,
 645                       OopClosure* cl,
 646                       CardTableModRefBS::PrecisionStyle precision,
 647                       HeapWord* boundary) :
 648     Filtering_DCTOC(sp, cl, precision, boundary),
 649     _cfls(sp), _collector(collector) {}
 650 };
 651 
 652 // We de-virtualize the block-related calls below, since we know that our
 653 // space is a CompactibleFreeListSpace.
 654 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 655 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 656                                                  HeapWord* bottom,              \
 657                                                  HeapWord* top,                 \
 658                                                  ClosureType* cl) {             \
 659    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
 660      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 661    } else {                                                                     \
 662      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 663    }                                                                            \
 664 }                                                                               \
 665 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 666                                                       HeapWord* bottom,         \
 667                                                       HeapWord* top,            \
 668                                                       ClosureType* cl) {        \
 669   /* Skip parts that are before "mr", in case "block_start" sent us             \
 670      back too far. */                                                           \
 671   HeapWord* mr_start = mr.start();                                              \
 672   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 673   HeapWord* next = bottom + bot_size;                                           \
 674   while (next < mr_start) {                                                     \
 675     bottom = next;                                                              \
 676     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 677     next = bottom + bot_size;                                                   \
 678   }                                                                             \
 679                                                                                 \
 680   while (bottom < top) {                                                        \
 681     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 682         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 683                     oop(bottom)) &&                                             \
 684         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 685       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 686       bottom += _cfls->adjustObjectSize(word_sz);                               \
 687     } else {                                                                    \
 688       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 689     }                                                                           \
 690   }                                                                             \
 691 }                                                                               \
 692 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 693                                                         HeapWord* bottom,       \
 694                                                         HeapWord* top,          \
 695                                                         ClosureType* cl) {      \
 696   /* Skip parts that are before "mr", in case "block_start" sent us             \
 697      back too far. */                                                           \
 698   HeapWord* mr_start = mr.start();                                              \
 699   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 700   HeapWord* next = bottom + bot_size;                                           \
 701   while (next < mr_start) {                                                     \
 702     bottom = next;                                                              \
 703     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 704     next = bottom + bot_size;                                                   \
 705   }                                                                             \
 706                                                                                 \
 707   while (bottom < top) {                                                        \
 708     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 709         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 710                     oop(bottom)) &&                                             \
 711         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 712       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 713       bottom += _cfls->adjustObjectSize(word_sz);                               \
 714     } else {                                                                    \
 715       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 716     }                                                                           \
 717   }                                                                             \
 718 }
 719 
 720 // (There are only two of these, rather than N, because the split is due
 721 // only to the introduction of the FilteringClosure, a local part of the
 722 // impl of this abstraction.)
 723 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
 724 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 725 
 726 DirtyCardToOopClosure*
 727 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
 728                                       CardTableModRefBS::PrecisionStyle precision,
 729                                       HeapWord* boundary) {
 730   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 731 }
 732 
 733 
 734 // Note on locking for the space iteration functions:
 735 // since the collector's iteration activities are concurrent with
 736 // allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance, a block being iterated
 738 // may suddenly be allocated or divided up and part of it allocated and
 739 // so on.
 740 
 741 // Apply the given closure to each block in the space.
 742 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 743   assert_lock_strong(freelistLock());
 744   HeapWord *cur, *limit;
 745   for (cur = bottom(), limit = end(); cur < limit;
 746        cur += cl->do_blk_careful(cur));
 747 }
 748 
 749 // Apply the given closure to each block in the space.
 750 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 751   assert_lock_strong(freelistLock());
 752   HeapWord *cur, *limit;
 753   for (cur = bottom(), limit = end(); cur < limit;
 754        cur += cl->do_blk(cur));
 755 }
 756 
 757 // Apply the given closure to each oop in the space.
 758 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
 759   assert_lock_strong(freelistLock());
 760   HeapWord *cur, *limit;
 761   size_t curSize;
 762   for (cur = bottom(), limit = end(); cur < limit;
 763        cur += curSize) {
 764     curSize = block_size(cur);
 765     if (block_is_obj(cur)) {
 766       oop(cur)->oop_iterate(cl);
 767     }
 768   }
 769 }
 770 
// Apply the given closure to each oop in the intersection of the space and
// the given memory region.
 772 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
 773   assert_lock_strong(freelistLock());
 774   if (is_empty()) {
 775     return;
 776   }
 777   MemRegion cur = MemRegion(bottom(), end());
 778   mr = mr.intersection(cur);
 779   if (mr.is_empty()) {
 780     return;
 781   }
 782   if (mr.equals(cur)) {
 783     oop_iterate(cl);
 784     return;
 785   }
 786   assert(mr.end() <= end(), "just took an intersection above");
 787   HeapWord* obj_addr = block_start(mr.start());
 788   HeapWord* t = mr.end();
 789 
 790   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 791   if (block_is_obj(obj_addr)) {
 792     // Handle first object specially.
 793     oop obj = oop(obj_addr);
 794     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 795   } else {
 796     FreeChunk* fc = (FreeChunk*)obj_addr;
 797     obj_addr += fc->size();
 798   }
 799   while (obj_addr < t) {
 800     HeapWord* obj = obj_addr;
 801     obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than t (the end of the region), then the
    // entire object "obj" is within the region.
 804     if (obj_addr <= t) {
 805       if (block_is_obj(obj)) {
 806         oop(obj)->oop_iterate(cl);
 807       }
 808     } else {
 809       // "obj" extends beyond end of region
 810       if (block_is_obj(obj)) {
 811         oop(obj)->oop_iterate(&smr_blk);
 812       }
 813       break;
 814     }
 815   }
 816 }
 817 
 818 // NOTE: In the following methods, in order to safely be able to
 819 // apply the closure to an object, we need to be sure that the
 820 // object has been initialized. We are guaranteed that an object
 821 // is initialized if we are holding the Heap_lock with the
 822 // world stopped.
 823 void CompactibleFreeListSpace::verify_objects_initialized() const {
 824   if (is_init_completed()) {
 825     assert_locked_or_safepoint(Heap_lock);
 826     if (Universe::is_fully_initialized()) {
 827       guarantee(SafepointSynchronize::is_at_safepoint(),
 828                 "Required for objects to be initialized");
 829     }
 830   } // else make a concession at vm start-up
 831 }
 832 
 833 // Apply the given closure to each object in the space
 834 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 835   assert_lock_strong(freelistLock());
 836   NOT_PRODUCT(verify_objects_initialized());
 837   HeapWord *cur, *limit;
 838   size_t curSize;
 839   for (cur = bottom(), limit = end(); cur < limit;
 840        cur += curSize) {
 841     curSize = block_size(cur);
 842     if (block_is_obj(cur)) {
 843       blk->do_object(oop(cur));
 844     }
 845   }
 846 }
 847 
// Apply the given closure to each live object in the space.
//   Because the ConcurrentMarkSweepGeneration uses this space for
// concurrent GCs, the space may contain objects with references to
// objects that are no longer valid.  For example, an object may reference
// another object that has already been swept up (collected).  This method
// uses obj_is_alive() to determine whether it is safe to apply the closure
// to an object.  See obj_is_alive() for details on how the liveness of an
// object is decided.
 857 
 858 void CompactibleFreeListSpace::safe_object_iterate(ObjectClosure* blk) {
 859   assert_lock_strong(freelistLock());
 860   NOT_PRODUCT(verify_objects_initialized());
 861   HeapWord *cur, *limit;
 862   size_t curSize;
 863   for (cur = bottom(), limit = end(); cur < limit;
 864        cur += curSize) {
 865     curSize = block_size(cur);
 866     if (block_is_obj(cur) && obj_is_alive(cur)) {
 867       blk->do_object(oop(cur));
 868     }
 869   }
 870 }
 871 
 872 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 873                                                   UpwardsObjectClosure* cl) {
 874   assert_locked(freelistLock());
 875   NOT_PRODUCT(verify_objects_initialized());
 876   Space::object_iterate_mem(mr, cl);
 877 }
 878 
 879 // Callers of this iterator beware: The closure application should
 880 // be robust in the face of uninitialized objects and should (always)
 881 // return a correct size so that the next addr + size below gives us a
 882 // valid block boundary. [See for instance,
 883 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 884 // in ConcurrentMarkSweepGeneration.cpp.]
 885 HeapWord*
 886 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 887   assert_lock_strong(freelistLock());
 888   HeapWord *addr, *last;
 889   size_t size;
 890   for (addr = bottom(), last  = end();
 891        addr < last; addr += size) {
 892     FreeChunk* fc = (FreeChunk*)addr;
 893     if (fc->isFree()) {
 894       // Since we hold the free list lock, which protects direct
 895       // allocation in this generation by mutators, a free object
 896       // will remain free throughout this iteration code.
 897       size = fc->size();
 898     } else {
 899       // Note that the object need not necessarily be initialized,
 900       // because (for instance) the free list lock does NOT protect
 901       // object initialization. The closure application below must
 902       // therefore be correct in the face of uninitialized objects.
 903       size = cl->do_object_careful(oop(addr));
 904       if (size == 0) {
 905         // An unparsable object found. Signal early termination.
 906         return addr;
 907       }
 908     }
 909   }
 910   return NULL;
 911 }
 912 
 913 // Callers of this iterator beware: The closure application should
 914 // be robust in the face of uninitialized objects and should (always)
 915 // return a correct size so that the next addr + size below gives us a
 916 // valid block boundary. [See for instance,
 917 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 918 // in ConcurrentMarkSweepGeneration.cpp.]
 919 HeapWord*
 920 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 921   ObjectClosureCareful* cl) {
 922   assert_lock_strong(freelistLock());
 923   // Can't use used_region() below because it may not necessarily
 924   // be the same as [bottom(),end()); although we could
 925   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 926   // that appears too cumbersome, so we just do the simpler check
 927   // in the assertion below.
 928   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 929          "mr should be non-empty and within used space");
 930   HeapWord *addr, *end;
 931   size_t size;
 932   for (addr = block_start_careful(mr.start()), end  = mr.end();
 933        addr < end; addr += size) {
 934     FreeChunk* fc = (FreeChunk*)addr;
 935     if (fc->isFree()) {
 936       // Since we hold the free list lock, which protects direct
 937       // allocation in this generation by mutators, a free object
 938       // will remain free throughout this iteration code.
 939       size = fc->size();
 940     } else {
 941       // Note that the object need not necessarily be initialized,
 942       // because (for instance) the free list lock does NOT protect
 943       // object initialization. The closure application below must
 944       // therefore be correct in the face of uninitialized objects.
 945       size = cl->do_object_careful_m(oop(addr), mr);
 946       if (size == 0) {
 947         // An unparsable object found. Signal early termination.
 948         return addr;
 949       }
 950     }
 951   }
 952   return NULL;
 953 }
 954 
 955 
 956 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 957   NOT_PRODUCT(verify_objects_initialized());
 958   return _bt.block_start(p);
 959 }
 960 
 961 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 962   return _bt.block_start_careful(p);
 963 }
 964 
 965 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 966   NOT_PRODUCT(verify_objects_initialized());
 967   // This must be volatile, or else there is a danger that the compiler
 968   // will compile the code below into a sometimes-infinite loop, by keeping
 969   // the value read the first time in a register.
 970   while (true) {
 971     // We must do this until we get a consistent view of the object.
 972     if (FreeChunk::indicatesFreeChunk(p)) {
 973       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 974       size_t res = fc->size();
 975       // If the object is still a free chunk, return the size, else it
 976       // has been allocated so try again.
 977       if (FreeChunk::indicatesFreeChunk(p)) {
 978         assert(res != 0, "Block size should not be 0");
 979         return res;
 980       }
 981     } else {
 982       // must read from what 'p' points to in each loop.
 983       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
 984       if (k != NULL) {
 985         assert(k->is_oop(true /* ignore mark word */), "Should be klass oop");
 986         oop o = (oop)p;
 987         assert(o->is_parsable(), "Should be parsable");
 988         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 989         size_t res = o->size_given_klass(k->klass_part());
 990         res = adjustObjectSize(res);
 991         assert(res != 0, "Block size should not be 0");
 992         return res;
 993       }
 994     }
 995   }
 996 }
 997 
 998 // A variant of the above that uses the Printezis bits for
 999 // unparsable but allocated objects. This avoids any possible
1000 // stalls waiting for mutators to initialize objects, and is
1001 // thus potentially faster than the variant above. However,
1002 // this variant may return a zero size for a block that is
1003 // under mutation and for which a consistent size cannot be
1004 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
1005 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
1006                                                      const CMSCollector* c)
1007 const {
1008   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1009   // This must be volatile, or else there is a danger that the compiler
1010   // will compile the code below into a sometimes-infinite loop, by keeping
1011   // the value read the first time in a register.
1012   DEBUG_ONLY(uint loops = 0;)
1013   while (true) {
1014     // We must do this until we get a consistent view of the object.
1015     if (FreeChunk::indicatesFreeChunk(p)) {
1016       volatile FreeChunk* fc = (volatile FreeChunk*)p;
1017       size_t res = fc->size();
1018       if (FreeChunk::indicatesFreeChunk(p)) {
1019         assert(res != 0, "Block size should not be 0");
1020         assert(loops == 0, "Should be 0");
1021         return res;
1022       }
1023     } else {
1024       // must read from what 'p' points to in each loop.
1025       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
1026       if (k != NULL &&
1027           ((oopDesc*)p)->is_parsable() &&
1028           ((oopDesc*)p)->is_conc_safe()) {
1029         assert(k->is_oop(), "Should really be klass oop.");
1030         oop o = (oop)p;
1031         assert(o->is_oop(), "Should be an oop");
1032         size_t res = o->size_given_klass(k->klass_part());
1033         res = adjustObjectSize(res);
1034         assert(res != 0, "Block size should not be 0");
1035         return res;
1036       } else {
1037         return c->block_size_if_printezis_bits(p);
1038       }
1039     }
1040     assert(loops == 0, "Can loop at most once");
1041     DEBUG_ONLY(loops++;)
1042   }
1043 }
1044 
1045 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
1046   NOT_PRODUCT(verify_objects_initialized());
1047   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
1048   FreeChunk* fc = (FreeChunk*)p;
1049   if (fc->isFree()) {
1050     return fc->size();
1051   } else {
1052     // Ignore mark word because this may be a recently promoted
1053     // object whose mark word is used to chain together grey
1054     // objects (the last one would have a null value).
1055     assert(oop(p)->is_oop(true), "Should be an oop");
1056     return adjustObjectSize(oop(p)->size());
1057   }
1058 }
1059 
1060 // This implementation assumes that the property of "being an object" is
// stable.  But being a free chunk may not be stable (because of parallel
// promotion).
1063 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
1064   FreeChunk* fc = (FreeChunk*)p;
1065   assert(is_in_reserved(p), "Should be in space");
1066   // When doing a mark-sweep-compact of the CMS generation, this
1067   // assertion may fail because prepare_for_compaction() uses
1068   // space that is garbage to maintain information on ranges of
1069   // live objects so that these live ranges can be moved as a whole.
1070   // Comment out this assertion until that problem can be solved
  // (i.e., the block start calculation may look at objects
  // at addresses below "p" in finding the object that contains "p",
  // and those objects (if garbage) may have been modified to hold
  // live range information).
1075   // assert(CollectedHeap::use_parallel_gc_threads() || _bt.block_start(p) == p,
1076   //        "Should be a block boundary");
1077   if (FreeChunk::indicatesFreeChunk(p)) return false;
1078   klassOop k = oop(p)->klass_or_null();
1079   if (k != NULL) {
1080     // Ignore mark word because it may have been used to
1081     // chain together promoted objects (the last one
1082     // would have a null value).
1083     assert(oop(p)->is_oop(true), "Should be an oop");
1084     return true;
1085   } else {
1086     return false;  // Was not an object at the start of collection.
1087   }
1088 }
1089 
1090 // Check if the object is alive. This fact is checked either by consulting
1091 // the main marking bitmap in the sweeping phase or, if it's a permanent
1092 // generation and we're not in the sweeping phase, by checking the
1093 // perm_gen_verify_bit_map where we store the "deadness" information if
1094 // we did not sweep the perm gen in the most recent previous GC cycle.
1095 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
1096   assert (block_is_obj(p), "The address should point to an object");
1097 
1098   // If we're sweeping, we use object liveness information from the main bit map
1099   // for both perm gen and old gen.
1100   // We don't need to lock the bitmap (live_map or dead_map below), because
1101   // EITHER we are in the middle of the sweeping phase, and the
1102   // main marking bit map (live_map below) is locked,
1103   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
1104   // is stable, because it's mutated only in the sweeping phase.
1105   if (_collector->abstract_state() == CMSCollector::Sweeping) {
1106     CMSBitMap* live_map = _collector->markBitMap();
1107     return live_map->isMarked((HeapWord*) p);
1108   } else {
1109     // If we're not currently sweeping and we haven't swept the perm gen in
1110     // the previous concurrent cycle then we may have dead but unswept objects
1111     // in the perm gen. In this case, we use the "deadness" information
1112     // that we had saved in perm_gen_verify_bit_map at the last sweep.
1113     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
1114       if (_collector->verifying()) {
1115         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
1116         // Object is marked in the dead_map bitmap at the previous sweep
1117         // when we know that it's dead; if the bitmap is not allocated then
1118         // the object is alive.
1119         return (dead_map->sizeInBits() == 0) // bit_map has been allocated
1120                || !dead_map->par_isMarked((HeapWord*) p);
1121       } else {
1122         return false; // We can't say for sure if it's live, so we say that it's dead.
1123       }
1124     }
1125   }
1126   return true;
1127 }
1128 
1129 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
1130   FreeChunk* fc = (FreeChunk*)p;
1131   assert(is_in_reserved(p), "Should be in space");
1132   assert(_bt.block_start(p) == p, "Should be a block boundary");
1133   if (!fc->isFree()) {
1134     // Ignore mark word because it may have been used to
1135     // chain together promoted objects (the last one
1136     // would have a null value).
1137     assert(oop(p)->is_oop(true), "Should be an oop");
1138     return true;
1139   }
1140   return false;
1141 }
1142 
1143 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistLock when you call this.
1145 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
1146   size_t size = 0;
1147   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1148     debug_only(
1149       // We may be calling here without the lock in which case we
1150       // won't do this modest sanity check.
1151       if (freelistLock()->owned_by_self()) {
1152         size_t total_list_size = 0;
1153         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
1154           fc = fc->next()) {
1155           total_list_size += i;
1156         }
1157         assert(total_list_size == i * _indexedFreeList[i].count(),
1158                "Count in list is incorrect");
1159       }
1160     )
1161     size += i * _indexedFreeList[i].count();
1162   }
1163   return size;
1164 }
1165 
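// MT-safe allocation entry point: take the free list lock (without a
// safepoint check) and delegate to the sequential allocate() below.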
1166 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1167   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1168   return allocate(size);
1169 }
1170 
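// Allocate from whatever space remains in the small linear allocation
// block; used as a last resort for large requests (see
// allocate_adaptive_freelists() below).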
1171 HeapWord*
1172 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1173   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1174 }
1175 
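// Sequential allocation: the caller must hold the free list lock and must
// pass a size that has already been adjusted with adjustObjectSize().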
1176 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1177   assert_lock_strong(freelistLock());
1178   HeapWord* res = NULL;
1179   assert(size == adjustObjectSize(size),
1180          "use adjustObjectSize() before calling into allocate()");
1181 
1182   if (_adaptive_freelists) {
1183     res = allocate_adaptive_freelists(size);
1184   } else {  // non-adaptive free lists
1185     res = allocate_non_adaptive_freelists(size);
1186   }
1187 
1188   if (res != NULL) {
1189     // check that res does lie in this space!
1190     assert(is_in_reserved(res), "Not in this space!");
1191     assert(is_aligned((void*)res), "alignment check");
1192 
1193     FreeChunk* fc = (FreeChunk*)res;
1194     fc->markNotFree();
1195     assert(!fc->isFree(), "shouldn't be marked free");
1196     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1197     // Verify that the block offset table shows this to
1198     // be a single block, but not one which is unallocated.
1199     _bt.verify_single_block(res, size);
1200     _bt.verify_not_unallocated(res, size);
1201     // mangle a just allocated object with a distinct pattern.
1202     debug_only(fc->mangleAllocated(size));
1203   }
1204 
1205   return res;
1206 }
1207 
1208 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1209   HeapWord* res = NULL;
  // Try to use linear allocation for smaller blocks.
1211   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1212     // if successful, the following also adjusts block offset table
1213     res = getChunkFromSmallLinearAllocBlock(size);
1214   }
1215   // Else triage to indexed lists for smaller sizes
1216   if (res == NULL) {
1217     if (size < SmallForDictionary) {
1218       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1219     } else {
1220       // else get it from the big dictionary; if even this doesn't
1221       // work we are out of luck.
1222       res = (HeapWord*)getChunkFromDictionaryExact(size);
1223     }
1224   }
1225 
1226   return res;
1227 }
1228 
1229 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1230   assert_lock_strong(freelistLock());
1231   HeapWord* res = NULL;
1232   assert(size == adjustObjectSize(size),
1233          "use adjustObjectSize() before calling into allocate()");
1234 
1235   // Strategy
1236   //   if small
1237   //     exact size from small object indexed list if small
1238   //     small or large linear allocation block (linAB) as appropriate
1239   //     take from lists of greater sized chunks
1240   //   else
1241   //     dictionary
1242   //     small or large linear allocation block if it has the space
1243   // Try allocating exact size from indexTable first
1244   if (size < IndexSetSize) {
1245     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1246     if (res != NULL) {
1247       assert(res != (HeapWord*)_indexedFreeList[size].head(),
1248         "Not removed from free list");
1249       // no block offset table adjustment is necessary on blocks in
1250       // the indexed lists.
1251 
1252     // Try allocating from the small LinAB
1253     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1254         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1255         // if successful, the above also adjusts block offset table
1256         // Note that this call will refill the LinAB to
1257         // satisfy the request.  This is different from
1258         // evm.
1259         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1260     } else {
1261       // Raid the exact free lists larger than size, even if they are not
1262       // overpopulated.
1263       res = (HeapWord*) getChunkFromGreater(size);
1264     }
1265   } else {
1266     // Big objects get allocated directly from the dictionary.
1267     res = (HeapWord*) getChunkFromDictionaryExact(size);
1268     if (res == NULL) {
1269       // Try hard not to fail since an allocation failure will likely
1270       // trigger a synchronous GC.  Try to get the space from the
1271       // allocation blocks.
1272       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1273     }
1274   }
1275 
1276   return res;
1277 }
1278 
1279 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1280 // when promoting obj.
1281 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1282   // Depending on the object size, expansion may require refilling either a
1283   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  2*MinChunkSize
1284   // is added because the dictionary may over-allocate to avoid fragmentation.
1285   size_t space = obj_size;
1286   if (!_adaptive_freelists) {
1287     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1288   }
1289   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1290   return space;
1291 }
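// Worked example of the estimate above, with hypothetical values chosen
// only for illustration (none of these numbers come from the code):
//
//   obj_size                              = 100 words
//   _smallLinearAllocBlock._refillSize    = 256 words   (non-adaptive case)
//   _promoInfo.refillSize()               =  64 words
//   MinChunkSize                          =   4 words
//
//   space = MAX2(100, 256) + 64 + 2 * 4 = 328 words
//
// With adaptive free lists the MAX2 step is skipped and the estimate
// would be 100 + 64 + 8 = 172 words.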
1292 
1293 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1294   FreeChunk* ret;
1295 
1296   assert(numWords >= MinChunkSize, "Size is less than minimum");
1297   assert(linearAllocationWouldFail() || bestFitFirst(),
1298     "Should not be here");
1299 
1300   size_t i;
1301   size_t currSize = numWords + MinChunkSize;
1302   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1303   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1304     FreeList* fl = &_indexedFreeList[i];
1305     if (fl->head()) {
1306       ret = getFromListGreater(fl, numWords);
1307       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1308       return ret;
1309     }
1310   }
1311 
1312   currSize = MAX2((size_t)SmallForDictionary,
1313                   (size_t)(numWords + MinChunkSize));
1314 
1315   /* Try to get a chunk that satisfies request, while avoiding
1316      fragmentation that can't be handled. */
1317   {
1318     ret =  dictionary()->getChunk(currSize);
1319     if (ret != NULL) {
1320       assert(ret->size() - numWords >= MinChunkSize,
1321              "Chunk is too small");
1322       _bt.allocated((HeapWord*)ret, ret->size());
1323       /* Carve returned chunk. */
1324       (void) splitChunkAndReturnRemainder(ret, numWords);
1325       /* Label this as no longer a free chunk. */
1326       assert(ret->isFree(), "This chunk should be free");
1327       ret->linkPrev(NULL);
1328     }
1329     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1330     return ret;
1331   }
1332   ShouldNotReachHere();
1333 }
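// Illustrative trace of the search above (hypothetical sizes only): a
// request for numWords == 20 with MinChunkSize == 4 starts scanning the
// exact lists at size 24, because any donor chunk must leave a remainder
// of at least MinChunkSize after splitChunkAndReturnRemainder() carves off
// the 20-word piece.  If no indexed list from 24 up to IndexSetSize has a
// head, the dictionary is asked for at least MAX2(SmallForDictionary, 24)
// words, which again guarantees a remainder that is a legal free chunk.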
1334 
1335 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc)
1336   const {
1337   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1338   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
1339 }
1340 
1341 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
1342   if (fc->size() >= IndexSetSize) {
1343     return dictionary()->verifyChunkInFreeLists(fc);
1344   } else {
1345     return verifyChunkInIndexedFreeLists(fc);
1346   }
1347 }
1348 
1349 #ifndef PRODUCT
1350 void CompactibleFreeListSpace::assert_locked() const {
1351   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1352 }
1353 
1354 void CompactibleFreeListSpace::assert_locked(const Mutex* lock) const {
1355   CMSLockVerifier::assert_locked(lock);
1356 }
1357 #endif
1358 
1359 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1360   // In the parallel case, the main thread holds the free list lock
1361   // on behalf of the parallel threads.
1362   FreeChunk* fc;
1363   {
1364     // If GC is parallel, this might be called by several threads.
1365     // This should be rare enough that the locking overhead won't affect
1366     // the sequential code.
1367     MutexLockerEx x(parDictionaryAllocLock(),
1368                     Mutex::_no_safepoint_check_flag);
1369     fc = getChunkFromDictionary(size);
1370   }
1371   if (fc != NULL) {
1372     fc->dontCoalesce();
1373     assert(fc->isFree(), "Should be free, but not coalescable");
1374     // Verify that the block offset table shows this to
1375     // be a single block, but not one which is unallocated.
1376     _bt.verify_single_block((HeapWord*)fc, fc->size());
1377     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1378   }
1379   return fc;
1380 }
1381 
1382 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1383   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1384   assert_locked();
1385 
1386   // if we are tracking promotions, then first ensure space for
1387   // promotion (including spooling space for saving header if necessary).
1388   // then allocate and copy, then track promoted info if needed.
1389   // When tracking (see PromotionInfo::track()), the mark word may
1390   // be displaced and in this case restoration of the mark word
1391   // occurs in the (oop_since_save_marks_)iterate phase.
1392   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1393     return NULL;
1394   }
1395   // Call the allocate(size_t, bool) form directly to avoid the
1396   // additional call through the allocate(size_t) form.  Having
1397   // the compiler inline the call is problematic because allocate(size_t)
1398   // is a virtual method.
1399   HeapWord* res = allocate(adjustObjectSize(obj_size));
1400   if (res != NULL) {
1401     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1402     // if we should be tracking promotions, do so.
1403     if (_promoInfo.tracking()) {
1404         _promoInfo.track((PromotedObject*)res);
1405     }
1406   }
1407   return oop(res);
1408 }
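// A minimal caller-side sketch (illustrative only; the variable names and
// the fallback below are assumptions, not part of this file):
//
//   oop old_obj = ...;                       // object being promoted
//   size_t sz   = old_obj->size();
//   oop new_obj = cfls->promote(old_obj, sz);
//   if (new_obj == NULL) {
//     // Promotion failed: either no space, or no spooling space while
//     // tracking; the caller must handle promotion failure.
//   }
//
// Note that promote() copies the object but leaves restoration of any
// displaced mark word to the oop_since_save_marks_iterate phase.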
1409 
1410 HeapWord*
1411 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1412   assert_locked();
1413   assert(size >= MinChunkSize, "minimum chunk size");
1414   assert(size <  _smallLinearAllocBlock._allocation_size_limit,
1415     "maximum from smallLinearAllocBlock");
1416   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1417 }
1418 
1419 HeapWord*
1420 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1421                                                        size_t size) {
1422   assert_locked();
1423   assert(size >= MinChunkSize, "too small");
1424   HeapWord* res = NULL;
1425   // Try to do linear allocation from blk, bailing out if the block is empty.
1426   if (blk->_word_size == 0) {
1427     // We have probably been unable to fill this either in the prologue or
1428     // when it was exhausted at the last linear allocation. Bail out until
1429     // next time.
1430     assert(blk->_ptr == NULL, "consistency check");
1431     return NULL;
1432   }
1433   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1434   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1435   if (res != NULL) return res;
1436 
1437   // about to exhaust this linear allocation block
1438   if (blk->_word_size == size) { // exactly satisfied
1439     res = blk->_ptr;
1440     _bt.allocated(res, blk->_word_size);
1441   } else if (size + MinChunkSize <= blk->_refillSize) {
1442     size_t sz = blk->_word_size;
1443     // Update _unallocated_block if the size is such that chunk would be
1444     // returned to the indexed free list.  All other chunks in the indexed
1445     // free lists are allocated from the dictionary so that _unallocated_block
1446     // has already been adjusted for them.  Do it here so that the cost
1447     // is incurred for all chunks added back to the indexed free lists.
1448     if (sz < SmallForDictionary) {
1449       _bt.allocated(blk->_ptr, sz);
1450     }
1451     // Return the chunk that isn't big enough, and then refill below.
1452     addChunkToFreeLists(blk->_ptr, sz);
1453     splitBirth(sz);
1454     // Don't keep statistics on adding back chunk from a LinAB.
1455   } else {
1456     // A refilled block would not satisfy the request.
1457     return NULL;
1458   }
1459 
1460   blk->_ptr = NULL; blk->_word_size = 0;
1461   refillLinearAllocBlock(blk);
1462   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1463          "block was replenished");
1464   if (res != NULL) {
1465     splitBirth(size);
1466     repairLinearAllocBlock(blk);
1467   } else if (blk->_ptr != NULL) {
1468     res = blk->_ptr;
1469     size_t blk_size = blk->_word_size;
1470     blk->_word_size -= size;
1471     blk->_ptr  += size;
1472     splitBirth(size);
1473     repairLinearAllocBlock(blk);
1474     // Update BOT last so that other (parallel) GC threads see a consistent
1475     // view of the BOT and free blocks.
1476     // Above must occur before BOT is updated below.
1477     OrderAccess::storestore();
1478     _bt.split_block(res, blk_size, size);  // adjust block offset table
1479   }
1480   return res;
1481 }
1482 
1483 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1484                                         LinearAllocBlock* blk,
1485                                         size_t size) {
1486   assert_locked();
1487   assert(size >= MinChunkSize, "too small");
1488 
1489   HeapWord* res = NULL;
1490   // This is the common case.  Keep it simple.
1491   if (blk->_word_size >= size + MinChunkSize) {
1492     assert(blk->_ptr != NULL, "consistency check");
1493     res = blk->_ptr;
1494     // Note that the BOT is up-to-date for the linAB before allocation.  It
1495     // indicates the start of the linAB.  The split_block() updates the
1496     // BOT for the linAB after the allocation (indicates the start of the
1497     // next chunk to be allocated).
1498     size_t blk_size = blk->_word_size;
1499     blk->_word_size -= size;
1500     blk->_ptr  += size;
1501     splitBirth(size);
1502     repairLinearAllocBlock(blk);
1503     // Update BOT last so that other (parallel) GC threads see a consistent
1504     // view of the BOT and free blocks.
1505     // Above must occur before BOT is updated below.
1506     OrderAccess::storestore();
1507     _bt.split_block(res, blk_size, size);  // adjust block offset table
1508     _bt.allocated(res, size);
1509   }
1510   return res;
1511 }
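// Illustrative before/after state for the common case above (hypothetical
// numbers): with blk->_word_size == 100, blk->_ptr == P and a request for
// size == 30 (and MinChunkSize == 4, so 100 >= 30 + 4 holds):
//
//   before:  _ptr = P        _word_size = 100
//   after:   _ptr = P + 30   _word_size =  70,  result = P
//
// The storestore barrier ensures the repaired free-block view of the
// shrunken linAB is visible to other GC threads before the BOT is updated
// to record the new block boundary at P + 30.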
1512 
1513 FreeChunk*
1514 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1515   assert_locked();
1516   assert(size < SmallForDictionary, "just checking");
1517   FreeChunk* res;
1518   res = _indexedFreeList[size].getChunkAtHead();
1519   if (res == NULL) {
1520     res = getChunkFromIndexedFreeListHelper(size);
1521   }
1522   _bt.verify_not_unallocated((HeapWord*) res, size);
1523   assert(res == NULL || res->size() == size, "Incorrect block size");
1524   return res;
1525 }
1526 
1527 FreeChunk*
1528 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size,
1529   bool replenish) {
1530   assert_locked();
1531   FreeChunk* fc = NULL;
1532   if (size < SmallForDictionary) {
1533     assert(_indexedFreeList[size].head() == NULL ||
1534       _indexedFreeList[size].surplus() <= 0,
1535       "List for this size should be empty or underpopulated");
1536     // Try best fit in exact lists before replenishing the list
1537     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1538       // Replenish list.
1539       //
1540       // Things tried that failed.
1541       //   Tried allocating out of the two LinAB's first before
1542       // replenishing lists.
1543       //   Tried small linAB of size 256 (size in indexed list)
1544       // and replenishing indexed lists from the small linAB.
1545       //
1546       FreeChunk* newFc = NULL;
1547       const size_t replenish_size = CMSIndexedFreeListReplenish * size;
1548       if (replenish_size < SmallForDictionary) {
1549         // Do not replenish from an underpopulated size.
1550         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1551             _indexedFreeList[replenish_size].head() != NULL) {
1552           newFc = _indexedFreeList[replenish_size].getChunkAtHead();
1553         } else if (bestFitFirst()) {
1554           newFc = bestFitSmall(replenish_size);
1555         }
1556       }
1557       if (newFc == NULL && replenish_size > size) {
1558         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1559         newFc = getChunkFromIndexedFreeListHelper(replenish_size, false);
1560       }
1561       // Note: The stats update re split-death of block obtained above
1562       // will be recorded below precisely when we know we are going to
1563       // be actually splitting it into more than one piece below.
1564       if (newFc != NULL) {
1565         if  (replenish || CMSReplenishIntermediate) {
1566           // Replenish this list and return one block to caller.
1567           size_t i;
1568           FreeChunk *curFc, *nextFc;
1569           size_t num_blk = newFc->size() / size;
1570           assert(num_blk >= 1, "Smaller than requested?");
1571           assert(newFc->size() % size == 0, "Should be integral multiple of request");
1572           if (num_blk > 1) {
1573             // we are sure we will be splitting the block just obtained
1574             // into multiple pieces; record the split-death of the original
1575             splitDeath(replenish_size);
1576           }
1577           // carve up and link blocks 0, ..., num_blk - 2
1578           // The last chunk is not added to the lists but is returned as the
1579           // free chunk.
1580           for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size),
1581                i = 0;
1582                i < (num_blk - 1);
1583                curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size),
1584                i++) {
1585             curFc->setSize(size);
1586             // Don't record this as a return in order to try and
1587             // determine the "returns" from a GC.
1588             _bt.verify_not_unallocated((HeapWord*) curFc, size);
1589             _indexedFreeList[size].returnChunkAtTail(curFc, false);
1590             _bt.mark_block((HeapWord*)curFc, size);
1591             splitBirth(size);
1592             // Don't record the initial population of the indexed list
1593             // as a split birth.
1594           }
1595 
1596           // check that the arithmetic was OK above
1597           assert((HeapWord*)nextFc == (HeapWord*)newFc + num_blk*size,
1598             "inconsistency in carving newFc");
1599           curFc->setSize(size);
1600           _bt.mark_block((HeapWord*)curFc, size);
1601           splitBirth(size);
1602           fc = curFc;
1603         } else {
1604           // Return entire block to caller
1605           fc = newFc;
1606         }
1607       }
1608     }
1609   } else {
1610     // Get a free chunk from the free chunk dictionary to be returned to
1611     // replenish the indexed free list.
1612     fc = getChunkFromDictionaryExact(size);
1613   }
1614   // assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
1615   return fc;
1616 }
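// Worked example of the replenish path above (hypothetical flag values):
// with size == 8 and CMSIndexedFreeListReplenish == 4, replenish_size is
// 32.  If a 32-word chunk is obtained, num_blk == 4, so three 8-word
// chunks are carved off, linked at the tail of _indexedFreeList[8] and
// marked in the BOT, and the fourth (last) 8-word piece is returned to the
// caller.  The split-death of the 32-word block is recorded only because
// num_blk > 1.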
1617 
1618 FreeChunk*
1619 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1620   assert_locked();
1621   FreeChunk* fc = _dictionary->getChunk(size);
1622   if (fc == NULL) {
1623     return NULL;
1624   }
1625   _bt.allocated((HeapWord*)fc, fc->size());
1626   if (fc->size() >= size + MinChunkSize) {
1627     fc = splitChunkAndReturnRemainder(fc, size);
1628   }
1629   assert(fc->size() >= size, "chunk too small");
1630   assert(fc->size() < size + MinChunkSize, "chunk too big");
1631   _bt.verify_single_block((HeapWord*)fc, fc->size());
1632   return fc;
1633 }
1634 
1635 FreeChunk*
1636 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1637   assert_locked();
1638   FreeChunk* fc = _dictionary->getChunk(size);
1639   if (fc == NULL) {
1640     return fc;
1641   }
1642   _bt.allocated((HeapWord*)fc, fc->size());
1643   if (fc->size() == size) {
1644     _bt.verify_single_block((HeapWord*)fc, size);
1645     return fc;
1646   }
1647   assert(fc->size() > size, "getChunk() guarantee");
1648   if (fc->size() < size + MinChunkSize) {
1649     // Return the chunk to the dictionary and go get a bigger one.
1650     returnChunkToDictionary(fc);
1651     fc = _dictionary->getChunk(size + MinChunkSize);
1652     if (fc == NULL) {
1653       return NULL;
1654     }
1655     _bt.allocated((HeapWord*)fc, fc->size());
1656   }
1657   assert(fc->size() >= size + MinChunkSize, "tautology");
1658   fc = splitChunkAndReturnRemainder(fc, size);
1659   assert(fc->size() == size, "chunk is wrong size");
1660   _bt.verify_single_block((HeapWord*)fc, size);
1661   return fc;
1662 }
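// Illustrative sizes for the exact-fit logic above (hypothetical numbers):
// a request for size == 40 with MinChunkSize == 4 may first receive a
// 42-word chunk from the dictionary.  Since 42 - 40 < MinChunkSize the
// remainder would not be a legal free chunk, so the 42-word chunk is
// returned and a chunk of at least 44 words is requested instead; the
// final split then leaves a remainder of at least MinChunkSize.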
1663 
1664 void
1665 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1666   assert_locked();
1667 
1668   size_t size = chunk->size();
1669   _bt.verify_single_block((HeapWord*)chunk, size);
1670   // adjust _unallocated_block downward, as necessary
1671   _bt.freed((HeapWord*)chunk, size);
1672   _dictionary->returnChunk(chunk);
1673 #ifndef PRODUCT
1674   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1675     TreeChunk::as_TreeChunk(chunk)->list()->verify_stats();
1676   }
1677 #endif // PRODUCT
1678 }
1679 
1680 void
1681 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1682   assert_locked();
1683   size_t size = fc->size();
1684   _bt.verify_single_block((HeapWord*) fc, size);
1685   _bt.verify_not_unallocated((HeapWord*) fc, size);
1686   if (_adaptive_freelists) {
1687     _indexedFreeList[size].returnChunkAtTail(fc);
1688   } else {
1689     _indexedFreeList[size].returnChunkAtHead(fc);
1690   }
1691 #ifndef PRODUCT
1692   if (CMSCollector::abstract_state() != CMSCollector::Sweeping) {
1693      _indexedFreeList[size].verify_stats();
1694   }
1695 #endif // PRODUCT
1696 }
1697 
1698 // Add chunk to end of last block -- if it's the largest
1699 // block -- and update BOT and census data. We would
1700 // of course have preferred to coalesce it with the
1701 // last block, but it's currently less expensive to find the
1702 // largest block than it is to find the last.
1703 void
1704 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1705   HeapWord* chunk, size_t     size) {
1706   // check that the chunk does lie in this space!
1707   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1708   // One of the parallel gc task threads may be here
1709   // whilst others are allocating.
1710   Mutex* lock = NULL;
1711   if (ParallelGCThreads != 0) {
1712     lock = &_parDictionaryAllocLock;
1713   }
1714   FreeChunk* ec;
1715   {
1716     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1717     ec = dictionary()->findLargestDict();  // get largest block
1718     if (ec != NULL && ec->end() == chunk) {
1719       // It's a coterminal block - we can coalesce.
1720       size_t old_size = ec->size();
1721       coalDeath(old_size);
1722       removeChunkFromDictionary(ec);
1723       size += old_size;
1724     } else {
1725       ec = (FreeChunk*)chunk;
1726     }
1727   }
1728   ec->setSize(size);
1729   debug_only(ec->mangleFreed(size));
1730   if (size < SmallForDictionary) {
1731     lock = _indexedFreeListParLocks[size];
1732   }
1733   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1734   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1735   // record the birth under the lock since the recording involves
1736   // manipulation of the list on which the chunk lives and
1737   // if the chunk is allocated and is the last on the list,
1738   // the list can go away.
1739   coalBirth(size);
1740 }
1741 
1742 void
1743 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1744                                               size_t     size) {
1745   // check that the chunk does lie in this space!
1746   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1747   assert_locked();
1748   _bt.verify_single_block(chunk, size);
1749 
1750   FreeChunk* fc = (FreeChunk*) chunk;
1751   fc->setSize(size);
1752   debug_only(fc->mangleFreed(size));
1753   if (size < SmallForDictionary) {
1754     returnChunkToFreeList(fc);
1755   } else {
1756     returnChunkToDictionary(fc);
1757   }
1758 }
1759 
1760 void
1761 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1762   size_t size, bool coalesced) {
1763   assert_locked();
1764   assert(chunk != NULL, "null chunk");
1765   if (coalesced) {
1766     // repair BOT
1767     _bt.single_block(chunk, size);
1768   }
1769   addChunkToFreeLists(chunk, size);
1770 }
1771 
1772 // We _must_ find the purported chunk on our free lists;
1773 // we assert if we don't.
1774 void
1775 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1776   size_t size = fc->size();
1777   assert_locked();
1778   debug_only(verifyFreeLists());
1779   if (size < SmallForDictionary) {
1780     removeChunkFromIndexedFreeList(fc);
1781   } else {
1782     removeChunkFromDictionary(fc);
1783   }
1784   _bt.verify_single_block((HeapWord*)fc, size);
1785   debug_only(verifyFreeLists());
1786 }
1787 
1788 void
1789 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1790   size_t size = fc->size();
1791   assert_locked();
1792   assert(fc != NULL, "null chunk");
1793   _bt.verify_single_block((HeapWord*)fc, size);
1794   _dictionary->removeChunk(fc);
1795   // adjust _unallocated_block upward, as necessary
1796   _bt.allocated((HeapWord*)fc, size);
1797 }
1798 
1799 void
1800 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1801   assert_locked();
1802   size_t size = fc->size();
1803   _bt.verify_single_block((HeapWord*)fc, size);
1804   NOT_PRODUCT(
1805     if (FLSVerifyIndexTable) {
1806       verifyIndexedFreeList(size);
1807     }
1808   )
1809   _indexedFreeList[size].removeChunk(fc);
1810   debug_only(fc->clearNext());
1811   debug_only(fc->clearPrev());
1812   NOT_PRODUCT(
1813     if (FLSVerifyIndexTable) {
1814       verifyIndexedFreeList(size);
1815     }
1816   )
1817 }
1818 
1819 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1820   /* A hint is the next larger size that has a surplus.
1821      Start search at a size large enough to guarantee that
1822      the excess is >= MIN_CHUNK. */
1823   size_t start = align_object_size(numWords + MinChunkSize);
1824   if (start < IndexSetSize) {
1825     FreeList* it   = _indexedFreeList;
1826     size_t    hint = _indexedFreeList[start].hint();
1827     while (hint < IndexSetSize) {
1828       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1829       FreeList *fl = &_indexedFreeList[hint];
1830       if (fl->surplus() > 0 && fl->head() != NULL) {
1831         // Found a list with surplus, reset original hint
1832         // and split out a free chunk which is returned.
1833         _indexedFreeList[start].set_hint(hint);
1834         FreeChunk* res = getFromListGreater(fl, numWords);
1835         assert(res == NULL || res->isFree(),
1836           "Should be returning a free chunk");
1837         return res;
1838       }
1839       hint = fl->hint(); /* keep looking */
1840     }
1841     /* None found. */
1842     it[start].set_hint(IndexSetSize);
1843   }
1844   return NULL;
1845 }
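// Illustrative walk of the hint chain above (hypothetical contents): for
// numWords == 10 and MinChunkSize == 4, the search starts at size 14
// (after alignment).  Suppose _indexedFreeList[14].hint() == 20 and list
// 20 has no surplus but hints at 32, which does have a surplus and a head:
// the hint at slot 14 is reset to 32 and getFromListGreater() splits a
// 32-word chunk, returning the 10-word piece and freeing the remainder.
// If no hinted list has a surplus, the hint at slot 14 is parked at
// IndexSetSize.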
1846 
1847 /* Requires fl->size >= numWords + MinChunkSize */
1848 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
1849   size_t numWords) {
1850   FreeChunk *curr = fl->head();
1851   assert(numWords >= MinChunkSize, "Word size is too small");
1852   assert(curr != NULL, "List is empty");
1853   size_t oldNumWords = curr->size();
1854   assert(oldNumWords >= numWords + MinChunkSize,
1855         "Size of chunks in the list is too small");
1856 
1857   fl->removeChunk(curr);
1858   // recorded indirectly by splitChunkAndReturnRemainder -
1859   // smallSplit(oldNumWords, numWords);
1860   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1861   // Does anything have to be done for the remainder in terms of
1862   // fixing the card table?
1863   assert(new_chunk == NULL || new_chunk->isFree(),
1864     "Should be returning a free chunk");
1865   return new_chunk;
1866 }
1867 
1868 FreeChunk*
1869 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1870   size_t new_size) {
1871   assert_locked();
1872   size_t size = chunk->size();
1873   assert(size > new_size, "Split from a smaller block?");
1874   assert(is_aligned(chunk), "alignment problem");
1875   assert(size == adjustObjectSize(size), "alignment problem");
1876   size_t rem_size = size - new_size;
1877   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1878   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1879   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1880   assert(is_aligned(ffc), "alignment problem");
1881   ffc->setSize(rem_size);
1882   ffc->linkNext(NULL);
1883   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
1884   // Above must occur before BOT is updated below.
1885   // adjust block offset table
1886   OrderAccess::storestore();
1887   assert(chunk->isFree() && ffc->isFree(), "Error");
1888   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1889   if (rem_size < SmallForDictionary) {
1890     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1891     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1892     returnChunkToFreeList(ffc);
1893     split(size, rem_size);
1894     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1895   } else {
1896     returnChunkToDictionary(ffc);
1897     split(size, rem_size);
1898   }
1899   chunk->setSize(new_size);
1900   return chunk;
1901 }
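// Split arithmetic for the routine above, with hypothetical sizes:
// splitting a 100-word chunk at new_size == 40 places the 60-word
// remainder at (HeapWord*)chunk + 40.  The remainder is linked as a free
// block before the BOT is told about the split (hence the storestore
// barrier), and it is then routed to _indexedFreeList[60] or to the
// dictionary depending on whether 60 < SmallForDictionary.  The original
// chunk is finally shrunk to 40 words and handed back to the caller.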
1902 
1903 void
1904 CompactibleFreeListSpace::sweep_completed() {
1905   // Now that space is probably plentiful, refill linear
1906   // allocation blocks as needed.
1907   refillLinearAllocBlocksIfNeeded();
1908 }
1909 
1910 void
1911 CompactibleFreeListSpace::gc_prologue() {
1912   assert_locked();
1913   if (PrintFLSStatistics != 0) {
1914     gclog_or_tty->print("Before GC:\n");
1915     reportFreeListStatistics();
1916   }
1917   refillLinearAllocBlocksIfNeeded();
1918 }
1919 
1920 void
1921 CompactibleFreeListSpace::gc_epilogue() {
1922   assert_locked();
1923   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1924     if (_smallLinearAllocBlock._word_size == 0)
1925       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1926   }
1927   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1928   _promoInfo.stopTrackingPromotions();
1929   repairLinearAllocationBlocks();
1930   // Print Space's stats
1931   if (PrintFLSStatistics != 0) {
1932     gclog_or_tty->print("After GC:\n");
1933     reportFreeListStatistics();
1934   }
1935 }
1936 
1937 // Iteration support, mostly delegated from a CMS generation
1938 
1939 void CompactibleFreeListSpace::save_marks() {
1940   // mark the "end" of the used space at the time of this call;
1941   // note, however, that promoted objects from this point
1942   // on are tracked in the _promoInfo below.
1943   set_saved_mark_word(unallocated_block());
1944   // inform allocator that promotions should be tracked.
1945   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1946   _promoInfo.startTrackingPromotions();
1947 }
1948 
1949 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1950   assert(_promoInfo.tracking(), "No preceding save_marks?");
1951   assert(SharedHeap::heap()->n_par_threads() == 0,
1952          "Shouldn't be called if using parallel gc.");
1953   return _promoInfo.noPromotions();
1954 }
1955 
1956 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1957                                                                             \
1958 void CompactibleFreeListSpace::                                             \
1959 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1960   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
1961          "Shouldn't be called (yet) during parallel part of gc.");          \
1962   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1963   /*                                                                        \
1964    * This also restores any displaced headers and removes the elements from \
1965    * the iteration set as they are processed, so that we have a clean slate \
1966    * at the end of the iteration. Note, thus, that if new objects are       \
1967    * promoted as a result of the iteration they are iterated over as well.  \
1968    */                                                                       \
1969   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1970 }
1971 
1972 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1973 
1974 
1975 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
1976   // ugghh... how would one do this efficiently for a non-contiguous space?
1977   guarantee(false, "NYI");
1978 }
1979 
1980 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1981   return _smallLinearAllocBlock._word_size == 0;
1982 }
1983 
1984 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1985   // Fix up linear allocation blocks to look like free blocks
1986   repairLinearAllocBlock(&_smallLinearAllocBlock);
1987 }
1988 
1989 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1990   assert_locked();
1991   if (blk->_ptr != NULL) {
1992     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1993            "Minimum block size requirement");
1994     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1995     fc->setSize(blk->_word_size);
1996     fc->linkPrev(NULL);   // mark as free
1997     fc->dontCoalesce();
1998     assert(fc->isFree(), "just marked it free");
1999     assert(fc->cantCoalesce(), "just marked it uncoalescable");
2000   }
2001 }
2002 
2003 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
2004   assert_locked();
2005   if (_smallLinearAllocBlock._ptr == NULL) {
2006     assert(_smallLinearAllocBlock._word_size == 0,
2007       "Size of linAB should be zero if the ptr is NULL");
2008     // Reset the linAB refill and allocation size limit.
2009     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
2010   }
2011   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
2012 }
2013 
2014 void
2015 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
2016   assert_locked();
2017   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
2018          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
2019          "blk invariant");
2020   if (blk->_ptr == NULL) {
2021     refillLinearAllocBlock(blk);
2022   }
2023   if (PrintMiscellaneous && Verbose) {
2024     if (blk->_word_size == 0) {
2025       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
2026     }
2027   }
2028 }
2029 
2030 void
2031 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
2032   assert_locked();
2033   assert(blk->_word_size == 0 && blk->_ptr == NULL,
2034          "linear allocation block should be empty");
2035   FreeChunk* fc;
2036   if (blk->_refillSize < SmallForDictionary &&
2037       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
2038     // A linAB's strategy might be to use small sizes to reduce
2039     // fragmentation but still get the benefits of allocation from a
2040     // linAB.
2041   } else {
2042     fc = getChunkFromDictionary(blk->_refillSize);
2043   }
2044   if (fc != NULL) {
2045     blk->_ptr  = (HeapWord*)fc;
2046     blk->_word_size = fc->size();
2047     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
2048   }
2049 }
2050 
2051 // Support for concurrent collection policy decisions.
2052 bool CompactibleFreeListSpace::should_concurrent_collect() const {
2053   // In the future we might want to add in fragmentation stats --
2054   // including erosion of the "mountain" into this decision as well.
2055   return !adaptive_freelists() && linearAllocationWouldFail();
2056 }
2057 
2058 // Support for compaction
2059 
2060 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
2061   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
2062   // prepare_for_compaction() uses the space between live objects
2063   // so that later phase can skip dead space quickly.  So verification
2064   // of the free lists doesn't work after.
2065 }
2066 
2067 #define obj_size(q) adjustObjectSize(oop(q)->size())
2068 #define adjust_obj_size(s) adjustObjectSize(s)
2069 
2070 void CompactibleFreeListSpace::adjust_pointers() {
2071   // In other versions of adjust_pointers(), a bail out
2072   // based on the amount of live data in the generation
2073   // (i.e., if 0, bail out) may be used.
2074   // Cannot test used() == 0 here because the free lists have already
2075   // been mangled by the compaction.
2076 
2077   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
2078   // See note about verification in prepare_for_compaction().
2079 }
2080 
2081 void CompactibleFreeListSpace::compact() {
2082   SCAN_AND_COMPACT(obj_size);
2083 }
2084 
2085 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
2086 // where fbs is free block sizes
2087 double CompactibleFreeListSpace::flsFrag() const {
2088   size_t itabFree = totalSizeInIndexedFreeLists();
2089   double frag = 0.0;
2090   size_t i;
2091 
2092   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2093     double sz  = i;
2094     frag      += _indexedFreeList[i].count() * (sz * sz);
2095   }
2096 
2097   double totFree = itabFree +
2098                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
2099   if (totFree > 0) {
2100     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) /
2101             (totFree * totFree));
2102     frag = (double)1.0  - frag;
2103   } else {
2104     assert(frag == 0.0, "Follows from totFree == 0");
2105   }
2106   return frag;
2107 }
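// Worked instances of the metric above (hypothetical free-block
// inventories):
//
//   one 1000-word free block:
//     frag = 1 - (1000*1000) / (1000*1000)       = 0.0   (no fragmentation)
//
//   one hundred 10-word free blocks (same total free space):
//     frag = 1 - (100 * 10*10) / (1000*1000)     = 0.99  (highly fragmented)
//
// So frag approaches 0 when the free space is concentrated in a few large
// blocks and approaches 1 as it is spread across many small blocks.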
2108 
2109 void CompactibleFreeListSpace::beginSweepFLCensus(
2110   float inter_sweep_current,
2111   float inter_sweep_estimate,
2112   float intra_sweep_estimate) {
2113   assert_locked();
2114   size_t i;
2115   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2116     FreeList* fl    = &_indexedFreeList[i];
2117     if (PrintFLSStatistics > 1) {
2118       gclog_or_tty->print("size[" SIZE_FORMAT "] : ", i);
2119     }
2120     fl->compute_desired(inter_sweep_current, inter_sweep_estimate, intra_sweep_estimate);
2121     fl->set_coalDesired((ssize_t)((double)fl->desired() * CMSSmallCoalSurplusPercent));
2122     fl->set_beforeSweep(fl->count());
2123     fl->set_bfrSurp(fl->surplus());
2124   }
2125   _dictionary->beginSweepDictCensus(CMSLargeCoalSurplusPercent,
2126                                     inter_sweep_current,
2127                                     inter_sweep_estimate,
2128                                     intra_sweep_estimate);
2129 }
2130 
2131 void CompactibleFreeListSpace::setFLSurplus() {
2132   assert_locked();
2133   size_t i;
2134   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2135     FreeList *fl = &_indexedFreeList[i];
2136     fl->set_surplus(fl->count() -
2137                     (ssize_t)((double)fl->desired() * CMSSmallSplitSurplusPercent));
2138   }
2139 }
2140 
2141 void CompactibleFreeListSpace::setFLHints() {
2142   assert_locked();
2143   size_t i;
2144   size_t h = IndexSetSize;
2145   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2146     FreeList *fl = &_indexedFreeList[i];
2147     fl->set_hint(h);
2148     if (fl->surplus() > 0) {
2149       h = i;
2150     }
2151   }
2152 }
2153 
2154 void CompactibleFreeListSpace::clearFLCensus() {
2155   assert_locked();
2156   size_t i;
2157   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2158     FreeList *fl = &_indexedFreeList[i];
2159     fl->set_prevSweep(fl->count());
2160     fl->set_coalBirths(0);
2161     fl->set_coalDeaths(0);
2162     fl->set_splitBirths(0);
2163     fl->set_splitDeaths(0);
2164   }
2165 }
2166 
2167 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2168   if (PrintFLSStatistics > 0) {
2169     HeapWord* largestAddr = (HeapWord*) dictionary()->findLargestDict();
2170     gclog_or_tty->print_cr("CMS: Large block " PTR_FORMAT,
2171                            largestAddr);
2172   }
2173   setFLSurplus();
2174   setFLHints();
2175   if (PrintGC && PrintFLSCensus > 0) {
2176     printFLCensus(sweep_count);
2177   }
2178   clearFLCensus();
2179   assert_locked();
2180   _dictionary->endSweepDictCensus(CMSLargeSplitSurplusPercent);
2181 }
2182 
2183 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2184   if (size < SmallForDictionary) {
2185     FreeList *fl = &_indexedFreeList[size];
2186     return (fl->coalDesired() < 0) ||
2187            ((int)fl->count() > fl->coalDesired());
2188   } else {
2189     return dictionary()->coalDictOverPopulated(size);
2190   }
2191 }
2192 
2193 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2194   assert(size < SmallForDictionary, "Size too large for indexed list");
2195   FreeList *fl = &_indexedFreeList[size];
2196   fl->increment_coalBirths();
2197   fl->increment_surplus();
2198 }
2199 
2200 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2201   assert(size < SmallForDictionary, "Size too large for indexed list");
2202   FreeList *fl = &_indexedFreeList[size];
2203   fl->increment_coalDeaths();
2204   fl->decrement_surplus();
2205 }
2206 
2207 void CompactibleFreeListSpace::coalBirth(size_t size) {
2208   if (size  < SmallForDictionary) {
2209     smallCoalBirth(size);
2210   } else {
2211     dictionary()->dictCensusUpdate(size,
2212                                    false /* split */,
2213                                    true /* birth */);
2214   }
2215 }
2216 
2217 void CompactibleFreeListSpace::coalDeath(size_t size) {
2218   if(size  < SmallForDictionary) {
2219     smallCoalDeath(size);
2220   } else {
2221     dictionary()->dictCensusUpdate(size,
2222                                    false /* split */,
2223                                    false /* birth */);
2224   }
2225 }
2226 
2227 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2228   assert(size < SmallForDictionary, "Size too large for indexed list");
2229   FreeList *fl = &_indexedFreeList[size];
2230   fl->increment_splitBirths();
2231   fl->increment_surplus();
2232 }
2233 
2234 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2235   assert(size < SmallForDictionary, "Size too large for indexed list");
2236   FreeList *fl = &_indexedFreeList[size];
2237   fl->increment_splitDeaths();
2238   fl->decrement_surplus();
2239 }
2240 
2241 void CompactibleFreeListSpace::splitBirth(size_t size) {
2242   if (size  < SmallForDictionary) {
2243     smallSplitBirth(size);
2244   } else {
2245     dictionary()->dictCensusUpdate(size,
2246                                    true /* split */,
2247                                    true /* birth */);
2248   }
2249 }
2250 
2251 void CompactibleFreeListSpace::splitDeath(size_t size) {
2252   if (size  < SmallForDictionary) {
2253     smallSplitDeath(size);
2254   } else {
2255     dictionary()->dictCensusUpdate(size,
2256                                    true /* split */,
2257                                    false /* birth */);
2258   }
2259 }
2260 
2261 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2262   size_t to2 = from - to1;
2263   splitDeath(from);
2264   splitBirth(to1);
2265   splitBirth(to2);
2266 }
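// Census bookkeeping example for split() above, with hypothetical sizes:
// split(100, 40) records splitDeath(100) for the donor block and
// splitBirth(40) plus splitBirth(60) for the two resulting pieces.  Each
// call is routed to the indexed-list counters or to the dictionary census
// depending on whether the size is below SmallForDictionary.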
2267 
2268 void CompactibleFreeListSpace::print() const {
2269   Space::print_on(tty);
2270 }
2271 
2272 void CompactibleFreeListSpace::prepare_for_verify() {
2273   assert_locked();
2274   repairLinearAllocationBlocks();
2275   // Verify that the SpoolBlocks look like free blocks of
2276   // appropriate sizes... To be done ...
2277 }
2278 
2279 class VerifyAllBlksClosure: public BlkClosure {
2280  private:
2281   const CompactibleFreeListSpace* _sp;
2282   const MemRegion                 _span;
2283   HeapWord*                       _last_addr;
2284   size_t                          _last_size;
2285   bool                            _last_was_obj;
2286   bool                            _last_was_live;
2287 
2288  public:
2289   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2290     MemRegion span) :  _sp(sp), _span(span),
2291                        _last_addr(NULL), _last_size(0),
2292                        _last_was_obj(false), _last_was_live(false) { }
2293 
2294   virtual size_t do_blk(HeapWord* addr) {
2295     size_t res;
2296     bool   was_obj  = false;
2297     bool   was_live = false;
2298     if (_sp->block_is_obj(addr)) {
2299       was_obj = true;
2300       oop p = oop(addr);
2301       guarantee(p->is_oop(), "Should be an oop");
2302       res = _sp->adjustObjectSize(p->size());
2303       if (_sp->obj_is_alive(addr)) {
2304         was_live = true;
2305         p->verify();
2306       }
2307     } else {
2308       FreeChunk* fc = (FreeChunk*)addr;
2309       res = fc->size();
2310       if (FLSVerifyLists && !fc->cantCoalesce()) {
2311         guarantee(_sp->verifyChunkInFreeLists(fc),
2312                   "Chunk should be on a free list");
2313       }
2314     }
2315     if (res == 0) {
2316       gclog_or_tty->print_cr("Livelock: no rank reduction!");
2317       gclog_or_tty->print_cr(
2318         " Current:  addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n"
2319         " Previous: addr = " PTR_FORMAT ", size = " SIZE_FORMAT ", obj = %s, live = %s \n",
2320         addr,       res,        was_obj      ?"true":"false", was_live      ?"true":"false",
2321         _last_addr, _last_size, _last_was_obj?"true":"false", _last_was_live?"true":"false");
2322       _sp->print_on(gclog_or_tty);
2323       guarantee(false, "Seppuku!");
2324     }
2325     _last_addr = addr;
2326     _last_size = res;
2327     _last_was_obj  = was_obj;
2328     _last_was_live = was_live;
2329     return res;
2330   }
2331 };
2332 
2333 class VerifyAllOopsClosure: public OopClosure {
2334  private:
2335   const CMSCollector*             _collector;
2336   const CompactibleFreeListSpace* _sp;
2337   const MemRegion                 _span;
2338   const bool                      _past_remark;
2339   const CMSBitMap*                _bit_map;
2340 
2341  protected:
2342   void do_oop(void* p, oop obj) {
2343     if (_span.contains(obj)) { // the interior oop points into CMS heap
2344       if (!_span.contains(p)) { // reference from outside CMS heap
2345         // Should be a valid object; the first disjunct below allows
2346         // us to sidestep an assertion in block_is_obj() that insists
2347         // that p be in _sp. Note that several generations (and spaces)
2348         // are spanned by _span (CMS heap) above.
2349         guarantee(!_sp->is_in_reserved(obj) ||
2350                   _sp->block_is_obj((HeapWord*)obj),
2351                   "Should be an object");
2352         guarantee(obj->is_oop(), "Should be an oop");
2353         obj->verify();
2354         if (_past_remark) {
2355           // Remark has been completed, the object should be marked
2356           guarantee(_bit_map->isMarked((HeapWord*)obj), "Should be marked");
2357         }
2358       } else { // reference within CMS heap
2359         if (_past_remark) {
2360           // Remark has been completed -- so the referent should have
2361           // been marked, if referring object is.
2362           if (_bit_map->isMarked(_collector->block_start(p))) {
2363             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2364           }
2365         }
2366       }
2367     } else if (_sp->is_in_reserved(p)) {
2368       // the reference is from FLS, and points out of FLS
2369       guarantee(obj->is_oop(), "Should be an oop");
2370       obj->verify();
2371     }
2372   }
2373 
2374   template <class T> void do_oop_work(T* p) {
2375     T heap_oop = oopDesc::load_heap_oop(p);
2376     if (!oopDesc::is_null(heap_oop)) {
2377       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2378       do_oop(p, obj);
2379     }
2380   }
2381 
2382  public:
2383   VerifyAllOopsClosure(const CMSCollector* collector,
2384     const CompactibleFreeListSpace* sp, MemRegion span,
2385     bool past_remark, CMSBitMap* bit_map) :
2386     OopClosure(), _collector(collector), _sp(sp), _span(span),
2387     _past_remark(past_remark), _bit_map(bit_map) { }
2388 
2389   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2390   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2391 };
2392 
2393 void CompactibleFreeListSpace::verify(bool ignored) const {
2394   assert_lock_strong(&_freelistLock);
2395   verify_objects_initialized();
2396   MemRegion span = _collector->_span;
2397   bool past_remark = (_collector->abstract_state() ==
2398                       CMSCollector::Sweeping);
2399 
2400   ResourceMark rm;
2401   HandleMark  hm;
2402 
2403   // Check integrity of CFL data structures
2404   _promoInfo.verify();
2405   _dictionary->verify();
2406   if (FLSVerifyIndexTable) {
2407     verifyIndexedFreeLists();
2408   }
2409   // Check integrity of all objects and free blocks in space
2410   {
2411     VerifyAllBlksClosure cl(this, span);
2412     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2413   }
2414   // Check that all references in the heap to FLS
2415   // are to valid objects in FLS or that references in
2416   // FLS are to valid objects elsewhere in the heap
2417   if (FLSVerifyAllHeapReferences)
2418   {
2419     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2420       _collector->markBitMap());
2421     CollectedHeap* ch = Universe::heap();
2422     ch->oop_iterate(&cl);              // all oops in generations
2423     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
2424   }
2425 
2426   if (VerifyObjectStartArray) {
2427     // Verify the block offset table
2428     _bt.verify();
2429   }
2430 }
2431 
2432 #ifndef PRODUCT
2433 void CompactibleFreeListSpace::verifyFreeLists() const {
2434   if (FLSVerifyLists) {
2435     _dictionary->verify();
2436     verifyIndexedFreeLists();
2437   } else {
2438     if (FLSVerifyDictionary) {
2439       _dictionary->verify();
2440     }
2441     if (FLSVerifyIndexTable) {
2442       verifyIndexedFreeLists();
2443     }
2444   }
2445 }
2446 #endif
2447 
2448 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2449   size_t i = 0;
2450   for (; i < MinChunkSize; i++) {
2451     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2452   }
2453   for (; i < IndexSetSize; i++) {
2454     verifyIndexedFreeList(i);
2455   }
2456 }
2457 
2458 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2459   FreeChunk* fc   =  _indexedFreeList[size].head();
2460   FreeChunk* tail =  _indexedFreeList[size].tail();
2461   size_t    num = _indexedFreeList[size].count();
2462   size_t      n = 0;
2463   guarantee((size % IndexSetStride == 0) || fc == NULL, "Slots off the index stride should be empty");
2464   for (; fc != NULL; fc = fc->next(), n++) {
2465     guarantee(fc->size() == size, "Size inconsistency");
2466     guarantee(fc->isFree(), "!free?");
2467     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2468     guarantee((fc->next() == NULL) == (fc == tail), "Incorrect tail");
2469   }
2470   guarantee(n == num, "Incorrect count");
2471 }
2472 
2473 #ifndef PRODUCT
2474 void CompactibleFreeListSpace::checkFreeListConsistency() const {
2475   assert(_dictionary->minSize() <= IndexSetSize,
2476     "Some sizes can't be allocated without recourse to"
2477     " linear allocation buffers");
2478   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
2479     "else MIN_TREE_CHUNK_SIZE is wrong");
2480   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
2481          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
2482   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
2483       "Some for-loops may be incorrectly initialized");
2484   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
2485       "For-loops that iterate over IndexSet with stride 2 may be wrong");
2486 }
2487 #endif
2488 
2489 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2490   assert_lock_strong(&_freelistLock);
2491   FreeList total;
2492   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2493   FreeList::print_labels_on(gclog_or_tty, "size");
2494   size_t totalFree = 0;
2495   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2496     const FreeList *fl = &_indexedFreeList[i];
2497     totalFree += fl->count() * fl->size();
2498     if (i % (40*IndexSetStride) == 0) {
2499       FreeList::print_labels_on(gclog_or_tty, "size");
2500     }
2501     fl->print_on(gclog_or_tty);
2502     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
2503     total.set_surplus(    total.surplus()     + fl->surplus()    );
2504     total.set_desired(    total.desired()     + fl->desired()    );
2505     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
2506     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
2507     total.set_count(      total.count()       + fl->count()      );
2508     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
2509     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
2510     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
2511     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
2512   }
2513   total.print_on(gclog_or_tty, "TOTAL");
2514   gclog_or_tty->print_cr("Total free in indexed lists "
2515                          SIZE_FORMAT " words", totalFree);
2516   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2517     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
2518             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
2519     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2520   _dictionary->printDictCensus();
2521 }
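// The growth and deficit ratios printed above reduce to (hypothetical
// totals used purely for illustration):
//
//   growth  = (splitBirths + coalBirths - splitDeaths - coalDeaths) / prevSweep
//   deficit = (desired - count) / desired
//
// e.g. with 120 births, 100 deaths, prevSweep == 400, desired == 500 and
// count == 450:  growth = 20/400 = 0.05 and deficit = 50/500 = 0.10.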
2522 
2523 ///////////////////////////////////////////////////////////////////////////
2524 // CFLS_LAB
2525 ///////////////////////////////////////////////////////////////////////////
2526 
2527 #define VECTOR_257(x)                                                                                  \
2528   /* 1  2  3  4  5  6  7  8  9 1x 11 12 13 14 15 16 17 18 19 2x 21 22 23 24 25 26 27 28 29 3x 31 32 */ \
2529   {  x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2530      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2531      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2532      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2533      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2534      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2535      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2536      x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,   \
2537      x }
2538 
2539 // Initialize with default setting of CMSParPromoteBlocksToClaim, _not_
2540 // OldPLABSize, whose static default is different; if overridden at the
2541 // command-line, this will get reinitialized via a call to
2542 // modify_initialization() below.
2543 AdaptiveWeightedAverage CFLS_LAB::_blocks_to_claim[]    =
2544   VECTOR_257(AdaptiveWeightedAverage(OldPLABWeight, (float)CMSParPromoteBlocksToClaim));
2545 size_t CFLS_LAB::_global_num_blocks[]  = VECTOR_257(0);
2546 int    CFLS_LAB::_global_num_workers[] = VECTOR_257(0);
2547 
2548 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2549   _cfls(cfls)
2550 {
2551   assert(CompactibleFreeListSpace::IndexSetSize == 257, "Modify VECTOR_257() macro above");
2552   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2553        i < CompactibleFreeListSpace::IndexSetSize;
2554        i += CompactibleFreeListSpace::IndexSetStride) {
2555     _indexedFreeList[i].set_size(i);
2556     _num_blocks[i] = 0;
2557   }
2558 }
2559 
2560 static bool _CFLS_LAB_modified = false;
2561 
2562 void CFLS_LAB::modify_initialization(size_t n, unsigned wt) {
2563   assert(!_CFLS_LAB_modified, "Call only once");
2564   _CFLS_LAB_modified = true;
2565   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2566        i < CompactibleFreeListSpace::IndexSetSize;
2567        i += CompactibleFreeListSpace::IndexSetStride) {
2568     _blocks_to_claim[i].modify(n, wt, true /* force */);
2569   }
2570 }
2571 
2572 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2573   FreeChunk* res;
2574   assert(word_sz == _cfls->adjustObjectSize(word_sz), "Error");
2575   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2576     // This locking manages sync with other large object allocations.
2577     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2578                     Mutex::_no_safepoint_check_flag);
2579     res = _cfls->getChunkFromDictionaryExact(word_sz);
2580     if (res == NULL) return NULL;
2581   } else {
2582     FreeList* fl = &_indexedFreeList[word_sz];
2583     if (fl->count() == 0) {
2584       // Attempt to refill this local free list.
2585       get_from_global_pool(word_sz, fl);
2586       // If it didn't work, give up.
2587       if (fl->count() == 0) return NULL;
2588     }
2589     res = fl->getChunkAtHead();
2590     assert(res != NULL, "Why was count non-zero?");
2591   }
2592   res->markNotFree();
2593   assert(!res->isFree(), "shouldn't be marked free");
2594   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2595   // Mangle a just-allocated object with a distinct pattern.
2596   debug_only(res->mangleAllocated(word_sz));
2597   return (HeapWord*)res;
2598 }
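
     // Illustrative caller sketch (names assumed, not taken from this file): callers
     // are expected to pre-adjust the request, e.g.
     //
     //   size_t adj_sz = cfls->adjustObjectSize(obj_sz);
     //   HeapWord* p   = lab->alloc(adj_sz);   // NULL => fall back to a shared allocation path
     //
     // which is what the adjustObjectSize() assert at the top of alloc() enforces.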
2599 
2600 // Get a chunk of blocks of the right size and update related
2601 // book-keeping stats
2602 void CFLS_LAB::get_from_global_pool(size_t word_sz, FreeList* fl) {
2603   // Get the #blocks we want to claim
2604   size_t n_blks = (size_t)_blocks_to_claim[word_sz].average();
2605   assert(n_blks > 0, "Error");
2606   assert(ResizePLAB || n_blks == OldPLABSize, "Error");
2607   // In some cases, when the application has a phase change,
2608   // there may be a sudden and sharp shift in the object survival
2609   // profile, and updating the counts at the end of a scavenge
2610   // may not be quick enough, giving rise to large scavenge pauses
2611   // during these phase changes. It is beneficial to detect such
2612   // changes on-the-fly during a scavenge and avoid such a phase-change
2613   // pothole. The following code is a heuristic attempt to do that.
2614   // It is protected by a product flag until we have gained
2615   // enough experience with this heuristic and fine-tuned its behaviour.
2616   // WARNING: This might increase fragmentation if we overreact to
2617   // small spikes, so some kind of historical smoothing based on
2618   // previous experience with this greater reactivity might be useful.
2619   // Lacking sufficient experience, CMSOldPLABResizeQuicker is disabled by
2620   // default.
2621   if (ResizeOldPLAB && CMSOldPLABResizeQuicker) {
2622     size_t multiple = _num_blocks[word_sz]/(CMSOldPLABToleranceFactor*CMSOldPLABNumRefills*n_blks);
2623     n_blks +=  CMSOldPLABReactivityFactor*multiple*n_blks;
2624     n_blks = MIN2(n_blks, CMSOldPLABMax);
2625   }
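       // Illustrative arithmetic for the heuristic above (all numbers assumed, not
       // defaults): with _num_blocks[word_sz] = 4000, CMSOldPLABToleranceFactor = 4,
       // CMSOldPLABNumRefills = 4 and n_blks = 50, multiple = 4000/(4*4*50) = 5, so
       // n_blks grows by CMSOldPLABReactivityFactor*5*50 (e.g. 250 for a factor of 1)
       // and is then clipped to CMSOldPLABMax.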
2626   assert(n_blks > 0, "Error");
2627   _cfls->par_get_chunk_of_blocks(word_sz, n_blks, fl);
2628   // Update stats table entry for this block size
2629   _num_blocks[word_sz] += fl->count();
2630 }
2631 
2632 void CFLS_LAB::compute_desired_plab_size() {
2633   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2634        i < CompactibleFreeListSpace::IndexSetSize;
2635        i += CompactibleFreeListSpace::IndexSetStride) {
2636     assert((_global_num_workers[i] == 0) == (_global_num_blocks[i] == 0),
2637            "Counter inconsistency");
2638     if (_global_num_workers[i] > 0) {
2639       // Need to smooth wrt historical average
2640       if (ResizeOldPLAB) {
2641         _blocks_to_claim[i].sample(
2642           MAX2((size_t)CMSOldPLABMin,
2643           MIN2((size_t)CMSOldPLABMax,
2644                _global_num_blocks[i]/(_global_num_workers[i]*CMSOldPLABNumRefills))));
2645       }
2646       // Reset counters for next round
2647       _global_num_workers[i] = 0;
2648       _global_num_blocks[i] = 0;
2649       if (PrintOldPLAB) {
2650         gclog_or_tty->print_cr("[" SIZE_FORMAT "]: " SIZE_FORMAT, i, (size_t)_blocks_to_claim[i].average());
2651       }
2652     }
2653   }
2654 }
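
     // Worked example for the computation above (counts assumed): if 12 workers
     // together retired 4800 blocks of some size during the last scavenge and
     // CMSOldPLABNumRefills = 8, the per-refill demand is 4800/(12*8) = 50 blocks;
     // that value, clamped to [CMSOldPLABMin, CMSOldPLABMax], is the sample fed to
     // the weighted average that get_from_global_pool() consults next time round.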
2655 
2656 void CFLS_LAB::retire(int tid) {
2657   // We run this single threaded with the world stopped;
2658   // so no need for locks and such.
2659 #define CFLS_LAB_PARALLEL_ACCESS 0
2660   NOT_PRODUCT(Thread* t = Thread::current();)
2661   assert(Thread::current()->is_VM_thread(), "Error");
2662   assert(CompactibleFreeListSpace::IndexSetStart == CompactibleFreeListSpace::IndexSetStride,
2663          "Would access an uninitialized slot below");
2664 #if CFLS_LAB_PARALLEL_ACCESS
2665   for (size_t i = CompactibleFreeListSpace::IndexSetSize - 1;
2666        i > 0;
2667        i -= CompactibleFreeListSpace::IndexSetStride) {
2668 #else // CFLS_LAB_PARALLEL_ACCESS
2669   for (size_t i =  CompactibleFreeListSpace::IndexSetStart;
2670        i < CompactibleFreeListSpace::IndexSetSize;
2671        i += CompactibleFreeListSpace::IndexSetStride) {
2672 #endif // !CFLS_LAB_PARALLEL_ACCESS
2673     assert(_num_blocks[i] >= (size_t)_indexedFreeList[i].count(),
2674            "Can't retire more than what we obtained");
2675     if (_num_blocks[i] > 0) {
2676       size_t num_retire =  _indexedFreeList[i].count();
2677       assert(_num_blocks[i] > num_retire, "Should have used at least one");
2678       {
2679 #if CFLS_LAB_PARALLEL_ACCESS
2680         MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2681                         Mutex::_no_safepoint_check_flag);
2682 #endif // CFLS_LAB_PARALLEL_ACCESS
2683         // Update global stats for num_blocks used
2684         _global_num_blocks[i] += (_num_blocks[i] - num_retire);
2685         _global_num_workers[i]++;
2686         assert(_global_num_workers[i] <= (ssize_t)ParallelGCThreads, "Too big");
2687         if (num_retire > 0) {
2688           _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2689           // Reset this list.
2690           _indexedFreeList[i] = FreeList();
2691           _indexedFreeList[i].set_size(i);
2692         }
2693       }
2694       if (PrintOldPLAB) {
2695         gclog_or_tty->print_cr("%d[" SIZE_FORMAT "]: " SIZE_FORMAT "/" SIZE_FORMAT "/" SIZE_FORMAT,
2696                                tid, i, num_retire, _num_blocks[i], (size_t)_blocks_to_claim[i].average());
2697       }
2698       // Reset stats for next round
2699       _num_blocks[i]         = 0;
2700     }
2701   }
2702 }
2703 
2704 void CompactibleFreeListSpace::par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
2705   assert(fl->count() == 0, "Precondition.");
2706   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2707          "Precondition");
2708 
2709   // We'll try all multiples of word_sz in the indexed set, starting with
2710   // word_sz itself and, if CMSSplitIndexedFreeListBlocks, try larger multiples,
2711   // then try getting a big chunk and splitting it.
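       // Illustrative walk-through (sizes assumed): for word_sz = 8 and n = 32,
       // k = 1 first tries _indexedFreeList[8]; if that is empty and
       // CMSSplitIndexedFreeListBlocks is set, k = 2 takes nn = MAX2(32/2, 1) = 16
       // chunks from _indexedFreeList[16] and splits each in two, and so on while
       // cur_sz = k * word_sz stays below IndexSetSize; only if every multiple fails
       // do we fall through to the dictionary below.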
2712   {
2713     bool found;
2714     int  k;
2715     size_t cur_sz;
2716     for (k = 1, cur_sz = k * word_sz, found = false;
2717          (cur_sz < CompactibleFreeListSpace::IndexSetSize) &&
2718          (CMSSplitIndexedFreeListBlocks || k <= 1);
2719          k++, cur_sz = k * word_sz) {
2720       FreeList fl_for_cur_sz;  // Empty.
2721       fl_for_cur_sz.set_size(cur_sz);
2722       {
2723         MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2724                         Mutex::_no_safepoint_check_flag);
2725         FreeList* gfl = &_indexedFreeList[cur_sz];
2726         if (gfl->count() != 0) {
2727           // nn is the number of chunks of size cur_sz that
2728           // we'd need to split k-ways each, in order to create
2729           // "n" chunks of size word_sz each.
2730           const size_t nn = MAX2(n/k, (size_t)1);
2731           gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2732           found = true;
2733           if (k > 1) {
2734             // Update split death stats for the cur_sz-size blocks list:
2735             // we increment the split death count by the number of blocks
2736             // we just took from the cur_sz-size blocks list and which
2737             // we will be splitting below.
2738             ssize_t deaths = gfl->splitDeaths() +
2739                              fl_for_cur_sz.count();
2740             gfl->set_splitDeaths(deaths);
2741           }
2742         }
2743       }
2744       // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2745       if (found) {
2746         if (k == 1) {
2747           fl->prepend(&fl_for_cur_sz);
2748         } else {
2749           // Divide each block on fl_for_cur_sz up k ways.
2750           FreeChunk* fc;
2751           while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
2752             // Must do this in reverse order, so that anybody attempting to
2753             // access the main chunk sees it as a single free block until we
2754             // change it.
2755             size_t fc_size = fc->size();
2756             assert(fc->isFree(), "Error");
2757             for (int i = k-1; i >= 0; i--) {
2758               FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2759               assert((i != 0) ||
2760                         ((fc == ffc) && ffc->isFree() &&
2761                          (ffc->size() == k*word_sz) && (fc_size == word_sz)),
2762                         "Counting error");
2763               ffc->setSize(word_sz);
2764               ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2765               ffc->linkNext(NULL);
2766               // Above must occur before BOT is updated below.
2767               OrderAccess::storestore();
2768               // splitting from the right, fc_size == i * word_sz
2769               _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2770               fc_size -= word_sz;
2771               assert(fc_size == i*word_sz, "Error");
2772               _bt.verify_not_unallocated((HeapWord*)ffc, word_sz);
2773               _bt.verify_single_block((HeapWord*)fc, fc_size);
2774               _bt.verify_single_block((HeapWord*)ffc, word_sz);
2775               // Push this on "fl".
2776               fl->returnChunkAtHead(ffc);
2777             }
2778             // TRAP
2779             assert(fl->tail()->next() == NULL, "List invariant.");
2780           }
2781         }
2782         // Update birth stats for this block size.
2783         size_t num = fl->count();
2784         MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2785                         Mutex::_no_safepoint_check_flag);
2786         ssize_t births = _indexedFreeList[word_sz].splitBirths() + num;
2787         _indexedFreeList[word_sz].set_splitBirths(births);
2788         return;
2789       }
2790     }
2791   }
2792   // Otherwise, we'll split a block from the dictionary.
2793   FreeChunk* fc = NULL;
2794   FreeChunk* rem_fc = NULL;
2795   size_t rem;
2796   {
2797     MutexLockerEx x(parDictionaryAllocLock(),
2798                     Mutex::_no_safepoint_check_flag);
2799     while (n > 0) {
2800       fc = dictionary()->getChunk(MAX2(n * word_sz,
2801                                   _dictionary->minSize()),
2802                                   FreeBlockDictionary::atLeast);
2803       if (fc != NULL) {
2804         _bt.allocated((HeapWord*)fc, fc->size(), true /* reducing */);  // update _unallocated_blk
2805         dictionary()->dictCensusUpdate(fc->size(),
2806                                        true /*split*/,
2807                                        false /*birth*/);
2808         break;
2809       } else {
2810         n--;
2811       }
2812     }
2813     if (fc == NULL) return;
2814     // Otherwise, split up that block.
2815     assert((ssize_t)n >= 1, "Control point invariant");
2816     assert(fc->isFree(), "Error: should be a free block");
2817     _bt.verify_single_block((HeapWord*)fc, fc->size());
2818     const size_t nn = fc->size() / word_sz;
2819     n = MIN2(nn, n);
2820     assert((ssize_t)n >= 1, "Control point invariant");
2821     rem = fc->size() - n * word_sz;
2822     // If there is a remainder, and it's too small, allocate one fewer.
2823     if (rem > 0 && rem < MinChunkSize) {
2824       n--; rem += word_sz;
2825     }
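         // Illustrative arithmetic (sizes assumed): if the dictionary returned a
         // 21-word chunk, word_sz = 4 and MinChunkSize = 3, then (assuming at least
         // five blocks were requested) n = 21/4 = 5 and rem = 1; since
         // 0 < rem < MinChunkSize we keep one fewer block, giving n = 4 and rem = 5,
         // which is a viable free chunk.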
2826     // Note that at this point we may have n == 0.
2827     assert((ssize_t)n >= 0, "Control point invariant");
2828 
2829     // If n is 0, the chunk fc that was found is not large
2830     // enough to leave a viable remainder.  We are unable to
2831     // allocate even one block.  Return fc to the
2832     // dictionary and return, leaving "fl" empty.
2833     if (n == 0) {
2834       returnChunkToDictionary(fc);
2835       assert(fl->count() == 0, "We never allocated any blocks");
2836       return;
2837     }
2838 
2839     // First return the remainder, if any.
2840     // Note that we hold the lock until we decide if we're going to give
2841     // back the remainder to the dictionary, since a concurrent allocation
2842     // may otherwise see the heap as empty.  (We're willing to take that
2843     // hit if the block is a small block.)
2844     if (rem > 0) {
2845       size_t prefix_size = n * word_sz;
2846       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2847       rem_fc->setSize(rem);
2848       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2849       rem_fc->linkNext(NULL);
2850       // Above must occur before BOT is updated below.
2851       assert((ssize_t)n > 0 && prefix_size > 0 && rem_fc > fc, "Error");
2852       OrderAccess::storestore();
2853       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2854       assert(fc->isFree(), "Error");
2855       fc->setSize(prefix_size);
2856       if (rem >= IndexSetSize) {
2857         returnChunkToDictionary(rem_fc);
2858         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
2859         rem_fc = NULL;
2860       }
2861       // Otherwise, return it to the small list below.
2862     }
2863   }
2864   if (rem_fc != NULL) {
2865     MutexLockerEx x(_indexedFreeListParLocks[rem],
2866                     Mutex::_no_safepoint_check_flag);
2867     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2868     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
2869     smallSplitBirth(rem);
2870   }
2871   assert((ssize_t)n > 0 && fc != NULL, "Consistency");
2872   // Now do the splitting up.
2873   // Must do this in reverse order, so that anybody attempting to
2874   // access the main chunk sees it as a single free block until we
2875   // change it.
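       // Illustrative sketch (n assumed to be 3): the loop below carves blocks at
       // offsets 2*word_sz and then word_sz, and only afterwards is fc itself
       // resized to word_sz; a concurrent reader that looks at fc in the meantime
       // still sees one free block covering the whole un-carved prefix.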
2876   size_t fc_size = n * word_sz;
2877   // All but first chunk in this loop
2878   for (ssize_t i = n-1; i > 0; i--) {
2879     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2880     ffc->setSize(word_sz);
2881     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2882     ffc->linkNext(NULL);
2883     // Above must occur before BOT is updated below.
2884     OrderAccess::storestore();
2885     // splitting from the right; fc_size becomes i * word_sz after the decrement below
2886     _bt.mark_block((HeapWord*)ffc, word_sz, true /* reducing */);
2887     fc_size -= word_sz;
2888     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2889     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2890     _bt.verify_single_block((HeapWord*)fc, fc_size);
2891     // Push this on "fl".
2892     fl->returnChunkAtHead(ffc);
2893   }
2894   // First chunk
2895   assert(fc->isFree() && fc->size() == n*word_sz, "Error: should still be a free block");
2896   // The blocks above should show their new sizes before the first block below
2897   fc->setSize(word_sz);
2898   fc->linkPrev(NULL);    // idempotent wrt free-ness, see assert above
2899   fc->linkNext(NULL);
2900   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2901   _bt.verify_single_block((HeapWord*)fc, fc->size());
2902   fl->returnChunkAtHead(fc);
2903 
2904   assert((ssize_t)n > 0 && (ssize_t)n == fl->count(), "Incorrect number of blocks");
2905   {
2906     // Update the stats for this block size.
2907     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2908                     Mutex::_no_safepoint_check_flag);
2909     const ssize_t births = _indexedFreeList[word_sz].splitBirths() + n;
2910     _indexedFreeList[word_sz].set_splitBirths(births);
2911     // ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2912     // _indexedFreeList[word_sz].set_surplus(new_surplus);
2913   }
2914 
2915   // TRAP
2916   assert(fl->tail()->next() == NULL, "List invariant.");
2917 }
2918 
2919 // Set up the space's par_seq_tasks structure for work claiming
2920 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2921 // XXX Need to suitably abstract and generalize this and the next
2922 // method into one.
2923 void
2924 CompactibleFreeListSpace::
2925 initialize_sequential_subtasks_for_rescan(int n_threads) {
2926   // The "size" of each task is fixed according to rescan_task_size.
2927   assert(n_threads > 0, "Unexpected n_threads argument");
2928   const size_t task_size = rescan_task_size();
2929   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2930   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2931   assert(n_tasks == 0 ||
2932          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2933           (used_region().start() + n_tasks*task_size >= used_region().end())),
2934          "n_tasks calculation incorrect");
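       // Worked example (sizes assumed): with task_size = 4096 words and a used
       // region of 10000 words, n_tasks = (10000 + 4095)/4096 = 3; tasks 0 and 1
       // are full-sized and task 2 covers the partial tail, which is exactly the
       // bracketing the assert above checks.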
2935   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2936   assert(!pst->valid(), "Clobbering existing data?");
2937   // Sets the condition for completion of the subtask (how many threads
2938   // need to finish in order to be done).
2939   pst->set_n_threads(n_threads);
2940   pst->set_n_tasks((int)n_tasks);
2941 }
2942 
2943 // Set up the space's par_seq_tasks structure for work claiming
2944 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2945 void
2946 CompactibleFreeListSpace::
2947 initialize_sequential_subtasks_for_marking(int n_threads,
2948                                            HeapWord* low) {
2949   // The "size" of each task is fixed according to marking_task_size.
2950   assert(n_threads > 0, "Unexpected n_threads argument");
2951   const size_t task_size = marking_task_size();
2952   assert(task_size > CardTableModRefBS::card_size_in_words &&
2953          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2954          "Otherwise arithmetic below would be incorrect");
2955   MemRegion span = _gen->reserved();
2956   if (low != NULL) {
2957     if (span.contains(low)) {
2958       // Align low down to a card boundary so that
2959       // we can use block_offset_careful() on span boundaries.
2960       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2961                                  CardTableModRefBS::card_size);
2962       // Clip span prefix at aligned_low
2963       span = span.intersection(MemRegion(aligned_low, span.end()));
2964     } else if (low > span.end()) {
2965       span = MemRegion(low, low);  // Null region
2966     } // else use entire span
2967   }
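       // Illustrative values (addresses and card size assumed): if the generation
       // spans [0x10000, 0x90000) with a 512-byte card size and low = 0x40010,
       // aligned_low = 0x40000 and the span to be marked shrinks to
       // [0x40000, 0x90000); a low beyond the end degenerates to an empty span and
       // hence to zero tasks.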
2968   assert(span.is_empty() ||
2969          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2970         "span should start at a card boundary");
2971   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2972   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2973   assert(n_tasks == 0 ||
2974          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2975           (span.start() + n_tasks*task_size >= span.end())),
2976          "n_tasks calculation incorrect");
2977   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2978   assert(!pst->valid(), "Clobbering existing data?");
2979   // Sets the condition for completion of the subtask (how many threads
2980   // need to finish in order to be done).
2981   pst->set_n_threads(n_threads);
2982   pst->set_n_tasks((int)n_tasks);
2983 }