1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)compactibleFreeListSpace.cpp 1.144 08/09/06 09:20:55 JVM"
   3 #endif
   4 /*
   5  * Copyright 2001-2008 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 # include "incls/_precompiled.incl"
  29 # include "incls/_compactibleFreeListSpace.cpp.incl"
  30 
  31 /////////////////////////////////////////////////////////////////////////
  32 //// CompactibleFreeListSpace
  33 /////////////////////////////////////////////////////////////////////////
  34 
// Highest-ranked free list lock rank.
  36 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  37 
  38 // Constructor
  39 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  40   MemRegion mr, bool use_adaptive_freelists,
  41   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  42   _dictionaryChoice(dictionaryChoice),
  43   _adaptive_freelists(use_adaptive_freelists),
  44   _bt(bs, mr),
  // Free list locks are in the range of values taken by _lockRank.
  // This range currently is [_leaf+2, _leaf+3].
  // Note: this requires that CFLspace c'tors
  // are called serially in the order in which the locks are
  // acquired in the program text. This is true today.
  50   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  51   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  52                           "CompactibleFreeListSpace._dict_par_lock", true),
  53   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  54                     CMSRescanMultiple),
  55   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  56                     CMSConcMarkMultiple),
  57   _collector(NULL)
  58 {
  59   _bt.set_space(this);
  60   initialize(mr, SpaceDecorator::Clear, SpaceDecorator::Mangle);
  61   // We have all of "mr", all of which we place in the dictionary
  62   // as one big chunk. We'll need to decide here which of several
  63   // possible alternative dictionary implementations to use. For
  64   // now the choice is easy, since we have only one working
  65   // implementation, namely, the simple binary tree (splaying
  66   // temporarily disabled).
  67   switch (dictionaryChoice) {
  68     case FreeBlockDictionary::dictionaryBinaryTree:
  69       _dictionary = new BinaryTreeDictionary(mr);
  70       break;
  71     case FreeBlockDictionary::dictionarySplayTree:
  72     case FreeBlockDictionary::dictionarySkipList:
  73     default:
  74       warning("dictionaryChoice: selected option not understood; using"
  75               " default BinaryTreeDictionary implementation instead.");
  76       _dictionary = new BinaryTreeDictionary(mr);
  77       break;
  78   }
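  // Record the initial region as a single (split) birth in the free block census.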
  79   splitBirth(mr.word_size());
  80   assert(_dictionary != NULL, "CMS dictionary initialization");
  81   // The indexed free lists are initially all empty and are lazily
  82   // filled in on demand. Initialize the array elements to NULL.
  83   initializeIndexedFreeListArray();
  84 
  // Not using adaptive free lists assumes that allocation is first
  // from the linAB's.  Also, a CMS perm gen which can be compacted
  // has to have the klass's klassKlass allocated at a lower
  // address in the heap than the klass, so that the klassKlass is
  // moved to its new location before the klass is moved.
  // Set the _refillSize for the linear allocation blocks.
  91   if (!use_adaptive_freelists) {
  92     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
  93     // The small linAB initially has all the space and will allocate
  94     // a chunk of any size.
  95     HeapWord* addr = (HeapWord*) fc;
    _smallLinearAllocBlock.set(addr, fc->size(),
      1024*SmallForLinearAlloc, fc->size());
  98     // Note that _unallocated_block is not updated here.
  99     // Allocations from the linear allocation block should
 100     // update it.
 101   } else {
 102     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, 
 103                                SmallForLinearAlloc);
 104   }
 105   // CMSIndexedFreeListReplenish should be at least 1
 106   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 107   _promoInfo.setSpace(this);
 108   if (UseCMSBestFit) {
 109     _fitStrategy = FreeBlockBestFitFirst;
 110   } else {
 111     _fitStrategy = FreeBlockStrategyNone;
 112   }
 113   checkFreeListConsistency();
 114 
 115   // Initialize locks for parallel case.
 116   if (ParallelGCThreads > 0) {
 117     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 118       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 119                                               "a freelist par lock",
 120                                               true);
 121       if (_indexedFreeListParLocks[i] == NULL) 
 122         vm_exit_during_initialization("Could not allocate a par lock");
 123       DEBUG_ONLY(
 124         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 125       )
 126     }
 127     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 128   }
 129 }
 130 
 131 // Like CompactibleSpace forward() but always calls cross_threshold() to
 132 // update the block offset table.  Removed initialize_threshold call because
 133 // CFLS does not use a block offset array for contiguous spaces.
 134 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size, 
 135                                     CompactPoint* cp, HeapWord* compact_top) {
 136   // q is alive
 137   // First check if we should switch compaction space
 138   assert(this == cp->space, "'this' should be current compaction space.");
 139   size_t compaction_max_size = pointer_delta(end(), compact_top);
 140   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 141     "virtual adjustObjectSize_v() method is not correct");
 142   size_t adjusted_size = adjustObjectSize(size);
 143   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 144          "no small fragments allowed");
 145   assert(minimum_free_block_size() == MinChunkSize,
 146          "for de-virtualized reference below");
  // Can't leave a nonzero-size residual fragment smaller than MinChunkSize.
 148   if (adjusted_size + MinChunkSize > compaction_max_size &&
 149       adjusted_size != compaction_max_size) {
 150     do {
 151       // switch to next compaction space
 152       cp->space->set_compaction_top(compact_top);
 153       cp->space = cp->space->next_compaction_space();
 154       if (cp->space == NULL) {
 155         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 156         assert(cp->gen != NULL, "compaction must succeed");
 157         cp->space = cp->gen->first_compaction_space();
 158         assert(cp->space != NULL, "generation must have a first compaction space");
 159       }
 160       compact_top = cp->space->bottom();
 161       cp->space->set_compaction_top(compact_top);
      // The correct adjusted_size may not be the same as that for this method
      // (i.e., cp->space may no longer be "this"), so adjust the size again.
      // Use the virtual method here, which was avoided above to save the cost
      // of the virtual dispatch.
 166       adjusted_size = cp->space->adjust_object_size_v(size);
 167       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 168       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 169     } while (adjusted_size > compaction_max_size);
 170   }
 171 
 172   // store the forwarding pointer into the mark word
 173   if ((HeapWord*)q != compact_top) {
 174     q->forward_to(oop(compact_top));
 175     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 176   } else {
 177     // if the object isn't moving we can just set the mark to the default
 178     // mark and handle it specially later on.  
 179     q->init_mark();
 180     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 181   }
 182 
 183   VALIDATE_MARK_SWEEP_ONLY(MarkSweep::register_live_oop(q, adjusted_size));
 184   compact_top += adjusted_size;
 185 
  // We need to update the offset table so that the beginnings of objects can be
  // found during scavenge.  Note that we are updating the offset table based on
  // where the object will be once the compaction phase finishes.

  // Always call cross_threshold().  A contiguous space calls it only when
  // compaction_top exceeds the current threshold; that is not the case for a
  // non-contiguous space.
 193   cp->threshold =
 194     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 195   return compact_top;
 196 }
 197 
 198 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 199 // and use of single_block instead of alloc_block.  The name here is not really
 200 // appropriate - maybe a more general name could be invented for both the
 201 // contiguous and noncontiguous spaces.
 202 
 203 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 204   _bt.single_block(start, the_end);
 205   return end();
 206 }
 207 
// Initialize the indexed free list array; every list starts out empty.
 209 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 210   for (size_t i = 0; i < IndexSetSize; i++) {
 211     // Note that on platforms where objects are double word aligned,
 212     // the odd array elements are not used.  It is convenient, however,
 213     // to map directly from the object size to the array element.
 214     _indexedFreeList[i].reset(IndexSetSize);
 215     _indexedFreeList[i].set_size(i);
 216     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 217     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 219     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 220   }
 221 }
 222 
 223 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
  for (size_t i = 1; i < IndexSetSize; i++) {
    assert(_indexedFreeList[i].size() == i,
      "Indexed free list sizes are incorrect");
 227     _indexedFreeList[i].reset(IndexSetSize);
 228     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 229     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 230     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 231     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 232   }
 233 }
 234 
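// Reinstall "mr" as the sole free block of this space: clear the indexed
// free lists and the dictionary, then return the whole region as one chunk
// (to the dictionary or to an indexed list, depending on its size).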
 235 void CompactibleFreeListSpace::reset(MemRegion mr) {
 236   resetIndexedFreeListArray();
 237   dictionary()->reset();
 238   if (BlockOffsetArrayUseUnallocatedBlock) {
 239     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 240     // Everything's allocated until proven otherwise.
 241     _bt.set_unallocated_block(end());
 242   }
 243   if (!mr.is_empty()) {
 244     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 245     _bt.single_block(mr.start(), mr.word_size());
 246     FreeChunk* fc = (FreeChunk*) mr.start();
 247     fc->setSize(mr.word_size());
 248     if (mr.word_size() >= IndexSetSize ) {
 249       returnChunkToDictionary(fc);
 250     } else {
 251       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 252       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
 253     }
 254   }
 255   _promoInfo.reset();
 256   _smallLinearAllocBlock._ptr = NULL;
 257   _smallLinearAllocBlock._word_size = 0;
 258 }
 259 
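// After compaction the space is one contiguous free region from
// compaction_top() to end(); rebuild the free list structures accordingly
// and refill the linear allocation block(s).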
 260 void CompactibleFreeListSpace::reset_after_compaction() {
 261   // Reset the space to the new reality - one free chunk.
 262   MemRegion mr(compaction_top(), end());
 263   reset(mr);
 264   // Now refill the linear allocation block(s) if possible.
 265   if (_adaptive_freelists) {
 266     refillLinearAllocBlocksIfNeeded();
 267   } else {
 268     // Place as much of mr in the linAB as we can get,
 269     // provided it was big enough to go into the dictionary.
 270     FreeChunk* fc = dictionary()->findLargestDict();
 271     if (fc != NULL) {
 272       assert(fc->size() == mr.word_size(),
 273              "Why was the chunk broken up?");
 274       removeChunkFromDictionary(fc);
 275       HeapWord* addr = (HeapWord*) fc;
      _smallLinearAllocBlock.set(addr, fc->size(),
        1024*SmallForLinearAlloc, fc->size());
 278       // Note that _unallocated_block is not updated here.
 279     }
 280   }
 281 }
 282 
 283 // Walks the entire dictionary, returning a coterminal
 284 // chunk, if it exists. Use with caution since it involves
 285 // a potentially complete walk of a potentially large tree.
 286 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 287 
 288   assert_lock_strong(&_freelistLock);
 289 
 290   return dictionary()->find_chunk_ends_at(end());
 291 }
 292 
 293 
 294 #ifndef PRODUCT
 295 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 296   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 297     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
 298   }
 299 }
 300 
 301 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 302   size_t sum = 0;
 303   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 304     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
 305   }
 306   return sum;
 307 }
 308 
 309 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 310   size_t count = 0;
  for (size_t i = MinChunkSize; i < IndexSetSize; i++) {
 312     debug_only(
 313       ssize_t total_list_count = 0;
 314       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 315          fc = fc->next()) {
 316         total_list_count++;
 317       }
 318       assert(total_list_count ==  _indexedFreeList[i].count(),
 319         "Count in list is incorrect");
 320     )
 321     count += _indexedFreeList[i].count();
 322   }
 323   return count;
 324 }
 325 
 326 size_t CompactibleFreeListSpace::totalCount() {
 327   size_t num = totalCountInIndexedFreeLists();
 328   num +=  dictionary()->totalCount();
 329   if (_smallLinearAllocBlock._word_size != 0) {
 330     num++;
 331   }
 332   return num;
 333 }
 334 #endif
 335 
 336 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 337   FreeChunk* fc = (FreeChunk*) p;
 338   return fc->isFree();
 339 }
 340 
 341 size_t CompactibleFreeListSpace::used() const {
 342   return capacity() - free();
 343 }
 344 
 345 size_t CompactibleFreeListSpace::free() const {
 346   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 347   // if you do this while the structures are in flux you
 348   // may get an approximate answer only; for instance
 349   // because there is concurrent allocation either
 350   // directly by mutators or for promotion during a GC.
 351   // It's "MT-safe", however, in the sense that you are guaranteed
 352   // not to crash and burn, for instance, because of walking
 353   // pointers that could disappear as you were walking them.
 354   // The approximation is because the various components
 355   // that are read below are not read atomically (and
 356   // further the computation of totalSizeInIndexedFreeLists()
  // is itself a non-atomic computation). The normal use of
 358   // this is during a resize operation at the end of GC
 359   // and at that time you are guaranteed to get the
 360   // correct actual value. However, for instance, this is
 361   // also read completely asynchronously by the "perf-sampler"
 362   // that supports jvmstat, and you are apt to see the values
 363   // flicker in such cases.
 364   assert(_dictionary != NULL, "No _dictionary?");
 365   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
 366           totalSizeInIndexedFreeLists() +
 367           _smallLinearAllocBlock._word_size) * HeapWordSize;
 368 }
 369 
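// Largest block currently available for allocation (in words): the maximum
// of the dictionary's largest chunk, the small linAB remainder (capped at
// SmallForLinearAlloc - 1), and the largest non-empty indexed free list.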
 370 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 371   assert(_dictionary != NULL, "No _dictionary?");
 372   assert_locked();
 373   size_t res = _dictionary->maxChunkSize();
 374   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 375                        (size_t) SmallForLinearAlloc - 1));
  // XXX The following loop could potentially be pretty slow;
  // should one, pessimistically for the rare cases when the res
  // calculated above is less than IndexSetSize,
  // just return the res calculated above? My reasoning was that
  // those cases will be so rare that the extra time spent doesn't
  // really matter....
  // Note: do not change the loop test i >= res + IndexSetStride
  // to i > res below, because i is unsigned and res may be zero.
 384   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 385        i -= IndexSetStride) {
 386     if (_indexedFreeList[i].head() != NULL) {
 387       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 388       return i;
 389     }
 390   }
 391   return res;
 392 }
 393 
 394 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 395   assert_lock_strong(&_freelistLock);
 396   assert(PrintFLSStatistics != 0, "Reporting error");
 397   _dictionary->reportStatistics();
 398   if (PrintFLSStatistics > 1) {
 399     reportIndexedFreeListStatistics();
 400     size_t totalSize = totalSizeInIndexedFreeLists() +
 401                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
    gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
 403   }
 404 }
 405 
 406 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 407   assert_lock_strong(&_freelistLock);
 408   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 409                       "--------------------------------\n");
 410   size_t totalSize = totalSizeInIndexedFreeLists();
 411   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
  gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
  gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
  gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
  if (freeBlocks != 0) {
    gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
  }
 418 }
 419 
 420 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 421   size_t res = 0;
 422   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 423     debug_only(
 424       ssize_t recount = 0;
 425       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 426          fc = fc->next()) {
 427         recount += 1;
 428       }
 429       assert(recount == _indexedFreeList[i].count(), 
 430         "Incorrect count in list");
 431     )
 432     res += _indexedFreeList[i].count();
 433   }
 434   return res;
 435 }
 436 
 437 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 438   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 439     if (_indexedFreeList[i].head() != NULL) {
 440       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 441       return (size_t)i;
 442     }
 443   }
 444   return 0;
 445 }
 446 
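// Change the end of the space, resizing the block offset table to match.
// When the space grows, the newly covered words are handed to the small
// linAB (non-adaptive case with an empty linAB) or added to the free lists.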
 447 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 448   HeapWord* prevEnd = end();
 449   assert(prevEnd != value, "unnecessary set_end call");
 450   assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
 451   _end = value;
 452   if (prevEnd != NULL) {
 453     // Resize the underlying block offset table.
 454     _bt.resize(pointer_delta(value, bottom()));
    if (value <= prevEnd) {
      assert(value >= unallocated_block(), "New end is below unallocated block");
    } else {
      // Now, take this new chunk and add it to the free blocks.
      // Note that the BOT has not yet been updated for this block.
      size_t newFcSize = pointer_delta(value, prevEnd);
      // XXX This is REALLY UGLY and should be fixed up. XXX
      if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
        // Mark the boundary of the new block in BOT
        _bt.mark_block(prevEnd, value);
        // put it all in the linAB
        if (ParallelGCThreads == 0) {
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        } else { // ParallelGCThreads > 0
          MutexLockerEx x(parDictionaryAllocLock(),
                          Mutex::_no_safepoint_check_flag);
          _smallLinearAllocBlock._ptr = prevEnd;
          _smallLinearAllocBlock._word_size = newFcSize;
          repairLinearAllocBlock(&_smallLinearAllocBlock);
        }
        // Births of chunks put into a LinAB are not recorded.  Births
        // of chunks as they are allocated out of a LinAB are.
      } else {
        // Add the block to the free lists, if possible coalescing it
        // with the last free block, and update the BOT and census data.
        addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
      }
    }
  }
 486 }
 487 
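// A dirty-card-to-oop closure specialized for CompactibleFreeListSpace.
// It walks the blocks overlapping a dirty card region, applying the oop
// closure only to blocks that are objects (skipping free chunks, objects
// allocated since the last save_marks, and objects the collector knows to
// be dead), and chooses the parallel or serial walker based on the number
// of parallel GC threads in use.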
 488 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 489   CompactibleFreeListSpace* _cfls;
 490   CMSCollector* _collector;
 491 protected:
 492   // Override.
 493 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 494   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 495                                        HeapWord* bottom, HeapWord* top, \
 496                                        ClosureType* cl);                \
 497       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 498                                        HeapWord* bottom, HeapWord* top, \
 499                                        ClosureType* cl);                \
 500     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 501                                        HeapWord* bottom, HeapWord* top, \
 502                                        ClosureType* cl)
 503   walk_mem_region_with_cl_DECL(OopClosure);
 504   walk_mem_region_with_cl_DECL(FilteringClosure);
 505 
 506 public:
 507   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 508                       CMSCollector* collector,
 509                       OopClosure* cl,
 510                       CardTableModRefBS::PrecisionStyle precision,
 511                       HeapWord* boundary) :
 512     Filtering_DCTOC(sp, cl, precision, boundary),
 513     _cfls(sp), _collector(collector) {}
 514 };
 515 
 516 // We de-virtualize the block-related calls below, since we know that our
 517 // space is a CompactibleFreeListSpace.
 518 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 519 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 520                                                  HeapWord* bottom,              \
 521                                                  HeapWord* top,                 \
 522                                                  ClosureType* cl) {             \
 523    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
 524      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 525    } else {                                                                     \
 526      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 527    }                                                                            \
 528 }                                                                               \
 529 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 530                                                       HeapWord* bottom,         \
 531                                                       HeapWord* top,            \
 532                                                       ClosureType* cl) {        \
 533   /* Skip parts that are before "mr", in case "block_start" sent us             \
 534      back too far. */                                                           \
 535   HeapWord* mr_start = mr.start();                                              \
 536   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 537   HeapWord* next = bottom + bot_size;                                           \
 538   while (next < mr_start) {                                                     \
 539     bottom = next;                                                              \
 540     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 541     next = bottom + bot_size;                                                   \
 542   }                                                                             \
 543                                                                                 \
 544   while (bottom < top) {                                                        \
 545     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 546         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 547                     oop(bottom)) &&                                             \
 548         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 549       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 550       bottom += _cfls->adjustObjectSize(word_sz);                               \
 551     } else {                                                                    \
 552       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 553     }                                                                           \
 554   }                                                                             \
 555 }                                                                               \
 556 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 557                                                         HeapWord* bottom,       \
 558                                                         HeapWord* top,          \
 559                                                         ClosureType* cl) {      \
 560   /* Skip parts that are before "mr", in case "block_start" sent us             \
 561      back too far. */                                                           \
 562   HeapWord* mr_start = mr.start();                                              \
 563   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 564   HeapWord* next = bottom + bot_size;                                           \
 565   while (next < mr_start) {                                                     \
 566     bottom = next;                                                              \
 567     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 568     next = bottom + bot_size;                                                   \
 569   }                                                                             \
 570                                                                                 \
 571   while (bottom < top) {                                                        \
 572     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 573         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 574                     oop(bottom)) &&                                             \
 575         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 576       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 577       bottom += _cfls->adjustObjectSize(word_sz);                               \
 578     } else {                                                                    \
 579       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 580     }                                                                           \
 581   }                                                                             \
 582 }
 583 
 584 // (There are only two of these, rather than N, because the split is due
 585 // only to the introduction of the FilteringClosure, a local part of the
 586 // impl of this abstraction.)
 587 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
 588 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 589 
 590 DirtyCardToOopClosure*
 591 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
 592                                       CardTableModRefBS::PrecisionStyle precision,
 593                                       HeapWord* boundary) {
 594   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 595 }
 596 
 597 
 598 // Note on locking for the space iteration functions:
 599 // since the collector's iteration activities are concurrent with
 600 // allocation activities by mutators, absent a suitable mutual exclusion
// mechanism the iterators may go awry. For instance, a block being iterated
 602 // may suddenly be allocated or divided up and part of it allocated and
 603 // so on.
 604 
 605 // Apply the given closure to each block in the space.
 606 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 607   assert_lock_strong(freelistLock());
 608   HeapWord *cur, *limit;
 609   for (cur = bottom(), limit = end(); cur < limit;
 610        cur += cl->do_blk_careful(cur));
 611 }
 612 
 613 // Apply the given closure to each block in the space.
 614 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 615   assert_lock_strong(freelistLock());
 616   HeapWord *cur, *limit;
 617   for (cur = bottom(), limit = end(); cur < limit;
 618        cur += cl->do_blk(cur));
 619 }
 620 
 621 // Apply the given closure to each oop in the space.
 622 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
 623   assert_lock_strong(freelistLock());
 624   HeapWord *cur, *limit;
 625   size_t curSize;
 626   for (cur = bottom(), limit = end(); cur < limit;
 627        cur += curSize) {
 628     curSize = block_size(cur);
 629     if (block_is_obj(cur)) {
 630       oop(cur)->oop_iterate(cl);
 631     }
 632   }
 633 }
 634 
 635 // Apply the given closure to each oop in the space \intersect memory region.
 636 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
 637   assert_lock_strong(freelistLock());
 638   if (is_empty()) {
 639     return;
 640   }
 641   MemRegion cur = MemRegion(bottom(), end());
 642   mr = mr.intersection(cur);
 643   if (mr.is_empty()) {
 644     return;
 645   }
 646   if (mr.equals(cur)) {
 647     oop_iterate(cl);
 648     return;
 649   }
 650   assert(mr.end() <= end(), "just took an intersection above");
 651   HeapWord* obj_addr = block_start(mr.start());
 652   HeapWord* t = mr.end();
 653 
 654   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 655   if (block_is_obj(obj_addr)) {
 656     // Handle first object specially.
 657     oop obj = oop(obj_addr);
 658     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 659   } else {
 660     FreeChunk* fc = (FreeChunk*)obj_addr;
 661     obj_addr += fc->size();
 662   }
 663   while (obj_addr < t) {
 664     HeapWord* obj = obj_addr;
 665     obj_addr += block_size(obj_addr);
    // If "obj_addr" is not greater than "t", then the
    // entire object "obj" is within the region.
    if (obj_addr <= t) {
 669       if (block_is_obj(obj)) {
 670         oop(obj)->oop_iterate(cl);
 671       }               
 672     } else {
 673       // "obj" extends beyond end of region
 674       if (block_is_obj(obj)) {
 675         oop(obj)->oop_iterate(&smr_blk);
 676       }    
 677       break;
 678     }
 679   }
 680 }
 681 
 682 // NOTE: In the following methods, in order to safely be able to
 683 // apply the closure to an object, we need to be sure that the
 684 // object has been initialized. We are guaranteed that an object
 685 // is initialized if we are holding the Heap_lock with the
 686 // world stopped.
 687 void CompactibleFreeListSpace::verify_objects_initialized() const {
 688   if (is_init_completed()) {
 689     assert_locked_or_safepoint(Heap_lock);
 690     if (Universe::is_fully_initialized()) {
 691       guarantee(SafepointSynchronize::is_at_safepoint(),
 692                 "Required for objects to be initialized");
 693     }
 694   } // else make a concession at vm start-up
 695 }
 696 
 697 // Apply the given closure to each object in the space
 698 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 699   assert_lock_strong(freelistLock());
 700   NOT_PRODUCT(verify_objects_initialized());
 701   HeapWord *cur, *limit;
 702   size_t curSize;
 703   for (cur = bottom(), limit = end(); cur < limit;
 704        cur += curSize) {
 705     curSize = block_size(cur);
 706     if (block_is_obj(cur)) {
 707       blk->do_object(oop(cur));
 708     }
 709   }
 710 }
 711 
 712 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 713                                                   UpwardsObjectClosure* cl) {
 714   assert_locked();
 715   NOT_PRODUCT(verify_objects_initialized());
 716   Space::object_iterate_mem(mr, cl);
 717 }
 718 
 719 // Callers of this iterator beware: The closure application should
 720 // be robust in the face of uninitialized objects and should (always)
 721 // return a correct size so that the next addr + size below gives us a
 722 // valid block boundary. [See for instance,
 723 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 724 // in ConcurrentMarkSweepGeneration.cpp.]
 725 HeapWord*
 726 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 727   assert_lock_strong(freelistLock());
 728   HeapWord *addr, *last;
 729   size_t size;
 730   for (addr = bottom(), last  = end();
 731        addr < last; addr += size) {
 732     FreeChunk* fc = (FreeChunk*)addr;
 733     if (fc->isFree()) {
 734       // Since we hold the free list lock, which protects direct
 735       // allocation in this generation by mutators, a free object
 736       // will remain free throughout this iteration code.
 737       size = fc->size();
 738     } else {
 739       // Note that the object need not necessarily be initialized,
 740       // because (for instance) the free list lock does NOT protect
 741       // object initialization. The closure application below must
 742       // therefore be correct in the face of uninitialized objects.
 743       size = cl->do_object_careful(oop(addr));
 744       if (size == 0) {
 745         // An unparsable object found. Signal early termination.
 746         return addr;
 747       }
 748     }
 749   }
 750   return NULL;
 751 }
 752 
 753 // Callers of this iterator beware: The closure application should
 754 // be robust in the face of uninitialized objects and should (always)
 755 // return a correct size so that the next addr + size below gives us a
 756 // valid block boundary. [See for instance,
 757 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 758 // in ConcurrentMarkSweepGeneration.cpp.]
 759 HeapWord*
 760 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 761   ObjectClosureCareful* cl) {
 762   assert_lock_strong(freelistLock());
 763   // Can't use used_region() below because it may not necessarily
 764   // be the same as [bottom(),end()); although we could
 765   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 766   // that appears too cumbersome, so we just do the simpler check
 767   // in the assertion below.
 768   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 769          "mr should be non-empty and within used space");
 770   HeapWord *addr, *end;
 771   size_t size;
 772   for (addr = block_start_careful(mr.start()), end  = mr.end();
 773        addr < end; addr += size) {
 774     FreeChunk* fc = (FreeChunk*)addr;
 775     if (fc->isFree()) {
 776       // Since we hold the free list lock, which protects direct
 777       // allocation in this generation by mutators, a free object
 778       // will remain free throughout this iteration code.
 779       size = fc->size();
 780     } else {
 781       // Note that the object need not necessarily be initialized,
 782       // because (for instance) the free list lock does NOT protect
 783       // object initialization. The closure application below must
 784       // therefore be correct in the face of uninitialized objects.
 785       size = cl->do_object_careful_m(oop(addr), mr);
 786       if (size == 0) {
 787         // An unparsable object found. Signal early termination.
 788         return addr;
 789       }
 790     }
 791   }
 792   return NULL;
 793 }
 794 
 795 
 796 HeapWord* CompactibleFreeListSpace::block_start_const(const void* p) const {
 797   NOT_PRODUCT(verify_objects_initialized());
 798   return _bt.block_start(p);
 799 }
 800 
 801 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 802   return _bt.block_start_careful(p);
 803 }
 804 
 805 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 806   NOT_PRODUCT(verify_objects_initialized());
 807   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 808   // This must be volatile, or else there is a danger that the compiler
 809   // will compile the code below into a sometimes-infinite loop, by keeping 
 810   // the value read the first time in a register.
 811   while (true) {
 812     // We must do this until we get a consistent view of the object.
 813     if (FreeChunk::indicatesFreeChunk(p)) {
 814       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 815       size_t res = fc->size();
 816       // If the object is still a free chunk, return the size, else it
 817       // has been allocated so try again.
 818       if (FreeChunk::indicatesFreeChunk(p)) {
 819         assert(res != 0, "Block size should not be 0");
 820         return res;
 821       }
 822     } else {
 823       // must read from what 'p' points to in each loop.
 824       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
 825       if (k != NULL) {
 826         assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
 827         oop o = (oop)p;
 828         assert(o->is_parsable(), "Should be parsable");
 829         assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 830         size_t res = o->size_given_klass(k->klass_part());
 831         res = adjustObjectSize(res);
 832         assert(res != 0, "Block size should not be 0");
 833         return res;
 834       }
 835     }
 836   }
 837 }
 838 
 839 // A variant of the above that uses the Printezis bits for
 840 // unparsable but allocated objects. This avoids any possible
 841 // stalls waiting for mutators to initialize objects, and is
 842 // thus potentially faster than the variant above. However,
 843 // this variant may return a zero size for a block that is
 844 // under mutation and for which a consistent size cannot be
 845 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 846 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 847                                                      const CMSCollector* c)
 848 const {
 849   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 850   // This must be volatile, or else there is a danger that the compiler
 851   // will compile the code below into a sometimes-infinite loop, by keeping
 852   // the value read the first time in a register.
 853   DEBUG_ONLY(uint loops = 0;)
 854   while (true) {
 855     // We must do this until we get a consistent view of the object.
 856     if (FreeChunk::indicatesFreeChunk(p)) {
 857       volatile FreeChunk* fc = (volatile FreeChunk*)p;
 858       size_t res = fc->size();
 859       if (FreeChunk::indicatesFreeChunk(p)) {
 860         assert(res != 0, "Block size should not be 0");
 861         assert(loops == 0, "Should be 0");
 862         return res;
 863       }
 864     } else {
 865       // must read from what 'p' points to in each loop.
 866       klassOop k = ((volatile oopDesc*)p)->klass_or_null();
 867       if (k != NULL && ((oopDesc*)p)->is_parsable()) {
 868         assert(k->is_oop(), "Should really be klass oop.");
 869         oop o = (oop)p;
 870         assert(o->is_oop(), "Should be an oop");
 871         size_t res = o->size_given_klass(k->klass_part());
 872         res = adjustObjectSize(res);
 873         assert(res != 0, "Block size should not be 0");
 874         return res;
 875       } else {
 876         return c->block_size_if_printezis_bits(p);
 877       }
 878     }
 879     assert(loops == 0, "Can loop at most once");
 880     DEBUG_ONLY(loops++;)
 881   }
 882 }
 883 
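// A variant of block_size() for use when the free lists cannot be mutated
// concurrently (hence "nopar"): the chunk header can be read directly,
// without the re-read loop used above to guard against racing allocations.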
 884 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
 885   NOT_PRODUCT(verify_objects_initialized());
 886   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 887   FreeChunk* fc = (FreeChunk*)p;
 888   if (fc->isFree()) {
 889     return fc->size();
 890   } else {
 891     // Ignore mark word because this may be a recently promoted
 892     // object whose mark word is used to chain together grey
 893     // objects (the last one would have a null value).
 894     assert(oop(p)->is_oop(true), "Should be an oop");
 895     return adjustObjectSize(oop(p)->size());
 896   }
 897 }
 898 
 899 // This implementation assumes that the property of "being an object" is
// stable.  But being a free chunk may not be (because of parallel
// promotion).
 902 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
 903   FreeChunk* fc = (FreeChunk*)p;
 904   assert(is_in_reserved(p), "Should be in space");
 905   // When doing a mark-sweep-compact of the CMS generation, this
 906   // assertion may fail because prepare_for_compaction() uses
 907   // space that is garbage to maintain information on ranges of
 908   // live objects so that these live ranges can be moved as a whole.
  // Comment out this assertion until that problem can be solved
  // (i.e., the block start calculation may look at objects
  // at addresses below "p" in finding the object that contains "p",
  // and those objects (if garbage) may have been modified to hold
  // live range information).
 914   // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
 915   if (FreeChunk::indicatesFreeChunk(p)) return false;
 916   klassOop k = oop(p)->klass_or_null();
 917   if (k != NULL) {
 918     // Ignore mark word because it may have been used to
 919     // chain together promoted objects (the last one
 920     // would have a null value).
 921     assert(oop(p)->is_oop(true), "Should be an oop");
 922     return true;
 923   } else {
 924     return false;  // Was not an object at the start of collection.
 925   }
 926 }
 927 
 928 // Check if the object is alive. This fact is checked either by consulting
 929 // the main marking bitmap in the sweeping phase or, if it's a permanent
 930 // generation and we're not in the sweeping phase, by checking the
 931 // perm_gen_verify_bit_map where we store the "deadness" information if
 932 // we did not sweep the perm gen in the most recent previous GC cycle.
 933 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
 934   assert (block_is_obj(p), "The address should point to an object");
 935 
 936   // If we're sweeping, we use object liveness information from the main bit map
 937   // for both perm gen and old gen.
 938   // We don't need to lock the bitmap (live_map or dead_map below), because
 939   // EITHER we are in the middle of the sweeping phase, and the
 940   // main marking bit map (live_map below) is locked,
 941   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
 942   // is stable, because it's mutated only in the sweeping phase.
 943   if (_collector->abstract_state() == CMSCollector::Sweeping) {
 944     CMSBitMap* live_map = _collector->markBitMap();
 945     return live_map->isMarked((HeapWord*) p);
 946   } else {
 947     // If we're not currently sweeping and we haven't swept the perm gen in
 948     // the previous concurrent cycle then we may have dead but unswept objects
 949     // in the perm gen. In this case, we use the "deadness" information
 950     // that we had saved in perm_gen_verify_bit_map at the last sweep.
 951     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
 952       if (_collector->verifying()) {
 953         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
 954         // Object is marked in the dead_map bitmap at the previous sweep
 955         // when we know that it's dead; if the bitmap is not allocated then
 956         // the object is alive.
 957         return (dead_map->sizeInBits() == 0) // bit_map has been allocated
 958                || !dead_map->par_isMarked((HeapWord*) p);
 959       } else {
 960         return false; // We can't say for sure if it's live, so we say that it's dead.
 961       }
 962     }
 963   }
 964   return true;
 965 }
 966 
 967 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
 968   FreeChunk* fc = (FreeChunk*)p;
 969   assert(is_in_reserved(p), "Should be in space");
 970   assert(_bt.block_start(p) == p, "Should be a block boundary");
 971   if (!fc->isFree()) {
 972     // Ignore mark word because it may have been used to
 973     // chain together promoted objects (the last one
 974     // would have a null value).
 975     assert(oop(p)->is_oop(true), "Should be an oop");
 976     return true;
 977   }
 978   return false;
 979 }
 980 
// "MT-safe but not guaranteed MT-precise" (TM); you may get an
// approximate answer if you don't hold the freelistLock when you call this.
 983 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
 984   size_t size = 0;
 985   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 986     debug_only(
 987       // We may be calling here without the lock in which case we
 988       // won't do this modest sanity check.
 989       if (freelistLock()->owned_by_self()) {
 990         size_t total_list_size = 0;
 991         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 992           fc = fc->next()) {
 993           total_list_size += i;
 994         }
 995         assert(total_list_size == i * _indexedFreeList[i].count(),
 996                "Count in list is incorrect");
 997       }
 998     )
 999     size += i * _indexedFreeList[i].count();
1000   }
1001   return size;
1002 }
1003 
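// Parallel allocation entry point: take the free list lock (without a
// safepoint check) and delegate to the serial allocate().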
1004 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1005   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1006   return allocate(size);
1007 }
1008 
1009 HeapWord*
1010 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1011   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1012 }
1013 
1014 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1015   assert_lock_strong(freelistLock());
1016   HeapWord* res = NULL;
1017   assert(size == adjustObjectSize(size),
1018          "use adjustObjectSize() before calling into allocate()");
1019   
1020   if (_adaptive_freelists) {
1021     res = allocate_adaptive_freelists(size);
1022   } else {  // non-adaptive free lists
1023     res = allocate_non_adaptive_freelists(size);
1024   }
1025   
1026   if (res != NULL) {
1027     // check that res does lie in this space!
1028     assert(is_in_reserved(res), "Not in this space!");
1029     assert(is_aligned((void*)res), "alignment check");
1030 
1031     FreeChunk* fc = (FreeChunk*)res;
1032     fc->markNotFree();
1033     assert(!fc->isFree(), "shouldn't be marked free");
1034     assert(oop(fc)->klass_or_null() == NULL, "should look uninitialized");
1035     // Verify that the block offset table shows this to
1036     // be a single block, but not one which is unallocated.
1037     _bt.verify_single_block(res, size); 
1038     _bt.verify_not_unallocated(res, size);
1039     // mangle a just allocated object with a distinct pattern.
1040     debug_only(fc->mangleAllocated(size));
1041   }
1042   
1043   return res;
1044 }
1045 
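// Non-adaptive allocation strategy: try the small linear allocation block
// first, then the indexed free lists for sizes below SmallForDictionary,
// and finally the dictionary.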
1046 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1047   HeapWord* res = NULL;
1048   // try and use linear allocation for smaller blocks
1049   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1050     // if successful, the following also adjusts block offset table
1051     res = getChunkFromSmallLinearAllocBlock(size);
1052   }
1053   // Else triage to indexed lists for smaller sizes
1054   if (res == NULL) {
1055     if (size < SmallForDictionary) {
1056       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1057     } else { 
1058       // else get it from the big dictionary; if even this doesn't
1059       // work we are out of luck.
1060       res = (HeapWord*)getChunkFromDictionaryExact(size);
1061     }
1062   }
1063 
1064   return res;
1065 }
1066 
1067 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1068   assert_lock_strong(freelistLock());
1069   HeapWord* res = NULL;
1070   assert(size == adjustObjectSize(size),
1071          "use adjustObjectSize() before calling into allocate()");
1072   
1073   // Strategy
1074   //   if small
1075   //     exact size from small object indexed list if small
1076   //     small or large linear allocation block (linAB) as appropriate
1077   //     take from lists of greater sized chunks
1078   //   else
1079   //     dictionary
1080   //     small or large linear allocation block if it has the space
1081   // Try allocating exact size from indexTable first
1082   if (size < IndexSetSize) {
1083     res = (HeapWord*) getChunkFromIndexedFreeList(size);
    if (res != NULL) {
      assert(res != (HeapWord*)_indexedFreeList[size].head(),
        "Not removed from free list");
1087       // no block offset table adjustment is necessary on blocks in
1088       // the indexed lists.
1089 
1090     // Try allocating from the small LinAB
1091     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1092         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
        // if successful, the above also adjusts the block offset table
        // Note that this call will refill the LinAB to
        // satisfy the request.  This is different than
        // evm.
        // Don't record chunk off a LinAB?  smallSplitBirth(size);
1098   
1099     } else {
1100       // Raid the exact free lists larger than size, even if they are not
1101       // overpopulated.
1102       res = (HeapWord*) getChunkFromGreater(size);
1103     }
1104   } else {
1105     // Big objects get allocated directly from the dictionary.
1106     res = (HeapWord*) getChunkFromDictionaryExact(size);
1107     if (res == NULL) {
1108       // Try hard not to fail since an allocation failure will likely
1109       // trigger a synchronous GC.  Try to get the space from the 
1110       // allocation blocks.
1111       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1112     }
1113   }
1114   
1115   return res;
1116 }
1117 
1118 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1119 // when promoting obj.
1120 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1121   // Depending on the object size, expansion may require refilling either a
1122   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1123   // is added because the dictionary may over-allocate to avoid fragmentation.
1124   size_t space = obj_size;
1125   if (!_adaptive_freelists) {
1126     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1127   }
1128   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1129   return space;
1130 }
1131 
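// Satisfy a request for numWords by splitting a chunk taken from a larger
// indexed free list or, failing that, from the dictionary; the remainder
// of the split is returned to the free lists.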
1132 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1133   FreeChunk* ret;
1134 
1135   assert(numWords >= MinChunkSize, "Size is less than minimum");
1136   assert(linearAllocationWouldFail() || bestFitFirst(),
1137     "Should not be here");
1138 
1139   size_t i;
1140   size_t currSize = numWords + MinChunkSize;
1141   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1142   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1143     FreeList* fl = &_indexedFreeList[i];
1144     if (fl->head()) {
1145       ret = getFromListGreater(fl, numWords);
1146       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1147       return ret;
1148     }
1149   }
1150 
1151   currSize = MAX2((size_t)SmallForDictionary,
1152                   (size_t)(numWords + MinChunkSize));
1153 
1154   /* Try to get a chunk that satisfies request, while avoiding
1155      fragmentation that can't be handled. */
1156   {
1157     ret =  dictionary()->getChunk(currSize);
1158     if (ret != NULL) {
1159       assert(ret->size() - numWords >= MinChunkSize,
1160              "Chunk is too small");
1161       _bt.allocated((HeapWord*)ret, ret->size());
1162       /* Carve returned chunk. */
1163       (void) splitChunkAndReturnRemainder(ret, numWords);
1164       /* Label this as no longer a free chunk. */
1165       assert(ret->isFree(), "This chunk should be free");
1166       ret->linkPrev(NULL);
1167     }
1168     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1169     return ret;
1170   }
1171   ShouldNotReachHere();
1172 }
1173 
1174 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) 
1175   const {
1176   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1177   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
1178 }
1179 
1180 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
1181   if (fc->size() >= IndexSetSize) {
1182     return dictionary()->verifyChunkInFreeLists(fc);
1183   } else {
1184     return verifyChunkInIndexedFreeLists(fc);
1185   }
1186 }
1187 
1188 #ifndef PRODUCT
1189 void CompactibleFreeListSpace::assert_locked() const {
1190   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1191 }
1192 #endif
1193 
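// Allocate a chunk from the dictionary for use as scratch space and mark
// it non-coalescable so that it is not merged with adjacent free blocks.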
1194 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1195   // In the parallel case, the main thread holds the free list lock
  // on behalf of the parallel threads.
1197   assert_locked();
1198   FreeChunk* fc;
1199   {
1200     // If GC is parallel, this might be called by several threads.
1201     // This should be rare enough that the locking overhead won't affect
1202     // the sequential code.
1203     MutexLockerEx x(parDictionaryAllocLock(),
1204                     Mutex::_no_safepoint_check_flag);
1205     fc = getChunkFromDictionary(size);
1206   }
1207   if (fc != NULL) {
1208     fc->dontCoalesce();
1209     assert(fc->isFree(), "Should be free, but not coalescable");
1210     // Verify that the block offset table shows this to
1211     // be a single block, but not one which is unallocated.
1212     _bt.verify_single_block((HeapWord*)fc, fc->size());
1213     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1214   }
1215   return fc;
1216 }
1217 
1218 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size) {
1219   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1220   assert_locked();
1221 
1222   // if we are tracking promotions, then first ensure space for
1223   // promotion (including spooling space for saving header if necessary).
1224   // then allocate and copy, then track promoted info if needed.
1225   // When tracking (see PromotionInfo::track()), the mark word may
1226   // be displaced and in this case restoration of the mark word
1227   // occurs in the (oop_since_save_marks_)iterate phase.
1228   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1229     return NULL;
1230   }
1231   // Call the allocate(size_t, bool) form directly to avoid the
1232   // additional call through the allocate(size_t) form.  Having
1233   // the compiler inline the call is problematic because allocate(size_t)
1234   // is a virtual method.
1235   HeapWord* res = allocate(adjustObjectSize(obj_size));
1236   if (res != NULL) {
1237     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1238     // if we should be tracking promotions, do so.
1239     if (_promoInfo.tracking()) {
1240         _promoInfo.track((PromotedObject*)res);
1241     }
1242   }
1243   return oop(res);
1244 }
1245 
1246 HeapWord*
1247 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1248   assert_locked();
1249   assert(size >= MinChunkSize, "minimum chunk size");
1250   assert(size <  _smallLinearAllocBlock._allocation_size_limit, 
1251     "maximum from smallLinearAllocBlock");
1252   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1253 }
1254 
1255 HeapWord*
1256 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1257                                                        size_t size) {
1258   assert_locked();
1259   assert(size >= MinChunkSize, "too small");
1260   HeapWord* res = NULL;
1261   // Try to do linear allocation from blk, bailing out if the block is empty.
1262   if (blk->_word_size == 0) {
1263     // We have probably been unable to refill this block, either in the
1264     // prologue or after it was exhausted at the last linear allocation.
1265     // Bail out until next time.
1266     assert(blk->_ptr == NULL, "consistency check");
1267     return NULL;
1268   }
1269   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1270   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1271   if (res != NULL) return res;
1272 
1273   // about to exhaust this linear allocation block
1274   if (blk->_word_size == size) { // exactly satisfied
1275     res = blk->_ptr;
1276     _bt.allocated(res, blk->_word_size);
1277   } else if (size + MinChunkSize <= blk->_refillSize) {
1278     // Update _unallocated_block if the size is such that chunk would be
1279     // returned to the indexed free list.  All other chunks in the indexed
1280     // free lists are allocated from the dictionary so that _unallocated_block
1281     // has already been adjusted for them.  Do it here so that the
1282     // adjustment is made for all chunks added back to the indexed free lists.
1283     if (blk->_word_size < SmallForDictionary) {
1284       _bt.allocated(blk->_ptr, blk->_word_size);
1285     }
1286     // Return the chunk that isn't big enough, and then refill below.
1287     addChunkToFreeLists(blk->_ptr, blk->_word_size);
1288     _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
1289     // Don't keep statistics on adding back chunk from a LinAB.
1290   } else {
1291     // A refilled block would not satisfy the request.
1292     return NULL;
1293   }
1294 
1295   blk->_ptr = NULL; blk->_word_size = 0;
1296   refillLinearAllocBlock(blk);
1297   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1298          "block was replenished");
1299   if (res != NULL) {
1300     splitBirth(size);
1301     repairLinearAllocBlock(blk);
1302   } else if (blk->_ptr != NULL) {
1303     res = blk->_ptr;
1304     size_t blk_size = blk->_word_size;
1305     blk->_word_size -= size;
1306     blk->_ptr  += size;
1307     splitBirth(size);
1308     repairLinearAllocBlock(blk);
1309     // Update BOT last so that other (parallel) GC threads see a consistent
1310     // view of the BOT and free blocks.
1311     // Above must occur before BOT is updated below.
1312     _bt.split_block(res, blk_size, size);  // adjust block offset table
1313   }
1314   return res;
1315 }
1316 
1317 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1318                                         LinearAllocBlock* blk, 
1319                                         size_t size) {
1320   assert_locked();
1321   assert(size >= MinChunkSize, "too small");
1322 
1323   HeapWord* res = NULL;
1324   // This is the common case.  Keep it simple.
1325   if (blk->_word_size >= size + MinChunkSize) {
1326     assert(blk->_ptr != NULL, "consistency check");
1327     res = blk->_ptr;
1328     // Note that the BOT is up-to-date for the linAB before allocation.  It
1329     // indicates the start of the linAB.  The split_block() updates the
1330     // BOT for the linAB after the allocation (indicates the start of the
1331     // next chunk to be allocated).
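         // Illustrative example: if the linAB currently spans [A, A + 100)
         // and size == 8 words, the allocation returns [A, A + 8) and
         // split_block() below records a new block boundary at A + 8, the
         // start of the shrunken linAB.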
1332     size_t blk_size = blk->_word_size;
1333     blk->_word_size -= size;
1334     blk->_ptr  += size;
1335     splitBirth(size);
1336     repairLinearAllocBlock(blk);
1337     // Update BOT last so that other (parallel) GC threads see a consistent
1338     // view of the BOT and free blocks.
1339     // Above must occur before BOT is updated below.
1340     _bt.split_block(res, blk_size, size);  // adjust block offset table
1341     _bt.allocated(res, size);
1342   } 
1343   return res;
1344 }
1345 
1346 FreeChunk* 
1347 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1348   assert_locked();
1349   assert(size < SmallForDictionary, "just checking");
1350   FreeChunk* res;
1351   res = _indexedFreeList[size].getChunkAtHead();
1352   if (res == NULL) {
1353     res = getChunkFromIndexedFreeListHelper(size);
1354   }
1355   _bt.verify_not_unallocated((HeapWord*) res, size);
1356   return res;
1357 }
1358 
1359 FreeChunk*
1360 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
1361   assert_locked();
1362   FreeChunk* fc = NULL;
1363   if (size < SmallForDictionary) {
1364     assert(_indexedFreeList[size].head() == NULL ||
1365       _indexedFreeList[size].surplus() <= 0,
1366       "List for this size should be empty or under populated");
1367     // Try best fit in exact lists before replenishing the list
1368     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1369       // Replenish list.
1370       //
1371       // Things tried that failed.
1372       //   Tried allocating out of the two LinAB's first before 
1373       // replenishing lists.  
1374       //   Tried small linAB of size 256 (size in indexed list)
1375       // and replenishing indexed lists from the small linAB.
1376       //
1377       FreeChunk* newFc = NULL;
1378       size_t replenish_size = CMSIndexedFreeListReplenish * size;
1379       if (replenish_size < SmallForDictionary) {
1380         // Do not replenish from an underpopulated size.
1381         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1382             _indexedFreeList[replenish_size].head() != NULL) {
1383           newFc = 
1384             _indexedFreeList[replenish_size].getChunkAtHead();
1385         } else {
1386           newFc = bestFitSmall(replenish_size);
1387         }
1388       }
1389       if (newFc != NULL) {
1390         splitDeath(replenish_size);
1391       } else if (replenish_size > size) {
1392         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1393         newFc = 
1394           getChunkFromIndexedFreeListHelper(replenish_size);
1395       }
1396       if (newFc != NULL) {
1397         assert(newFc->size() == replenish_size, "Got wrong size");
1398         size_t i;
1399         FreeChunk *curFc, *nextFc;
1400         // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
1401         // The last chunk is not added to the lists but is returned as the
1402         // free chunk.
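             // Illustrative example: with size == 8 and (say)
             // CMSIndexedFreeListReplenish == 4, newFc is a 32-word chunk;
             // the loop links the first three 8-word pieces onto
             // _indexedFreeList[8] and the fourth piece is returned below.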
1403         for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), 
1404              i = 0;
1405              i < (CMSIndexedFreeListReplenish - 1);
1406              curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), 
1407              i++) {
1408           curFc->setSize(size);
1409           // Don't record this as a return in order to try and
1410           // determine the "returns" from a GC.
1411           _bt.verify_not_unallocated((HeapWord*) curFc, size);
1412           _indexedFreeList[size].returnChunkAtTail(curFc, false);
1413           _bt.mark_block((HeapWord*)curFc, size);
1414           splitBirth(size);
1415           // Don't record the initial population of the indexed list
1416           // as a split birth.
1417         }
1418 
1419         // check that the arithmetic was OK above
1420         assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
1421           "inconsistency in carving newFc");
1422         curFc->setSize(size);
1423         _bt.mark_block((HeapWord*)curFc, size);
1424         splitBirth(size);
1425         return curFc;
1426       }
1427     }
1428   } else {
1429     // Get a free chunk from the free chunk dictionary to be returned to
1430     // replenish the indexed free list.
1431     fc = getChunkFromDictionaryExact(size);
1432   }
1433   assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
1434   return fc;
1435 }
1436 
1437 FreeChunk*
1438 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1439   assert_locked();
1440   FreeChunk* fc = _dictionary->getChunk(size);
1441   if (fc == NULL) {
1442     return NULL;
1443   }
1444   _bt.allocated((HeapWord*)fc, fc->size());
1445   if (fc->size() >= size + MinChunkSize) {
1446     fc = splitChunkAndReturnRemainder(fc, size);
1447   }
1448   assert(fc->size() >= size, "chunk too small");
1449   assert(fc->size() < size + MinChunkSize, "chunk too big");
1450   _bt.verify_single_block((HeapWord*)fc, fc->size());
1451   return fc;
1452 }
1453 
1454 FreeChunk*
1455 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1456   assert_locked();
1457   FreeChunk* fc = _dictionary->getChunk(size);
1458   if (fc == NULL) {
1459     return fc;
1460   }
1461   _bt.allocated((HeapWord*)fc, fc->size());
1462   if (fc->size() == size) {
1463     _bt.verify_single_block((HeapWord*)fc, size);
1464     return fc;
1465   }
1466   assert(fc->size() > size, "getChunk() guarantee");
1467   if (fc->size() < size + MinChunkSize) {
1468     // Return the chunk to the dictionary and go get a bigger one.
1469     returnChunkToDictionary(fc);
1470     fc = _dictionary->getChunk(size + MinChunkSize); 
1471     if (fc == NULL) {
1472       return NULL;
1473     }
1474     _bt.allocated((HeapWord*)fc, fc->size());
1475   }
1476   assert(fc->size() >= size + MinChunkSize, "tautology");
1477   fc = splitChunkAndReturnRemainder(fc, size);
1478   assert(fc->size() == size, "chunk is wrong size");
1479   _bt.verify_single_block((HeapWord*)fc, size);
1480   return fc;
1481 }
1482 
1483 void
1484 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1485   assert_locked();
1486 
1487   size_t size = chunk->size();
1488   _bt.verify_single_block((HeapWord*)chunk, size);
1489   // adjust _unallocated_block downward, as necessary
1490   _bt.freed((HeapWord*)chunk, size);
1491   _dictionary->returnChunk(chunk);
1492 }
1493 
1494 void
1495 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1496   assert_locked();
1497   size_t size = fc->size();
1498   _bt.verify_single_block((HeapWord*) fc, size);
1499   _bt.verify_not_unallocated((HeapWord*) fc, size);
1500   if (_adaptive_freelists) {
1501     _indexedFreeList[size].returnChunkAtTail(fc);
1502   } else {
1503     _indexedFreeList[size].returnChunkAtHead(fc);
1504   }
1505 }
1506 
1507 // Add chunk to end of last block -- if it's the largest
1508 // block -- and update BOT and census data. We would
1509 // of course have preferred to coalesce it with the
1510 // last block, but it's currently less expensive to find the
1511 // largest block than it is to find the last.
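     // Illustrative example: if the largest dictionary block is [A, A + 200)
     // and the incoming chunk is the 50-word block starting at A + 200, the
     // dictionary block is removed (a coalesce death at size 200) and a
     // single 250-word block starting at A is added back below.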
1512 void
1513 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1514   HeapWord* chunk, size_t     size) {
1515   // check that the chunk does lie in this space!
1516   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1517   assert_locked();
1518   // One of the parallel gc task threads may be here
1519   // whilst others are allocating.
1520   Mutex* lock = NULL;
1521   if (ParallelGCThreads != 0) {
1522     lock = &_parDictionaryAllocLock;
1523   }
1524   FreeChunk* ec;
1525   {
1526     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1527     ec = dictionary()->findLargestDict();  // get largest block
1528     if (ec != NULL && ec->end() == chunk) {
1529       // It's a coterminal block - we can coalesce.
1530       size_t old_size = ec->size();
1531       coalDeath(old_size);
1532       removeChunkFromDictionary(ec);
1533       size += old_size;
1534     } else {
1535       ec = (FreeChunk*)chunk;
1536     }
1537   }
1538   ec->setSize(size);
1539   debug_only(ec->mangleFreed(size));
1540   if (size < SmallForDictionary) {
1541     lock = _indexedFreeListParLocks[size];
1542   }
1543   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1544   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
1545   // Record the birth under the lock, since the recording involves
1546   // manipulation of the list on which the chunk lives; if the chunk
1547   // is allocated and is the last on the list,
1548   // the list can go away.
1549   coalBirth(size);
1550 }
1551 
1552 void
1553 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1554                                               size_t     size) {
1555   // check that the chunk does lie in this space!
1556   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1557   assert_locked();
1558   _bt.verify_single_block(chunk, size);
1559 
1560   FreeChunk* fc = (FreeChunk*) chunk;
1561   fc->setSize(size);
1562   debug_only(fc->mangleFreed(size));
1563   if (size < SmallForDictionary) {
1564     returnChunkToFreeList(fc);
1565   } else {
1566     returnChunkToDictionary(fc);
1567   }
1568 }
1569 
1570 void
1571 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1572   size_t size, bool coalesced) {
1573   assert_locked();
1574   assert(chunk != NULL, "null chunk");
1575   if (coalesced) {
1576     // repair BOT
1577     _bt.single_block(chunk, size);
1578   }
1579   addChunkToFreeLists(chunk, size);
1580 }
1581 
1582 // We _must_ find the purported chunk on our free lists;
1583 // we assert if we don't.
1584 void
1585 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1586   size_t size = fc->size();
1587   assert_locked();
1588   debug_only(verifyFreeLists());
1589   if (size < SmallForDictionary) {
1590     removeChunkFromIndexedFreeList(fc);
1591   } else {
1592     removeChunkFromDictionary(fc);
1593   }
1594   _bt.verify_single_block((HeapWord*)fc, size);
1595   debug_only(verifyFreeLists());
1596 }
1597 
1598 void
1599 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1600   size_t size = fc->size();
1601   assert_locked();
1602   assert(fc != NULL, "null chunk");
1603   _bt.verify_single_block((HeapWord*)fc, size);
1604   _dictionary->removeChunk(fc);
1605   // adjust _unallocated_block upward, as necessary
1606   _bt.allocated((HeapWord*)fc, size);
1607 }
1608 
1609 void
1610 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1611   assert_locked();
1612   size_t size = fc->size();
1613   _bt.verify_single_block((HeapWord*)fc, size);
1614   NOT_PRODUCT(
1615     if (FLSVerifyIndexTable) {
1616       verifyIndexedFreeList(size);
1617     }
1618   )
1619   _indexedFreeList[size].removeChunk(fc);
1620   debug_only(fc->clearNext());
1621   debug_only(fc->clearPrev());
1622   NOT_PRODUCT(
1623     if (FLSVerifyIndexTable) {
1624       verifyIndexedFreeList(size);
1625     }
1626   )
1627 }
1628 
1629 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1630   /* A hint is the next larger size that has a surplus.
1631      Start search at a size large enough to guarantee that
1632      the excess is >= MIN_CHUNK. */
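       // Illustrative example: if numWords + MinChunkSize aligns to 12, the
       // search starts at _indexedFreeList[12].hint(); following the hint
       // chain to, say, a size-16 list with a surplus lets getFromListGreater()
       // split a 16-word chunk into the requested chunk plus a remainder of
       // at least MinChunkSize words.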
1633   size_t start = align_object_size(numWords + MinChunkSize);
1634   if (start < IndexSetSize) {
1635     FreeList* it   = _indexedFreeList;
1636     size_t    hint = _indexedFreeList[start].hint();
1637     while (hint < IndexSetSize) {
1638       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1639       FreeList *fl = &_indexedFreeList[hint];
1640       if (fl->surplus() > 0 && fl->head() != NULL) {
1641         // Found a list with surplus, reset original hint
1642         // and split out a free chunk which is returned.
1643         _indexedFreeList[start].set_hint(hint);
1644         FreeChunk* res = getFromListGreater(fl, numWords);
1645         assert(res == NULL || res->isFree(), 
1646           "Should be returning a free chunk");
1647         return res;
1648       }
1649       hint = fl->hint(); /* keep looking */
1650     }
1651     /* None found. */
1652     it[start].set_hint(IndexSetSize);
1653   }
1654   return NULL;
1655 }
1656 
1657 /* Requires fl->size >= numWords + MinChunkSize */
1658 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
1659   size_t numWords) {
1660   FreeChunk *curr = fl->head();
1661   size_t oldNumWords = curr->size();
1662   assert(numWords >= MinChunkSize, "Word size is too small");
1663   assert(curr != NULL, "List is empty");
1664   assert(oldNumWords >= numWords + MinChunkSize, 
1665         "Size of chunks in the list is too small");
1666  
1667   fl->removeChunk(curr);
1668   // recorded indirectly by splitChunkAndReturnRemainder - 
1669   // smallSplit(oldNumWords, numWords);
1670   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1671   // Does anything have to be done for the remainder in terms of
1672   // fixing the card table?
1673   assert(new_chunk == NULL || new_chunk->isFree(), 
1674     "Should be returning a free chunk");
1675   return new_chunk;
1676 }
1677 
1678 FreeChunk*
1679 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1680   size_t new_size) {
1681   assert_locked();
1682   size_t size = chunk->size();
1683   assert(size > new_size, "Split from a smaller block?");
1684   assert(is_aligned(chunk), "alignment problem");
1685   assert(size == adjustObjectSize(size), "alignment problem");
1686   size_t rem_size = size - new_size;
1687   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1688   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
1689   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1690   assert(is_aligned(ffc), "alignment problem");
1691   ffc->setSize(rem_size);
1692   ffc->linkNext(NULL);
1693   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
1694   // Above must occur before BOT is updated below.
1695   // adjust block offset table
1696   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1697   if (rem_size < SmallForDictionary) {
1698     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1699     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1700     returnChunkToFreeList(ffc);
1701     split(size, rem_size);
1702     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1703   } else {
1704     returnChunkToDictionary(ffc);
1705     split(size, rem_size);
1706   }
1707   chunk->setSize(new_size);
1708   return chunk;
1709 }
1710 
1711 void
1712 CompactibleFreeListSpace::sweep_completed() {
1713   // Now that space is probably plentiful, refill linear
1714   // allocation blocks as needed.
1715   refillLinearAllocBlocksIfNeeded();
1716 }
1717 
1718 void
1719 CompactibleFreeListSpace::gc_prologue() {
1720   assert_locked();
1721   if (PrintFLSStatistics != 0) {
1722     gclog_or_tty->print("Before GC:\n");
1723     reportFreeListStatistics();
1724   }
1725   refillLinearAllocBlocksIfNeeded();
1726 }
1727 
1728 void
1729 CompactibleFreeListSpace::gc_epilogue() {
1730   assert_locked();
1731   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1732     if (_smallLinearAllocBlock._word_size == 0)
1733       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1734   }
1735   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1736   _promoInfo.stopTrackingPromotions();
1737   repairLinearAllocationBlocks();
1738   // Print Space's stats
1739   if (PrintFLSStatistics != 0) {
1740     gclog_or_tty->print("After GC:\n");
1741     reportFreeListStatistics();
1742   }
1743 }
1744 
1745 // Iteration support, mostly delegated from a CMS generation
1746 
1747 void CompactibleFreeListSpace::save_marks() {
1748   // mark the "end" of the used space at the time of this call;
1749   // note, however, that promoted objects from this point
1750   // on are tracked in the _promoInfo below.
1751   set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ? 
1752                       unallocated_block() : end());
1753   // inform allocator that promotions should be tracked.
1754   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1755   _promoInfo.startTrackingPromotions();
1756 }
1757 
1758 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1759   assert(_promoInfo.tracking(), "No preceding save_marks?");
1760   guarantee(SharedHeap::heap()->n_par_threads() == 0,
1761             "Shouldn't be called (yet) during parallel part of gc.");
1762   return _promoInfo.noPromotions();
1763 }
1764 
1765 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1766                                                                             \
1767 void CompactibleFreeListSpace::                                             \
1768 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1769   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
1770          "Shouldn't be called (yet) during parallel part of gc.");          \
1771   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1772   /*                                                                        \
1773    * This also restores any displaced headers and removes the elements from \
1774    * the iteration set as they are processed, so that we have a clean slate \
1775    * at the end of the iteration. Note, thus, that if new objects are       \
1776    * promoted as a result of the iteration they are iterated over as well.  \
1777    */                                                                       \
1778   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1779 }
1780 
1781 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1782 
1783 //////////////////////////////////////////////////////////////////////////////
1784 // We go over the list of promoted objects, removing each from the list,    
1785 // and applying the closure (this may, in turn, add more elements to  
1786 // the tail of the promoted list, and these newly added objects will 
1787 // also be processed) until the list is empty.                      
1788 // To aid verification and debugging, in the non-product builds
1789 // we actually forward _promoHead each time we process a promoted oop.
1790 // Note that this is not necessary in general (i.e. when we don't need to
1791 // call PromotionInfo::verify()) because oop_iterate can only add to the
1792 // end of _promoTail, and never needs to look at _promoHead.
1793 
1794 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix)               \
1795                                                                             \
1796 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) {  \
1797   NOT_PRODUCT(verify());                                                    \
1798   PromotedObject *curObj, *nextObj;                                         \
1799   for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {             \
1800     if ((nextObj = curObj->next()) == NULL) {                               \
1801       /* protect ourselves against additions due to closure application     \
1802          below by resetting the list.  */                                   \
1803       assert(_promoTail == curObj, "Should have been the tail");            \
1804       _promoHead = _promoTail = NULL;                                       \
1805     }                                                                       \
1806     if (curObj->hasDisplacedMark()) {                                       \
1807       /* restore displaced header */                                        \
1808       oop(curObj)->set_mark(nextDisplacedHeader());                         \
1809     } else {                                                                \
1810       /* restore prototypical header */                                     \
1811       oop(curObj)->init_mark();                                             \
1812     }                                                                       \
1813     /* The "promoted_mark" should now not be set */                         \
1814     assert(!curObj->hasPromotedMark(),                                      \
1815            "Should have been cleared by restoring displaced mark-word");    \
1816     NOT_PRODUCT(_promoHead = nextObj);                                      \
1817     if (cl != NULL) oop(curObj)->oop_iterate(cl);                           \
1818     if (nextObj == NULL) { /* start at head of list reset above */          \
1819       nextObj = _promoHead;                                                 \
1820     }                                                                       \
1821   }                                                                         \
1822   assert(noPromotions(), "post-condition violation");                       \
1823   assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
1824   assert(_spoolHead == _spoolTail, "emptied spooling buffers");             \
1825   assert(_firstIndex == _nextIndex, "empty buffer");                        \
1826 }
1827 
1828 // This should have been ALL_SINCE_...() just like the others,
1829 // but, because the body of the method above is somewhat longer,
1830 // the MSVC compiler cannot cope; as a workaround, we split the
1831 // macro into its 3 constituent parts below (see original macro
1832 // definition in specializedOopClosures.hpp).
1833 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
1834 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
1835 
1836 
1837 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
1838   // ugghh... how would one do this efficiently for a non-contiguous space?
1839   guarantee(false, "NYI");
1840 }
1841 
1842 bool CompactibleFreeListSpace::linearAllocationWouldFail() const {
1843   return _smallLinearAllocBlock._word_size == 0;
1844 }
1845 
1846 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1847   // Fix up linear allocation blocks to look like free blocks
1848   repairLinearAllocBlock(&_smallLinearAllocBlock);
1849 }
1850 
1851 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1852   assert_locked();
1853   if (blk->_ptr != NULL) {
1854     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1855            "Minimum block size requirement");
1856     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1857     fc->setSize(blk->_word_size);
1858     fc->linkPrev(NULL);   // mark as free
1859     fc->dontCoalesce();
1860     assert(fc->isFree(), "just marked it free");
1861     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1862   }
1863 }
1864 
1865 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1866   assert_locked();
1867   if (_smallLinearAllocBlock._ptr == NULL) {
1868     assert(_smallLinearAllocBlock._word_size == 0, 
1869       "Size of linAB should be zero if the ptr is NULL");
1870     // Reset the linAB refill and allocation size limit.
1871     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1872   }
1873   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1874 }
1875 
1876 void
1877 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1878   assert_locked();
1879   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1880          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1881          "blk invariant");
1882   if (blk->_ptr == NULL) {
1883     refillLinearAllocBlock(blk);
1884   }
1885   if (PrintMiscellaneous && Verbose) {
1886     if (blk->_word_size == 0) {
1887       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
1888     }
1889   }
1890 }
1891 
1892 void
1893 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1894   assert_locked();
1895   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1896          "linear allocation block should be empty");
1897   FreeChunk* fc;
1898   if (blk->_refillSize < SmallForDictionary && 
1899       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1900     // A linAB's strategy might be to use small sizes to reduce
1901     // fragmentation but still get the benefits of allocation from a
1902     // linAB.
1903   } else {
1904     fc = getChunkFromDictionary(blk->_refillSize);
1905   }
1906   if (fc != NULL) {
1907     blk->_ptr  = (HeapWord*)fc;
1908     blk->_word_size = fc->size();
1909     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1910   }
1911 }
1912 
1913 // Support for concurrent collection policy decisions.
1914 bool CompactibleFreeListSpace::should_concurrent_collect() const {
1915   // In the future we might want to add in fragmentation stats --
1916   // including erosion of the "mountain" into this decision as well.
1917   return !adaptive_freelists() && linearAllocationWouldFail();
1918 }
1919 
1920 // Support for compaction
1921 
1922 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1923   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
1924   // prepare_for_compaction() uses the space between live objects
1925   // so that later phase can skip dead space quickly.  So verification
1926   // of the free lists doesn't work after.
1927 }
1928 
1929 #define obj_size(q) adjustObjectSize(oop(q)->size())
1930 #define adjust_obj_size(s) adjustObjectSize(s)
1931 
1932 void CompactibleFreeListSpace::adjust_pointers() {
1933   // In other versions of adjust_pointers(), a bail out
1934   // based on the amount of live data in the generation
1935   // (i.e., if 0, bail out) may be used.
1936   // Cannot test used() == 0 here because the free lists have already
1937   // been mangled by the compaction.
1938 
1939   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
1940   // See note about verification in prepare_for_compaction().
1941 }
1942 
1943 void CompactibleFreeListSpace::compact() {
1944   SCAN_AND_COMPACT(obj_size);
1945 }
1946 
1947 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1948 // where fbs is free block sizes
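     // Illustrative example: two free blocks of 4 words each give
     // 1 - (16 + 16)/(8 * 8) = 0.5, whereas a single free block of 8 words
     // gives 1 - 64/64 = 0.0; smaller values thus indicate less fragmentation.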
1949 double CompactibleFreeListSpace::flsFrag() const {
1950   size_t itabFree = totalSizeInIndexedFreeLists();
1951   double frag = 0.0;
1952   size_t i;
1953 
1954   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1955     double sz  = i;
1956     frag      += _indexedFreeList[i].count() * (sz * sz);
1957   }
1958 
1959   double totFree = itabFree +
1960                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
1961   if (totFree > 0) {
1962     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / 
1963             (totFree * totFree));
1964     frag = (double)1.0  - frag;
1965   } else {
1966     assert(frag == 0.0, "Follows from totFree == 0");
1967   }
1968   return frag;
1969 }
1970 
1971 #define CoalSurplusPercent 1.05
1972 #define SplitSurplusPercent 1.10
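     // Illustrative example: a free list whose computed desired() is 100 is
     // given a coalescing target (coalDesired) of 105 in beginSweepFLCensus()
     // and a split-surplus threshold of 110 in setFLSurplus().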
1973 
1974 void CompactibleFreeListSpace::beginSweepFLCensus(
1975   float inter_sweep_current,
1976   float inter_sweep_estimate) {
1977   assert_locked();
1978   size_t i;
1979   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1980     FreeList* fl    = &_indexedFreeList[i];
1981     fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
1982     fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
1983     fl->set_beforeSweep(fl->count());
1984     fl->set_bfrSurp(fl->surplus());
1985   }
1986   _dictionary->beginSweepDictCensus(CoalSurplusPercent,
1987                                     inter_sweep_current,
1988                                     inter_sweep_estimate);
1989 }
1990 
1991 void CompactibleFreeListSpace::setFLSurplus() {
1992   assert_locked();
1993   size_t i;
1994   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1995     FreeList *fl = &_indexedFreeList[i];
1996     fl->set_surplus(fl->count() - 
1997                     (ssize_t)((double)fl->desired() * SplitSurplusPercent));
1998   }
1999 }
2000 
2001 void CompactibleFreeListSpace::setFLHints() {
2002   assert_locked();
2003   size_t i;
2004   size_t h = IndexSetSize;
2005   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
2006     FreeList *fl = &_indexedFreeList[i];
2007     fl->set_hint(h);
2008     if (fl->surplus() > 0) {
2009       h = i;
2010     }
2011   }
2012 }
2013 
2014 void CompactibleFreeListSpace::clearFLCensus() {
2015   assert_locked();
2016   size_t i;
2017   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2018     FreeList *fl = &_indexedFreeList[i];
2019     fl->set_prevSweep(fl->count());
2020     fl->set_coalBirths(0);
2021     fl->set_coalDeaths(0);
2022     fl->set_splitBirths(0);
2023     fl->set_splitDeaths(0);  
2024   }
2025 }
2026 
2027 void CompactibleFreeListSpace::endSweepFLCensus(size_t sweep_count) {
2028   setFLSurplus();
2029   setFLHints();
2030   if (PrintGC && PrintFLSCensus > 0) {
2031     printFLCensus(sweep_count);
2032   }
2033   clearFLCensus();
2034   assert_locked();
2035   _dictionary->endSweepDictCensus(SplitSurplusPercent);
2036 }
2037 
2038 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2039   if (size < SmallForDictionary) {
2040     FreeList *fl = &_indexedFreeList[size];
2041     return (fl->coalDesired() < 0) ||
2042            ((int)fl->count() > fl->coalDesired());
2043   } else {
2044     return dictionary()->coalDictOverPopulated(size);
2045   }
2046 }
2047 
2048 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2049   assert(size < SmallForDictionary, "Size too large for indexed list");
2050   FreeList *fl = &_indexedFreeList[size];
2051   fl->increment_coalBirths();
2052   fl->increment_surplus();
2053 }
2054 
2055 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2056   assert(size < SmallForDictionary, "Size too large for indexed list");
2057   FreeList *fl = &_indexedFreeList[size];
2058   fl->increment_coalDeaths();
2059   fl->decrement_surplus();
2060 }
2061 
2062 void CompactibleFreeListSpace::coalBirth(size_t size) {
2063   if (size  < SmallForDictionary) {
2064     smallCoalBirth(size);
2065   } else {
2066     dictionary()->dictCensusUpdate(size, 
2067                                    false /* split */, 
2068                                    true /* birth */);
2069   }
2070 }
2071 
2072 void CompactibleFreeListSpace::coalDeath(size_t size) {
2073   if(size  < SmallForDictionary) {
2074     smallCoalDeath(size);
2075   } else {
2076     dictionary()->dictCensusUpdate(size, 
2077                                    false /* split */, 
2078                                    false /* birth */);
2079   }
2080 }
2081 
2082 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2083   assert(size < SmallForDictionary, "Size too large for indexed list");
2084   FreeList *fl = &_indexedFreeList[size];
2085   fl->increment_splitBirths();
2086   fl->increment_surplus();
2087 }
2088 
2089 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2090   assert(size < SmallForDictionary, "Size too large for indexed list");
2091   FreeList *fl = &_indexedFreeList[size];
2092   fl->increment_splitDeaths();
2093   fl->decrement_surplus();
2094 }
2095 
2096 void CompactibleFreeListSpace::splitBirth(size_t size) {
2097   if (size  < SmallForDictionary) {
2098     smallSplitBirth(size);
2099   } else {
2100     dictionary()->dictCensusUpdate(size, 
2101                                    true /* split */, 
2102                                    true /* birth */);
2103   }
2104 }
2105 
2106 void CompactibleFreeListSpace::splitDeath(size_t size) {
2107   if (size  < SmallForDictionary) {
2108     smallSplitDeath(size);
2109   } else {
2110     dictionary()->dictCensusUpdate(size, 
2111                                    true /* split */, 
2112                                    false /* birth */);
2113   }
2114 }
2115 
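     // Illustrative example: split(24, 16) records a split death at size 24
     // and split births at sizes 16 and 8.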
2116 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2117   size_t to2 = from - to1;
2118   splitDeath(from);
2119   splitBirth(to1);
2120   splitBirth(to2);
2121 }
2122 
2123 void CompactibleFreeListSpace::print() const {
2124   tty->print(" CompactibleFreeListSpace");
2125   Space::print();
2126 }
2127 
2128 void CompactibleFreeListSpace::prepare_for_verify() {
2129   assert_locked();
2130   repairLinearAllocationBlocks();
2131   // Verify that the SpoolBlocks look like free blocks of
2132   // appropriate sizes... To be done ...
2133 }
2134 
2135 class VerifyAllBlksClosure: public BlkClosure {
2136  private:
2137   const CompactibleFreeListSpace* _sp;
2138   const MemRegion                 _span;
2139 
2140  public:
2141   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2142     MemRegion span) :  _sp(sp), _span(span) { }
2143 
2144   virtual size_t do_blk(HeapWord* addr) {
2145     size_t res;
2146     if (_sp->block_is_obj(addr)) {
2147       oop p = oop(addr);
2148       guarantee(p->is_oop(), "Should be an oop");
2149       res = _sp->adjustObjectSize(p->size());
2150       if (_sp->obj_is_alive(addr)) {
2151         p->verify();
2152       }
2153     } else {
2154       FreeChunk* fc = (FreeChunk*)addr;
2155       res = fc->size();
2156       if (FLSVerifyLists && !fc->cantCoalesce()) {
2157         guarantee(_sp->verifyChunkInFreeLists(fc),
2158                   "Chunk should be on a free list");
2159       }
2160     }
2161     guarantee(res != 0, "Livelock: no rank reduction!");
2162     return res;
2163   }
2164 };
2165 
2166 class VerifyAllOopsClosure: public OopClosure {
2167  private:
2168   const CMSCollector*             _collector;
2169   const CompactibleFreeListSpace* _sp;
2170   const MemRegion                 _span;
2171   const bool                      _past_remark;
2172   const CMSBitMap*                _bit_map;
2173 
2174  protected:
2175   void do_oop(void* p, oop obj) {
2176     if (_span.contains(obj)) { // the interior oop points into CMS heap
2177       if (!_span.contains(p)) { // reference from outside CMS heap
2178         // Should be a valid object; the first disjunct below allows
2179         // us to sidestep an assertion in block_is_obj() that insists
2180         // that p be in _sp. Note that several generations (and spaces)
2181         // are spanned by _span (CMS heap) above.
2182         guarantee(!_sp->is_in_reserved(obj) ||
2183                   _sp->block_is_obj((HeapWord*)obj),
2184                   "Should be an object");
2185         guarantee(obj->is_oop(), "Should be an oop");
2186         obj->verify();
2187         if (_past_remark) {
2188           // Remark has been completed, the object should be marked
2189           _bit_map->isMarked((HeapWord*)obj);
2190         }
2191       } else { // reference within CMS heap
2192         if (_past_remark) {
2193           // Remark has been completed -- so the referent should have
2194           // been marked, if referring object is.
2195           if (_bit_map->isMarked(_collector->block_start(p))) {
2196             guarantee(_bit_map->isMarked((HeapWord*)obj), "Marking error?");
2197           }
2198         }
2199       }
2200     } else if (_sp->is_in_reserved(p)) {
2201       // the reference is from FLS, and points out of FLS
2202       guarantee(obj->is_oop(), "Should be an oop");
2203       obj->verify();
2204     }
2205   }
2206 
2207   template <class T> void do_oop_work(T* p) {
2208     T heap_oop = oopDesc::load_heap_oop(p);
2209     if (!oopDesc::is_null(heap_oop)) {
2210       oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
2211       do_oop(p, obj);
2212     }
2213   }
2214 
2215  public:
2216   VerifyAllOopsClosure(const CMSCollector* collector,
2217     const CompactibleFreeListSpace* sp, MemRegion span,
2218     bool past_remark, CMSBitMap* bit_map) :
2219     OopClosure(), _collector(collector), _sp(sp), _span(span),
2220     _past_remark(past_remark), _bit_map(bit_map) { }
2221 
2222   virtual void do_oop(oop* p)       { VerifyAllOopsClosure::do_oop_work(p); }
2223   virtual void do_oop(narrowOop* p) { VerifyAllOopsClosure::do_oop_work(p); }
2224 };
2225 
2226 void CompactibleFreeListSpace::verify(bool ignored) const {
2227   assert_lock_strong(&_freelistLock);
2228   verify_objects_initialized();
2229   MemRegion span = _collector->_span;
2230   bool past_remark = (_collector->abstract_state() ==
2231                       CMSCollector::Sweeping);
2232 
2233   ResourceMark rm;
2234   HandleMark  hm;
2235 
2236   // Check integrity of CFL data structures
2237   _promoInfo.verify();
2238   _dictionary->verify();
2239   if (FLSVerifyIndexTable) {
2240     verifyIndexedFreeLists();
2241   }
2242   // Check integrity of all objects and free blocks in space
2243   {
2244     VerifyAllBlksClosure cl(this, span);
2245     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2246   }
2247   // Check that all references in the heap to FLS
2248   // are to valid objects in FLS or that references in
2249   // FLS are to valid objects elsewhere in the heap
2250   if (FLSVerifyAllHeapReferences)
2251   {
2252     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2253       _collector->markBitMap());
2254     CollectedHeap* ch = Universe::heap();
2255     ch->oop_iterate(&cl);              // all oops in generations
2256     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
2257   }
2258 
2259   if (VerifyObjectStartArray) {
2260     // Verify the block offset table
2261     _bt.verify();
2262   }
2263 }
2264 
2265 #ifndef PRODUCT
2266 void CompactibleFreeListSpace::verifyFreeLists() const {
2267   if (FLSVerifyLists) {
2268     _dictionary->verify();
2269     verifyIndexedFreeLists();
2270   } else {
2271     if (FLSVerifyDictionary) {
2272       _dictionary->verify();
2273     }
2274     if (FLSVerifyIndexTable) {
2275       verifyIndexedFreeLists();
2276     }
2277   }
2278 }
2279 #endif
2280 
2281 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2282   size_t i = 0;
2283   for (; i < MinChunkSize; i++) {
2284     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2285   }
2286   for (; i < IndexSetSize; i++) {
2287     verifyIndexedFreeList(i);
2288   }
2289 }
2290 
2291 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2292   FreeChunk* fc =  _indexedFreeList[size].head();
2293   guarantee((size % 2 == 0) || fc == NULL, "Odd slots should be empty");
2294   for (; fc != NULL; fc = fc->next()) {
2295     guarantee(fc->size() == size, "Size inconsistency");
2296     guarantee(fc->isFree(), "!free?");
2297     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2298   }
2299 }
2300 
2301 #ifndef PRODUCT
2302 void CompactibleFreeListSpace::checkFreeListConsistency() const {
2303   assert(_dictionary->minSize() <= IndexSetSize,
2304     "Some sizes can't be allocated without recourse to"
2305     " linear allocation buffers");
2306   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
2307     "else MIN_TREE_CHUNK_SIZE is wrong");
2308   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
2309          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
2310   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
2311       "Some for-loops may be incorrectly initialized");
2312   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
2313       "For-loops that iterate over IndexSet with stride 2 may be wrong");
2314 }
2315 #endif
2316 
2317 void CompactibleFreeListSpace::printFLCensus(size_t sweep_count) const {
2318   assert_lock_strong(&_freelistLock);
2319   FreeList total;
2320   gclog_or_tty->print("end sweep# " SIZE_FORMAT "\n", sweep_count);
2321   FreeList::print_labels_on(gclog_or_tty, "size");
2322   size_t totalFree = 0;
2323   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2324     const FreeList *fl = &_indexedFreeList[i];
2325     totalFree += fl->count() * fl->size();
2326     if (i % (40*IndexSetStride) == 0) {
2327       FreeList::print_labels_on(gclog_or_tty, "size");
2328     }
2329     fl->print_on(gclog_or_tty);
2330     total.set_bfrSurp(    total.bfrSurp()     + fl->bfrSurp()    );
2331     total.set_surplus(    total.surplus()     + fl->surplus()    );
2332     total.set_desired(    total.desired()     + fl->desired()    );
2333     total.set_prevSweep(  total.prevSweep()   + fl->prevSweep()  );
2334     total.set_beforeSweep(total.beforeSweep() + fl->beforeSweep());
2335     total.set_count(      total.count()       + fl->count()      );
2336     total.set_coalBirths( total.coalBirths()  + fl->coalBirths() );
2337     total.set_coalDeaths( total.coalDeaths()  + fl->coalDeaths() );
2338     total.set_splitBirths(total.splitBirths() + fl->splitBirths());
2339     total.set_splitDeaths(total.splitDeaths() + fl->splitDeaths());
2340   }
2341   total.print_on(gclog_or_tty, "TOTAL");
2342   gclog_or_tty->print_cr("Total free in indexed lists "
2343                          SIZE_FORMAT " words", totalFree);
2344   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2345     (double)(total.splitBirths()+total.coalBirths()-total.splitDeaths()-total.coalDeaths())/
2346             (total.prevSweep() != 0 ? (double)total.prevSweep() : 1.0),
2347     (double)(total.desired() - total.count())/(total.desired() != 0 ? (double)total.desired() : 1.0));
2348   _dictionary->printDictCensus();
2349 }
2350 
2351 // Return the next displaced header, incrementing the pointer and
2352 // recycling spool area as necessary.
2353 markOop PromotionInfo::nextDisplacedHeader() {
2354   assert(_spoolHead != NULL, "promotionInfo inconsistency");
2355   assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
2356          "Empty spool space: no displaced header can be fetched");
2357   assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
2358   markOop hdr = _spoolHead->displacedHdr[_firstIndex];
2359   // Spool forward
2360   if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
2361     // forward to next block, recycling this block into spare spool buffer
2362     SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
2363     assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
2364     _spoolHead->nextSpoolBlock = _spareSpool;
2365     _spareSpool = _spoolHead;
2366     _spoolHead = tmp;
2367     _firstIndex = 1;
2368     NOT_PRODUCT(
2369       if (_spoolHead == NULL) {  // all buffers fully consumed
2370         assert(_spoolTail == NULL && _nextIndex == 1,
2371                "spool buffers processing inconsistency");
2372       }
2373     )
2374   } 
2375   return hdr;
2376 }
2377 
2378 void PromotionInfo::track(PromotedObject* trackOop) {
2379   track(trackOop, oop(trackOop)->klass());
2380 }
2381 
2382 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
2383   // make a copy of header as it may need to be spooled
2384   markOop mark = oop(trackOop)->mark();
2385   trackOop->clearNext();
2386   if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
2387     // save non-prototypical header, and mark oop
2388     saveDisplacedHeader(mark);
2389     trackOop->setDisplacedMark();
2390   } else {
2391     // we'd like to assert something like the following:
2392     // assert(mark == markOopDesc::prototype(), "consistency check");
2393     // ... but the above won't work because the age bits have not (yet) been
2394     // cleared. The remainder of the check would be identical to the
2395     // condition checked in must_be_preserved() above, so we don't really
2396     // have anything useful to check here!
2397   }
2398   if (_promoTail != NULL) {
2399     assert(_promoHead != NULL, "List consistency");
2400     _promoTail->setNext(trackOop);
2401     _promoTail = trackOop;
2402   } else {
2403     assert(_promoHead == NULL, "List consistency");
2404     _promoHead = _promoTail = trackOop;
2405   }
2406   // Mark as newly promoted, so we can skip over such objects
2407   // when scanning dirty cards
2408   assert(!trackOop->hasPromotedMark(), "Should not have been marked");
2409   trackOop->setPromotedMark();
2410 }
2411 
2412 // Save the given displaced header, incrementing the pointer and
2413 // obtaining more spool area as necessary.
2414 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
2415   assert(_spoolHead != NULL && _spoolTail != NULL,
2416          "promotionInfo inconsistency");
2417   assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
2418   _spoolTail->displacedHdr[_nextIndex] = hdr;
2419   // Spool forward
2420   if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
2421     // get a new spooling block
2422     assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
2423     _splice_point = _spoolTail;                   // save for splicing
2424     _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
2425     _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
2426     // ... but will attempt filling before next promotion attempt
2427     _nextIndex = 1;
2428   }
2429 }
2430 
2431 // Ensure that spooling space exists. Return false if spooling space
2432 // could not be obtained.
2433 bool PromotionInfo::ensure_spooling_space_work() {
2434   assert(!has_spooling_space(), "Only call when there is no spooling space");
2435   // Try and obtain more spooling space
2436   SpoolBlock* newSpool = getSpoolBlock();
2437   assert(newSpool == NULL ||
2438          (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
2439         "getSpoolBlock() sanity check");
2440   if (newSpool == NULL) {
2441     return false;
2442   }
2443   _nextIndex = 1;
2444   if (_spoolTail == NULL) {
2445     _spoolTail = newSpool;
2446     if (_spoolHead == NULL) {
2447       _spoolHead = newSpool;
2448       _firstIndex = 1;
2449     } else {
2450       assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
2451              "Splice point invariant");
2452       // Extra check that _splice_point is connected to list
2453       #ifdef ASSERT
2454       {
2455         SpoolBlock* blk = _spoolHead;
2456         for (; blk->nextSpoolBlock != NULL;
2457              blk = blk->nextSpoolBlock);
2458         assert(blk != NULL && blk == _splice_point,
2459                "Splice point incorrect");
2460       }
2461       #endif // ASSERT
2462       _splice_point->nextSpoolBlock = newSpool;
2463     }
2464   } else {
2465     assert(_spoolHead != NULL, "spool list consistency");
2466     _spoolTail->nextSpoolBlock = newSpool;
2467     _spoolTail = newSpool;
2468   }
2469   return true;
2470 }
2471 
2472 // Get a free spool buffer from the free pool, getting a new block
2473 // from the heap if necessary.
2474 SpoolBlock* PromotionInfo::getSpoolBlock() {
2475   SpoolBlock* res;
2476   if ((res = _spareSpool) != NULL) {
2477     _spareSpool = _spareSpool->nextSpoolBlock;
2478     res->nextSpoolBlock = NULL;
2479   } else {  // spare spool exhausted, get some from heap
2480     res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
2481     if (res != NULL) {
2482       res->init();
2483     }
2484   }
2485   assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
2486   return res;
2487 }
2488 
2489 void PromotionInfo::startTrackingPromotions() {
2490   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
2491          "spooling inconsistency?");
2492   _firstIndex = _nextIndex = 1;
2493   _tracking = true;
2494 }
2495 
2496 void PromotionInfo::stopTrackingPromotions() {
2497   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
2498          "spooling inconsistency?");
2499   _firstIndex = _nextIndex = 1;
2500   _tracking = false;
2501 }
2502 
2503 // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
2504 // points to the next slot available for filling.
2505 // The set of slots holding displaced headers are then all those in the
2506 // right-open interval denoted by: 
2507 // 
2508 //    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
2509 // 
2510 // When _spoolTail is NULL, then the set of slots with displaced headers
2511 // is all those starting at the slot <_spoolHead, _firstIndex> and
2512 // going up to the last slot of last block in the linked list.
2513 // In this latter case, _splice_point points to the tail block of
2514 // this linked list of blocks holding displaced headers.
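     // Illustrative example: with two spool blocks of (say) bufferSize 5,
     // _firstIndex == 3 and _nextIndex == 2, the occupied slots are
     // <_spoolHead, 3>, <_spoolHead, 4> and <_spoolTail, 1>, i.e. the
     // right-open interval [ <_spoolHead, 3>, <_spoolTail, 2> ).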
2515 void PromotionInfo::verify() const {
2516   // Verify the following:
2517   // 1. the number of displaced headers matches the number of promoted
2518   //    objects that have displaced headers
2519   // 2. each promoted object lies in this space
2520   debug_only(
2521     PromotedObject* junk = NULL;
2522     assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
2523            "Offset of PromotedObject::_next is expected to align with "
2524            "  the OopDesc::_mark within OopDesc");
2525   )
2526   // FIXME: guarantee????
2527   guarantee(_spoolHead == NULL || _spoolTail != NULL ||
2528             _splice_point != NULL, "list consistency");
2529   guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
2530   // count the number of objects with displaced headers
2531   size_t numObjsWithDisplacedHdrs = 0;
2532   for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
2533     guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
2534     // the last promoted object may fail the mark() != NULL test of is_oop().
2535     guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
2536     if (curObj->hasDisplacedMark()) {
2537       numObjsWithDisplacedHdrs++;
2538     }
2539   }
2540   // Count the number of displaced headers
2541   size_t numDisplacedHdrs = 0;
2542   for (SpoolBlock* curSpool = _spoolHead;
2543        curSpool != _spoolTail && curSpool != NULL;
2544        curSpool = curSpool->nextSpoolBlock) {
2545     // the first entry is just a self-pointer; indices 1 through
2546     // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
2547     guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
2548               "first entry of displacedHdr should be self-referential");
2549     numDisplacedHdrs += curSpool->bufferSize - 1;
2550   }
2551   guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
2552             "internal consistency");
2553   guarantee(_spoolTail != NULL || _nextIndex == 1,
2554             "Inconsistency between _spoolTail and _nextIndex");
2555   // We overcounted (_firstIndex-1) worth of slots in block
2556   // _spoolHead and we undercounted (_nextIndex-1) worth of
2557   // slots in block _spoolTail. We make an appropriate
2558   // adjustment by subtracting the first and adding the
2559   // second:  - (_firstIndex - 1) + (_nextIndex - 1) 
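       // Illustrative example: with _firstIndex == 3 and _nextIndex == 2 the
       // head block was overcounted by 2 slots and the tail block
       // undercounted by 1, so the net correction is (2 - 3) == -1.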
2560   numDisplacedHdrs += (_nextIndex - _firstIndex);
2561   guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
2562 }
2563 
2564 
2565 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2566   _cfls(cfls)
2567 {
2568   _blocks_to_claim = CMSParPromoteBlocksToClaim;
2569   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2570        i < CompactibleFreeListSpace::IndexSetSize;
2571        i += CompactibleFreeListSpace::IndexSetStride) {
2572     _indexedFreeList[i].set_size(i);
2573   }
2574 }
2575 
2576 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2577   FreeChunk* res;
2578   word_sz = _cfls->adjustObjectSize(word_sz);
2579   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2580     // This locking manages sync with other large object allocations.
2581     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2582                     Mutex::_no_safepoint_check_flag);
2583     res = _cfls->getChunkFromDictionaryExact(word_sz);
2584     if (res == NULL) return NULL;
2585   } else {
2586     FreeList* fl = &_indexedFreeList[word_sz];
2588     if (fl->count() == 0) {
2590       // Attempt to refill this local free list.
2591       _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
2592       // If it didn't work, give up.
2593       if (fl->count() == 0) return NULL;
2594     }
2595     res = fl->getChunkAtHead();
2596     assert(res != NULL, "Why was count non-zero?");
2597   }
2598   res->markNotFree();
2599   assert(!res->isFree(), "shouldn't be marked free");
2600   assert(oop(res)->klass_or_null() == NULL, "should look uninitialized");
2601   // mangle a just allocated object with a distinct pattern.
2602   debug_only(res->mangleAllocated(word_sz));
2603   return (HeapWord*)res;
2604 }
2605 
2606 void CFLS_LAB::retire() {
2607   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2608        i < CompactibleFreeListSpace::IndexSetSize;
2609        i += CompactibleFreeListSpace::IndexSetStride) {
2610     if (_indexedFreeList[i].count() > 0) {
2611       MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2612                       Mutex::_no_safepoint_check_flag);
2613       _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2614       // Reset this list.
2615       _indexedFreeList[i] = FreeList();
2616       _indexedFreeList[i].set_size(i);
2617     }
2618   }
2619 }
2620 
2621 void
2622 CompactibleFreeListSpace::
2623 par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
2624   assert(fl->count() == 0, "Precondition.");
2625   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2626          "Precondition");
2627 
2628   // We'll try to satisfy the request from the indexed free lists.  (The loop
2629   // below is written so that it could also try successive multiples of word_sz,
     // but the "k == 1" term in its condition currently restricts it to word_sz
     // itself.)  If that fails, we get a big chunk from the dictionary and split it.
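     // For example, a request for n chunks of an (illustrative) word_sz of 8
     // first looks at _indexedFreeList[8]: if that list holds any chunks, up to
     // n of them are taken and prepended to "fl"; only if it is empty do we fall
     // through to the dictionary path below and carve the chunks out of one
     // larger block.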
2630   int k = 1;
2631   size_t cur_sz = k * word_sz;
2632   bool found = false;
2633   while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
2634     FreeList* gfl = &_indexedFreeList[cur_sz];
2635     FreeList fl_for_cur_sz;  // Empty.
2636     fl_for_cur_sz.set_size(cur_sz);
2637     {
2638       MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2639                       Mutex::_no_safepoint_check_flag);
2640       if (gfl->count() != 0) {
2641         size_t nn = MAX2(n/k, (size_t)1);
2642         gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2643         found = true;
2644       }
2645     }
2646     // Now transfer fl_for_cur_sz to fl.  Common case (and, with the current
     // k == 1 restriction, the only case) is k == 1.
2647     if (found) {
2648       if (k == 1) {
2649         fl->prepend(&fl_for_cur_sz);
2650       } else {
2651         // Divide each block on fl_for_cur_sz up k ways.
2652         FreeChunk* fc;
2653         while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
2654           // Must do this in reverse order, so that anybody attempting to
2655           // access the main chunk sees it as a single free block until we
2656           // change it.
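               // (E.g., for k == 3 the sub-chunks at offsets 2*word_sz and
               // word_sz are carved off and initialized before the one at
               // offset 0, which overlays the original chunk's header that
               // other threads may still be reading.)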
2657           size_t fc_size = fc->size();
2658           for (int i = k-1; i >= 0; i--) {
2659             FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2660             ffc->setSize(word_sz);
2661             ffc->linkNext(NULL);
2662             ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2663             // Above must occur before BOT is updated below.
2664             // Splitting from the right: at this point fc_size == (i + 1) * word_sz.
2665             _bt.mark_block((HeapWord*)ffc, word_sz);
2666             fc_size -= word_sz;
2667             _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2668             _bt.verify_single_block((HeapWord*)fc, fc_size);
2669             _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2670             // Push this on "fl".
2671             fl->returnChunkAtHead(ffc);
2672           }
2673           // TRAP
2674           assert(fl->tail()->next() == NULL, "List invariant.");
2675         }
2676       }
2677       return;
2678     }
2679     k++; cur_sz = k * word_sz;
2680   }
2681   // Otherwise, we'll split a block from the dictionary.
2682   FreeChunk* fc = NULL;
2683   FreeChunk* rem_fc = NULL;
2684   size_t rem;
2685   {
2686     MutexLockerEx x(parDictionaryAllocLock(),
2687                     Mutex::_no_safepoint_check_flag);
2688     while (n > 0) {
2689       fc = dictionary()->getChunk(MAX2(n * word_sz, 
2690                                   _dictionary->minSize()),
2691                                   FreeBlockDictionary::atLeast);
2692       if (fc != NULL) {
2693         _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
2694         dictionary()->dictCensusUpdate(fc->size(),
2695                                        true /*split*/,
2696                                        false /*birth*/);
2697         break;
2698       } else {
2699         n--;
2700       }
2701     }
2702     if (fc == NULL) return;
2703     // Otherwise, split up that block.
2704     size_t nn = fc->size() / word_sz;
2705     n = MIN2(nn, n);
2706     rem = fc->size() - n * word_sz;
2707     // If there is a remainder, and it's too small, allocate one fewer.
2708     if (rem > 0 && rem < MinChunkSize) {
2709       n--; rem += word_sz;
2710     }
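         // (For example, with illustrative values word_sz == 8, fc->size() == 85
         // and MinChunkSize == 4: nn == 10, so with n >= 10 we get n == 10 and
         // rem == 5; were MinChunkSize 7 instead, rem would be too small and we
         // would back off to n == 9, rem == 13.)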
2711     // First return the remainder, if any.
2712     // Note that we hold the lock until we decide if we're going to give
2713     // back the remainder to the dictionary, since a contending allocator
2714     // may otherwise see the heap as empty.  (We're willing to take that
2715     // hit if the block is a small block.)
2716     if (rem > 0) {
2717       size_t prefix_size = n * word_sz;
2718       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2719       rem_fc->setSize(rem);
2720       rem_fc->linkNext(NULL);
2721       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2722       // Above must occur before BOT is updated below.
2723       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2724       if (rem >= IndexSetSize) {
2725         returnChunkToDictionary(rem_fc);
2726         // Record the split birth of the remainder chunk, whose size is rem.
2727         dictionary()->dictCensusUpdate(rem, true /*split*/, true /*birth*/);
2729         rem_fc = NULL;
2730       }
2731       // Otherwise, return it to the small list below.
2732     }
2733   }
2734   // Return the (small) remainder, if we kept one, to the indexed free list of its size.
2735   if (rem_fc != NULL) {
2736     MutexLockerEx x(_indexedFreeListParLocks[rem],
2737                     Mutex::_no_safepoint_check_flag);
2738     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2739     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
2740     smallSplitBirth(rem);
2741   }
2742 
2743   // Now do the splitting up.
2744   // Must do this in reverse order, so that anybody attempting to
2745   // access the main chunk sees it as a single free block until we
2746   // change it.
2747   size_t fc_size = n * word_sz;
2748   // All but first chunk in this loop
2749   for (ssize_t i = n-1; i > 0; i--) {
2750     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2751     ffc->setSize(word_sz);
2752     ffc->linkNext(NULL);
2753     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2754     // Above must occur before BOT is updated below.
2755     // Splitting from the right: at this point fc_size == (i + 1) * word_sz.
2756     _bt.mark_block((HeapWord*)ffc, word_sz);
2757     fc_size -= word_sz;
2758     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2759     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2760     _bt.verify_single_block((HeapWord*)fc, fc_size);
2761     // Push this on "fl".
2762     fl->returnChunkAtHead(ffc);
2763   }
2764   // First chunk
2765   fc->setSize(word_sz);
2766   fc->linkNext(NULL);
2767   fc->linkPrev(NULL);
2768   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2769   _bt.verify_single_block((HeapWord*)fc, fc->size());
2770   fl->returnChunkAtHead(fc);
2771 
2772   {
2773     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2774                     Mutex::_no_safepoint_check_flag);
2775     ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
2776     _indexedFreeList[word_sz].set_splitBirths(new_births);
2777     ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2778     _indexedFreeList[word_sz].set_surplus(new_surplus);
2779   }
2780 
2781   // TRAP
2782   assert(fl->tail()->next() == NULL, "List invariant.");
2783 }
2784 
2785 // Set up the space's par_seq_tasks structure for work claiming
2786 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2787 // XXX Need to suitably abstract and generalize this and the next
2788 // method into one.
2789 void
2790 CompactibleFreeListSpace::
2791 initialize_sequential_subtasks_for_rescan(int n_threads) {
2792   // The "size" of each task is fixed according to rescan_task_size.
2793   assert(n_threads > 0, "Unexpected n_threads argument");
2794   const size_t task_size = rescan_task_size();
2795   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
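       // That is, n_tasks == ceil(used_region().word_size() / task_size); e.g.,
       // an (illustrative) used region of 10000 words with a task size of 4096
       // words yields 3 tasks, the last covering the partial tail of the region.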
2796   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2797   assert(n_tasks == 0 ||
2798          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2799           (used_region().start() + n_tasks*task_size >= used_region().end())),
2800          "n_tasks calculation incorrect");
2801   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2802   assert(!pst->valid(), "Clobbering existing data?");
2803   pst->set_par_threads(n_threads);
2804   pst->set_n_tasks((int)n_tasks);
2805 }
2806 
2807 // Set up the space's par_seq_tasks structure for work claiming
2808 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2809 void
2810 CompactibleFreeListSpace::
2811 initialize_sequential_subtasks_for_marking(int n_threads,
2812                                            HeapWord* low) {
2813   // The "size" of each task is fixed according to marking_task_size.
2814   assert(n_threads > 0, "Unexpected n_threads argument");
2815   const size_t task_size = marking_task_size();
2816   assert(task_size > CardTableModRefBS::card_size_in_words &&
2817          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2818          "Otherwise arithmetic below would be incorrect");
2819   MemRegion span = _gen->reserved();
2820   if (low != NULL) {
2821     if (span.contains(low)) {
2822       // Align low down to a card boundary so that
2823       // we can use block_offset_careful() on span boundaries.
2824       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2825                                  CardTableModRefBS::card_size);
2826       // Clip span prefix at aligned_low
2827       span = span.intersection(MemRegion(aligned_low, span.end()));
2828     } else if (low > span.end()) {
2829       span = MemRegion(low, low);  // Null region
2830     } // else use entire span
2831   }
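       // For example (addresses illustrative), with the usual 512-byte card size
       // a "low" of 0x10000a40 inside the span is aligned down to 0x10000a00 and
       // the span's prefix below that address is clipped away, while a "low"
       // beyond span.end() leaves an empty span and hence zero marking tasks.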
2832   assert(span.is_empty() || 
2833          ((uintptr_t)span.start() %  CardTableModRefBS::card_size == 0),
2834         "span should start at a card boundary");
2835   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2836   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2837   assert(n_tasks == 0 ||
2838          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2839           (span.start() + n_tasks*task_size >= span.end())),
2840          "n_tasks calculation incorrect");
2841   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2842   assert(!pst->valid(), "Clobbering existing data?");
2843   pst->set_par_threads(n_threads);
2844   pst->set_n_tasks((int)n_tasks);
2845 }
2846