1 #ifdef USE_PRAGMA_IDENT_SRC
   2 #pragma ident "@(#)compactibleFreeListSpace.cpp 1.144 08/09/06 09:20:55 JVM"
   3 #endif
   4 /*
   5  * Copyright 2001-2006 Sun Microsystems, Inc.  All Rights Reserved.
   6  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   7  *
   8  * This code is free software; you can redistribute it and/or modify it
   9  * under the terms of the GNU General Public License version 2 only, as
  10  * published by the Free Software Foundation.
  11  *
  12  * This code is distributed in the hope that it will be useful, but WITHOUT
  13  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  14  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  15  * version 2 for more details (a copy is included in the LICENSE file that
  16  * accompanied this code).
  17  *
  18  * You should have received a copy of the GNU General Public License version
  19  * 2 along with this work; if not, write to the Free Software Foundation,
  20  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  21  *
  22  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  23  * CA 95054 USA or visit www.sun.com if you need additional information or
  24  * have any questions.
  25  *  
  26  */
  27 
  28 # include "incls/_precompiled.incl"
  29 # include "incls/_compactibleFreeListSpace.cpp.incl"
  30 
  31 /////////////////////////////////////////////////////////////////////////
  32 //// CompactibleFreeListSpace
  33 /////////////////////////////////////////////////////////////////////////
  34 
  35 // Highest lock rank used by the free list locks.
  36 int CompactibleFreeListSpace::_lockRank = Mutex::leaf + 3;
  37 
  38 // Constructor
  39 CompactibleFreeListSpace::CompactibleFreeListSpace(BlockOffsetSharedArray* bs,
  40   MemRegion mr, bool use_adaptive_freelists,
  41   FreeBlockDictionary::DictionaryChoice dictionaryChoice) :
  42   _dictionaryChoice(dictionaryChoice),
  43   _adaptive_freelists(use_adaptive_freelists),
  44   _bt(bs, mr),
  45   // free list locks are in the range of values taken by _lockRank
  46   // This range currently is [_leaf+2, _leaf+3]
  47   // Note: this requires that CFLspace c'tors
  48   // are called serially in the order in which the locks are
  49   // acquired in the program text. This is true today.
  50   _freelistLock(_lockRank--, "CompactibleFreeListSpace._lock", true),
  51   _parDictionaryAllocLock(Mutex::leaf - 1,  // == rank(ExpandHeap_lock) - 1
  52                           "CompactibleFreeListSpace._dict_par_lock", true),
  53   _rescan_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  54                     CMSRescanMultiple),
  55   _marking_task_size(CardTableModRefBS::card_size_in_words * BitsPerWord *
  56                     CMSConcMarkMultiple),
  57   _collector(NULL)
  58 {
  59   _bt.set_space(this);
  60   initialize(mr, true);
  61   // We have all of "mr", all of which we place in the dictionary
  62   // as one big chunk. We'll need to decide here which of several
  63   // possible alternative dictionary implementations to use. For
  64   // now the choice is easy, since we have only one working
  65   // implementation, namely, the simple binary tree (splaying
  66   // temporarily disabled).
  67   switch (dictionaryChoice) {
  68     case FreeBlockDictionary::dictionaryBinaryTree:
  69       _dictionary = new BinaryTreeDictionary(mr);
  70       break;
  71     case FreeBlockDictionary::dictionarySplayTree:
  72     case FreeBlockDictionary::dictionarySkipList:
  73     default:
  74       warning("dictionaryChoice: selected option not understood; using"
  75               " default BinaryTreeDictionary implementation instead.");
  76       _dictionary = new BinaryTreeDictionary(mr);
  77       break;
  78   }
  79   splitBirth(mr.word_size());
  80   assert(_dictionary != NULL, "CMS dictionary initialization");
  81   // The indexed free lists are initially all empty and are lazily
  82   // filled in on demand. Initialize the array elements to NULL.
  83   initializeIndexedFreeListArray();
  84 
  85   // Not using adaptive free lists assumes that allocation is first
  86   // from the linAB's.  Also a cms perm gen which can be compacted
  87   // has to have the klass's klassKlass allocated at a lower
  88   // address in the heap than the klass so that the klassKlass is
  89   // moved to its new location before the klass is moved.
  90   // Set the _refillSize for the linear allocation blocks
  91   if (!use_adaptive_freelists) {
  92     FreeChunk* fc = _dictionary->getChunk(mr.word_size());
  93     // The small linAB initially has all the space and will allocate
  94     // a chunk of any size.
  95     HeapWord* addr = (HeapWord*) fc;
  96     _smallLinearAllocBlock.set(addr, fc->size() , 
  97       1024*SmallForLinearAlloc, fc->size());
  98     // Note that _unallocated_block is not updated here.
  99     // Allocations from the linear allocation block should
 100     // update it.
 101   } else {
 102     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, 
 103                                SmallForLinearAlloc);
 104   }
 105   // CMSIndexedFreeListReplenish should be at least 1
 106   CMSIndexedFreeListReplenish = MAX2((uintx)1, CMSIndexedFreeListReplenish);
 107   _promoInfo.setSpace(this);
 108   if (UseCMSBestFit) {
 109     _fitStrategy = FreeBlockBestFitFirst;
 110   } else {
 111     _fitStrategy = FreeBlockStrategyNone;
 112   }
 113   checkFreeListConsistency();
 114 
 115   // Initialize locks for parallel case.
 116   if (ParallelGCThreads > 0) {
 117     for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 118       _indexedFreeListParLocks[i] = new Mutex(Mutex::leaf - 1, // == ExpandHeap_lock - 1
 119                                               "a freelist par lock",
 120                                               true);
 121       if (_indexedFreeListParLocks[i] == NULL) 
 122         vm_exit_during_initialization("Could not allocate a par lock");
 123       DEBUG_ONLY(
 124         _indexedFreeList[i].set_protecting_lock(_indexedFreeListParLocks[i]);
 125       )
 126     }
 127     _dictionary->set_par_lock(&_parDictionaryAllocLock);
 128   }
 129 }
 130 
 131 // Like CompactibleSpace forward() but always calls cross_threshold() to
 132 // update the block offset table.  Removed initialize_threshold call because
 133 // CFLS does not use a block offset array for contiguous spaces.
 134 HeapWord* CompactibleFreeListSpace::forward(oop q, size_t size, 
 135                                     CompactPoint* cp, HeapWord* compact_top) {
 136   // q is alive
 137   // First check if we should switch compaction space
 138   assert(this == cp->space, "'this' should be current compaction space.");
 139   size_t compaction_max_size = pointer_delta(end(), compact_top);
 140   assert(adjustObjectSize(size) == cp->space->adjust_object_size_v(size),
 141     "virtual adjustObjectSize_v() method is not correct");
 142   size_t adjusted_size = adjustObjectSize(size);
 143   assert(compaction_max_size >= MinChunkSize || compaction_max_size == 0,
 144          "no small fragments allowed");
 145   assert(minimum_free_block_size() == MinChunkSize,
 146          "for de-virtualized reference below");
 147   // Can't leave a nonzero size, residual fragment smaller than MinChunkSize
 148   if (adjusted_size + MinChunkSize > compaction_max_size &&
 149       adjusted_size != compaction_max_size) {
 150     do {
 151       // switch to next compaction space
 152       cp->space->set_compaction_top(compact_top);
 153       cp->space = cp->space->next_compaction_space();
 154       if (cp->space == NULL) {
 155         cp->gen = GenCollectedHeap::heap()->prev_gen(cp->gen);
 156         assert(cp->gen != NULL, "compaction must succeed");
 157         cp->space = cp->gen->first_compaction_space();
 158         assert(cp->space != NULL, "generation must have a first compaction space");
 159       }
 160       compact_top = cp->space->bottom();
 161       cp->space->set_compaction_top(compact_top);
 162       // The correct adjusted_size may not be the same as that for this method
 163       // (i.e., cp->space may no longer be "this"), so adjust the size again.
 164       // Here we use the virtual method, which was not used above in order to
 165       // save the virtual dispatch.
 166       adjusted_size = cp->space->adjust_object_size_v(size);
 167       compaction_max_size = pointer_delta(cp->space->end(), compact_top);
 168       assert(cp->space->minimum_free_block_size() == 0, "just checking");
 169     } while (adjusted_size > compaction_max_size);
 170   }
 171 
 172   // store the forwarding pointer into the mark word
 173   if ((HeapWord*)q != compact_top) {
 174     q->forward_to(oop(compact_top));
 175     assert(q->is_gc_marked(), "encoding the pointer should preserve the mark");
 176   } else {
 177     // if the object isn't moving we can just set the mark to the default
 178     // mark and handle it specially later on.  
 179     q->init_mark();
 180     assert(q->forwardee() == NULL, "should be forwarded to NULL");
 181   }
 182 
 183   debug_only(MarkSweep::register_live_oop(q, adjusted_size));
 184   compact_top += adjusted_size;
 185 
 186   // we need to update the offset table so that the beginnings of objects can be
 187   // found during scavenge.  Note that we are updating the offset table based on
 188   // where the object will be once the compaction phase finishes.
 189 
 190   // Always call cross_threshold().  A contiguous space would call it only
 191   // when compact_top crosses the current threshold, but that shortcut does
 192   // not apply to a non-contiguous space.
 193   cp->threshold =
 194     cp->space->cross_threshold(compact_top - adjusted_size, compact_top);
 195   return compact_top;
 196 }
 197 
 198 // A modified copy of OffsetTableContigSpace::cross_threshold() with _offsets -> _bt
 199 // and use of single_block instead of alloc_block.  The name here is not really
 200 // appropriate - maybe a more general name could be invented for both the
 201 // contiguous and noncontiguous spaces.
 202 
 203 HeapWord* CompactibleFreeListSpace::cross_threshold(HeapWord* start, HeapWord* the_end) {
 204   _bt.single_block(start, the_end);
 205   return end();
 206 }
 207 
 208 // Initialize the indexed free lists: each list starts empty with its size recorded.
 209 void CompactibleFreeListSpace::initializeIndexedFreeListArray() {
 210   for (size_t i = 0; i < IndexSetSize; i++) {
 211     // Note that on platforms where objects are double word aligned,
 212     // the odd array elements are not used.  It is convenient, however,
 213     // to map directly from the object size to the array element.
 214     _indexedFreeList[i].reset(IndexSetSize);
 215     _indexedFreeList[i].set_size(i);
 216     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 217     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 218     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 219     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 220   }
 221 }
 222 
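// Reset each indexed free list to an empty list; the per-list sizes are
// expected to have been set previously (verified by the asserts below).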
 223 void CompactibleFreeListSpace::resetIndexedFreeListArray() {
 224   for (int i = 1; i < IndexSetSize; i++) {
 225     assert(_indexedFreeList[i].size() == (size_t) i, 
 226       "Indexed free list sizes are incorrect");
 227     _indexedFreeList[i].reset(IndexSetSize);
 228     assert(_indexedFreeList[i].count() == 0, "reset check failed");
 229     assert(_indexedFreeList[i].head() == NULL, "reset check failed");
 230     assert(_indexedFreeList[i].tail() == NULL, "reset check failed");
 231     assert(_indexedFreeList[i].hint() == IndexSetSize, "reset check failed");
 232   }
 233 }
 234 
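// Reset the space so that the region "mr" is its only free space: the region
// becomes a single chunk placed in the dictionary or, if small enough, in the
// matching indexed free list.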
 235 void CompactibleFreeListSpace::reset(MemRegion mr) {
 236   resetIndexedFreeListArray();
 237   dictionary()->reset();
 238   if (BlockOffsetArrayUseUnallocatedBlock) {
 239     assert(end() == mr.end(), "We are compacting to the bottom of CMS gen");
 240     // Everything's allocated until proven otherwise.
 241     _bt.set_unallocated_block(end());
 242   }
 243   if (!mr.is_empty()) {
 244     assert(mr.word_size() >= MinChunkSize, "Chunk size is too small");
 245     _bt.single_block(mr.start(), mr.word_size());
 246     FreeChunk* fc = (FreeChunk*) mr.start();
 247     fc->setSize(mr.word_size());
 248     if (mr.word_size() >= IndexSetSize ) {
 249       returnChunkToDictionary(fc);
 250     } else {
 251       _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
 252       _indexedFreeList[mr.word_size()].returnChunkAtHead(fc);
 253     }
 254   }
 255   _promoInfo.reset();
 256   _smallLinearAllocBlock._ptr = NULL;
 257   _smallLinearAllocBlock._word_size = 0;
 258 }
 259 
 260 void CompactibleFreeListSpace::reset_after_compaction() {
 261   // Reset the space to the new reality - one free chunk.
 262   MemRegion mr(compaction_top(), end());
 263   reset(mr);
 264   // Now refill the linear allocation block(s) if possible.
 265   if (_adaptive_freelists) {
 266     refillLinearAllocBlocksIfNeeded();
 267   } else {
 268     // Place as much of mr in the linAB as we can get,
 269     // provided it was big enough to go into the dictionary.
 270     FreeChunk* fc = dictionary()->findLargestDict();
 271     if (fc != NULL) {
 272       assert(fc->size() == mr.word_size(),
 273              "Why was the chunk broken up?");
 274       removeChunkFromDictionary(fc);
 275       HeapWord* addr = (HeapWord*) fc;
 276       _smallLinearAllocBlock.set(addr, fc->size() ,
 277         1024*SmallForLinearAlloc, fc->size());
 278       // Note that _unallocated_block is not updated here.
 279     }
 280   }
 281 }
 282 
 283 // Walks the entire dictionary, returning a coterminal
 284 // chunk, if it exists. Use with caution since it involves
 285 // a potentially complete walk of a potentially large tree.
 286 FreeChunk* CompactibleFreeListSpace::find_chunk_at_end() {
 287 
 288   assert_lock_strong(&_freelistLock);
 289 
 290   return dictionary()->find_chunk_ends_at(end());
 291 }
 292 
 293 
 294 #ifndef PRODUCT
 295 void CompactibleFreeListSpace::initializeIndexedFreeListArrayReturnedBytes() {
 296   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 297     _indexedFreeList[i].allocation_stats()->set_returnedBytes(0);
 298   }
 299 }
 300 
 301 size_t CompactibleFreeListSpace::sumIndexedFreeListArrayReturnedBytes() {
 302   size_t sum = 0;
 303   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 304     sum += _indexedFreeList[i].allocation_stats()->returnedBytes();
 305   }
 306   return sum;
 307 }
 308 
 309 size_t CompactibleFreeListSpace::totalCountInIndexedFreeLists() const {
 310   size_t count = 0;
 311   for (int i = MinChunkSize; i < IndexSetSize; i++) {
 312     debug_only(
 313       ssize_t total_list_count = 0;
 314       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 315          fc = fc->next()) {
 316         total_list_count++;
 317       }
 318       assert(total_list_count ==  _indexedFreeList[i].count(),
 319         "Count in list is incorrect");
 320     )
 321     count += _indexedFreeList[i].count();
 322   }
 323   return count;
 324 }
 325 
 326 size_t CompactibleFreeListSpace::totalCount() {
 327   size_t num = totalCountInIndexedFreeLists();
 328   num +=  dictionary()->totalCount();
 329   if (_smallLinearAllocBlock._word_size != 0) {
 330     num++;
 331   }
 332   return num;
 333 }
 334 #endif
 335 
 336 bool CompactibleFreeListSpace::is_free_block(const HeapWord* p) const {
 337   FreeChunk* fc = (FreeChunk*) p;
 338   return fc->isFree();
 339 }
 340 
 341 size_t CompactibleFreeListSpace::used() const {
 342   return capacity() - free();
 343 }
 344 
 345 size_t CompactibleFreeListSpace::free() const {
 346   // "MT-safe, but not MT-precise"(TM), if you will: i.e.
 347   // if you do this while the structures are in flux you
 348   // may get an approximate answer only; for instance
 349   // because there is concurrent allocation either
 350   // directly by mutators or for promotion during a GC.
 351   // It's "MT-safe", however, in the sense that you are guaranteed
 352   // not to crash and burn, for instance, because of walking
 353   // pointers that could disappear as you were walking them.
 354   // The approximation is because the various components
 355   // that are read below are not read atomically (and,
 356   // further, the computation of totalSizeInIndexedFreeLists()
 357   // is itself a non-atomic computation). The normal use of
 358   // this is during a resize operation at the end of GC
 359   // and at that time you are guaranteed to get the
 360   // correct actual value. However, for instance, this is
 361   // also read completely asynchronously by the "perf-sampler"
 362   // that supports jvmstat, and you are apt to see the values
 363   // flicker in such cases.
 364   assert(_dictionary != NULL, "No _dictionary?");
 365   return (_dictionary->totalChunkSize(DEBUG_ONLY(freelistLock())) +
 366           totalSizeInIndexedFreeLists() +
 367           _smallLinearAllocBlock._word_size) * HeapWordSize;
 368 }
 369 
 370 size_t CompactibleFreeListSpace::max_alloc_in_words() const {
 371   assert(_dictionary != NULL, "No _dictionary?");
 372   assert_locked();
 373   size_t res = _dictionary->maxChunkSize();
 374   res = MAX2(res, MIN2(_smallLinearAllocBlock._word_size,
 375                        (size_t) SmallForLinearAlloc - 1));
 376   // XXX the following could potentially be pretty slow;
 377   // should one, pessimally for the rare cases when res
 378   // calculated above is less than IndexSetSize,
 379   // just return res calculated above? My reasoning was that
 380   // those cases will be so rare that the extra time spent doesn't
 381   // really matter....
 382   // Note: do not change the loop test i >= res + IndexSetStride
 383   // to i > res below, because i is unsigned and res may be zero.
 384   for (size_t i = IndexSetSize - 1; i >= res + IndexSetStride;
 385        i -= IndexSetStride) {
 386     if (_indexedFreeList[i].head() != NULL) {
 387       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 388       return i;
 389     }
 390   }
 391   return res;
 392 }
 393 
 394 void CompactibleFreeListSpace::reportFreeListStatistics() const {
 395   assert_lock_strong(&_freelistLock);
 396   assert(PrintFLSStatistics != 0, "Reporting error");
 397   _dictionary->reportStatistics();
 398   if (PrintFLSStatistics > 1) {
 399     reportIndexedFreeListStatistics();
 400     size_t totalSize = totalSizeInIndexedFreeLists() +
 401                        _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
 402     gclog_or_tty->print(" free=" SIZE_FORMAT " frag=%1.4f\n", totalSize, flsFrag());
 403   }
 404 }
 405 
 406 void CompactibleFreeListSpace::reportIndexedFreeListStatistics() const {
 407   assert_lock_strong(&_freelistLock);
 408   gclog_or_tty->print("Statistics for IndexedFreeLists:\n"
 409                       "--------------------------------\n");
 410   size_t totalSize = totalSizeInIndexedFreeLists();
 411   size_t   freeBlocks = numFreeBlocksInIndexedFreeLists();
 412   gclog_or_tty->print("Total Free Space: " SIZE_FORMAT "\n", totalSize);
 413   gclog_or_tty->print("Max   Chunk Size: " SIZE_FORMAT "\n", maxChunkSizeInIndexedFreeLists());
 414   gclog_or_tty->print("Number of Blocks: " SIZE_FORMAT "\n", freeBlocks);
 415   if (freeBlocks != 0) {
 416     gclog_or_tty->print("Av.  Block  Size: " SIZE_FORMAT "\n", totalSize/freeBlocks);
 417   }
 418 }
 419 
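// Count the free chunks currently held across all of the indexed free lists.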
 420 size_t CompactibleFreeListSpace::numFreeBlocksInIndexedFreeLists() const {
 421   size_t res = 0;
 422   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 423     debug_only(
 424       ssize_t recount = 0;
 425       for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 426          fc = fc->next()) {
 427         recount += 1;
 428       }
 429       assert(recount == _indexedFreeList[i].count(), 
 430         "Incorrect count in list");
 431     )
 432     res += _indexedFreeList[i].count();
 433   }
 434   return res;
 435 }
 436 
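// Return the size of the largest non-empty indexed free list, or 0 if all of
// the indexed lists are empty.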
 437 size_t CompactibleFreeListSpace::maxChunkSizeInIndexedFreeLists() const {
 438   for (size_t i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
 439     if (_indexedFreeList[i].head() != NULL) {
 440       assert(_indexedFreeList[i].count() != 0, "Inconsistent FreeList");
 441       return (size_t)i;
 442     }
 443   }
 444   return 0;
 445 }
 446 
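// Adjust the space to a new end value: resize the block offset table and, if
// the space grew, make the newly covered words available either through the
// small linear allocation block or as a chunk added to the free lists.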
 447 void CompactibleFreeListSpace::set_end(HeapWord* value) {
 448   HeapWord* prevEnd = end();
 449   assert(prevEnd != value, "unnecessary set_end call");
 450   assert(prevEnd == NULL || value >= unallocated_block(), "New end is below unallocated block");
 451   _end = value;
 452   if (prevEnd != NULL) {
 453     // Resize the underlying block offset table.
 454     _bt.resize(pointer_delta(value, bottom()));
 455     if (value <= prevEnd) {
 456       assert(value >= unallocated_block(), "New end is below unallocated block");
 457     } else {
 458       // Now, take this new chunk and add it to the free blocks.
 459       // Note that the BOT has not yet been updated for this block.
 460       size_t newFcSize = pointer_delta(value, prevEnd);
 461       // XXX This is REALLY UGLY and should be fixed up. XXX
 462       if (!_adaptive_freelists && _smallLinearAllocBlock._ptr == NULL) {
 463         // Mark the boundary of the new block in BOT
 464         _bt.mark_block(prevEnd, value);
 465         // put it all in the linAB
 466         if (ParallelGCThreads == 0) {
 467           _smallLinearAllocBlock._ptr = prevEnd;
 468           _smallLinearAllocBlock._word_size = newFcSize;
 469           repairLinearAllocBlock(&_smallLinearAllocBlock);
 470         } else { // ParallelGCThreads > 0
 471           MutexLockerEx x(parDictionaryAllocLock(),
 472                           Mutex::_no_safepoint_check_flag);
 473           _smallLinearAllocBlock._ptr = prevEnd;
 474           _smallLinearAllocBlock._word_size = newFcSize;
 475           repairLinearAllocBlock(&_smallLinearAllocBlock);
 476         }
 477         // Births of chunks put into a LinAB are not recorded.  Births
 478         // of chunks as they are allocated out of a LinAB are.
 479       } else {
 480         // Add the block to the free lists, if possible coalescing it
 481         // with the last free block, and update the BOT and census data.
 482         addChunkToFreeListsAtEndRecordingStats(prevEnd, newFcSize);
 483       }
 484     }
 485   }
 486 }
 487 
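// A DirtyCardToOopClosure specialized for CompactibleFreeListSpace: it walks
// the blocks in a dirty card range and applies the wrapped closure to each
// object that was allocated before the last save_marks and is not known dead,
// using de-virtualized block queries (see the macro below).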
 488 class FreeListSpace_DCTOC : public Filtering_DCTOC {
 489   CompactibleFreeListSpace* _cfls;
 490   CMSCollector* _collector;
 491 protected:
 492   // Override.
 493 #define walk_mem_region_with_cl_DECL(ClosureType)                       \
 494   virtual void walk_mem_region_with_cl(MemRegion mr,                    \
 495                                        HeapWord* bottom, HeapWord* top, \
 496                                        ClosureType* cl);                \
 497       void walk_mem_region_with_cl_par(MemRegion mr,                    \
 498                                        HeapWord* bottom, HeapWord* top, \
 499                                        ClosureType* cl);                \
 500     void walk_mem_region_with_cl_nopar(MemRegion mr,                    \
 501                                        HeapWord* bottom, HeapWord* top, \
 502                                        ClosureType* cl)
 503   walk_mem_region_with_cl_DECL(OopClosure);
 504   walk_mem_region_with_cl_DECL(FilteringClosure);
 505 
 506 public:
 507   FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
 508                       CMSCollector* collector,
 509                       OopClosure* cl,
 510                       CardTableModRefBS::PrecisionStyle precision,
 511                       HeapWord* boundary) :
 512     Filtering_DCTOC(sp, cl, precision, boundary),
 513     _cfls(sp), _collector(collector) {}
 514 };
 515 
 516 // We de-virtualize the block-related calls below, since we know that our
 517 // space is a CompactibleFreeListSpace.
 518 #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
 519 void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
 520                                                  HeapWord* bottom,              \
 521                                                  HeapWord* top,                 \
 522                                                  ClosureType* cl) {             \
 523    if (SharedHeap::heap()->n_par_threads() > 0) {                               \
 524      walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
 525    } else {                                                                     \
 526      walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
 527    }                                                                            \
 528 }                                                                               \
 529 void FreeListSpace_DCTOC::walk_mem_region_with_cl_par(MemRegion mr,             \
 530                                                       HeapWord* bottom,         \
 531                                                       HeapWord* top,            \
 532                                                       ClosureType* cl) {        \
 533   /* Skip parts that are before "mr", in case "block_start" sent us             \
 534      back too far. */                                                           \
 535   HeapWord* mr_start = mr.start();                                              \
 536   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);        \
 537   HeapWord* next = bottom + bot_size;                                           \
 538   while (next < mr_start) {                                                     \
 539     bottom = next;                                                              \
 540     bot_size = _cfls->CompactibleFreeListSpace::block_size(bottom);             \
 541     next = bottom + bot_size;                                                   \
 542   }                                                                             \
 543                                                                                 \
 544   while (bottom < top) {                                                        \
 545     if (_cfls->CompactibleFreeListSpace::block_is_obj(bottom) &&                \
 546         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 547                     oop(bottom)) &&                                             \
 548         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 549       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 550       bottom += _cfls->adjustObjectSize(word_sz);                               \
 551     } else {                                                                    \
 552       bottom += _cfls->CompactibleFreeListSpace::block_size(bottom);            \
 553     }                                                                           \
 554   }                                                                             \
 555 }                                                                               \
 556 void FreeListSpace_DCTOC::walk_mem_region_with_cl_nopar(MemRegion mr,           \
 557                                                         HeapWord* bottom,       \
 558                                                         HeapWord* top,          \
 559                                                         ClosureType* cl) {      \
 560   /* Skip parts that are before "mr", in case "block_start" sent us             \
 561      back too far. */                                                           \
 562   HeapWord* mr_start = mr.start();                                              \
 563   size_t bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);  \
 564   HeapWord* next = bottom + bot_size;                                           \
 565   while (next < mr_start) {                                                     \
 566     bottom = next;                                                              \
 567     bot_size = _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);       \
 568     next = bottom + bot_size;                                                   \
 569   }                                                                             \
 570                                                                                 \
 571   while (bottom < top) {                                                        \
 572     if (_cfls->CompactibleFreeListSpace::block_is_obj_nopar(bottom) &&          \
 573         !_cfls->CompactibleFreeListSpace::obj_allocated_since_save_marks(       \
 574                     oop(bottom)) &&                                             \
 575         !_collector->CMSCollector::is_dead_obj(oop(bottom))) {                  \
 576       size_t word_sz = oop(bottom)->oop_iterate(cl, mr);                        \
 577       bottom += _cfls->adjustObjectSize(word_sz);                               \
 578     } else {                                                                    \
 579       bottom += _cfls->CompactibleFreeListSpace::block_size_nopar(bottom);      \
 580     }                                                                           \
 581   }                                                                             \
 582 }
 583 
 584 // (There are only two of these, rather than N, because the split is due
 585 // only to the introduction of the FilteringClosure, a local part of the
 586 // impl of this abstraction.)
 587 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(OopClosure)
 588 FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
 589 
 590 DirtyCardToOopClosure*
 591 CompactibleFreeListSpace::new_dcto_cl(OopClosure* cl,
 592                                       CardTableModRefBS::PrecisionStyle precision,
 593                                       HeapWord* boundary) {
 594   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
 595 }
 596 
 597 
 598 // Note on locking for the space iteration functions:
 599 // since the collector's iteration activities are concurrent with
 600 // allocation activities by mutators, absent a suitable mutual exclusion
 601 // mechanism the iterators may go awry. For instance, a block being iterated
 602 // may suddenly be allocated or divided up and part of it allocated and
 603 // so on.
 604 
 605 // Apply the given closure to each block in the space.
 606 void CompactibleFreeListSpace::blk_iterate_careful(BlkClosureCareful* cl) {
 607   assert_lock_strong(freelistLock());
 608   HeapWord *cur, *limit;
 609   for (cur = bottom(), limit = end(); cur < limit;
 610        cur += cl->do_blk_careful(cur));
 611 }
 612 
 613 // Apply the given closure to each block in the space.
 614 void CompactibleFreeListSpace::blk_iterate(BlkClosure* cl) {
 615   assert_lock_strong(freelistLock());
 616   HeapWord *cur, *limit;
 617   for (cur = bottom(), limit = end(); cur < limit;
 618        cur += cl->do_blk(cur));
 619 }
 620 
 621 // Apply the given closure to each oop in the space.
 622 void CompactibleFreeListSpace::oop_iterate(OopClosure* cl) {
 623   assert_lock_strong(freelistLock());
 624   HeapWord *cur, *limit;
 625   size_t curSize;
 626   for (cur = bottom(), limit = end(); cur < limit;
 627        cur += curSize) {
 628     curSize = block_size(cur);
 629     if (block_is_obj(cur)) {
 630       oop(cur)->oop_iterate(cl);
 631     }
 632   }
 633 }
 634 
 635 // Apply the given closure to each oop in the intersection of the space and the given memory region.
 636 void CompactibleFreeListSpace::oop_iterate(MemRegion mr, OopClosure* cl) {
 637   assert_lock_strong(freelistLock());
 638   if (is_empty()) {
 639     return;
 640   }
 641   MemRegion cur = MemRegion(bottom(), end());
 642   mr = mr.intersection(cur);
 643   if (mr.is_empty()) {
 644     return;
 645   }
 646   if (mr.equals(cur)) {
 647     oop_iterate(cl);
 648     return;
 649   }
 650   assert(mr.end() <= end(), "just took an intersection above");
 651   HeapWord* obj_addr = block_start(mr.start());
 652   HeapWord* t = mr.end();
 653 
 654   SpaceMemRegionOopsIterClosure smr_blk(cl, mr);
 655   if (block_is_obj(obj_addr)) {
 656     // Handle first object specially.
 657     oop obj = oop(obj_addr);
 658     obj_addr += adjustObjectSize(obj->oop_iterate(&smr_blk));
 659   } else {
 660     FreeChunk* fc = (FreeChunk*)obj_addr;
 661     obj_addr += fc->size();
 662   }
 663   while (obj_addr < t) {
 664     HeapWord* obj = obj_addr;
 665     obj_addr += block_size(obj_addr);
 666     // If "obj_addr" is not past the end of the region, then the
 667     // entire object "obj" is within the region.
 668     if (obj_addr <= t) {    
 669       if (block_is_obj(obj)) {
 670         oop(obj)->oop_iterate(cl);
 671       }               
 672     } else {
 673       // "obj" extends beyond end of region
 674       if (block_is_obj(obj)) {
 675         oop(obj)->oop_iterate(&smr_blk);
 676       }    
 677       break;
 678     }
 679   }
 680 }
 681 
 682 // NOTE: In the following methods, in order to safely be able to
 683 // apply the closure to an object, we need to be sure that the
 684 // object has been initialized. We are guaranteed that an object
 685 // is initialized if we are holding the Heap_lock with the
 686 // world stopped.
 687 void CompactibleFreeListSpace::verify_objects_initialized() const {
 688   if (is_init_completed()) {
 689     assert_locked_or_safepoint(Heap_lock);
 690     if (Universe::is_fully_initialized()) {
 691       guarantee(SafepointSynchronize::is_at_safepoint(),
 692                 "Required for objects to be initialized");
 693     }
 694   } // else make a concession at vm start-up
 695 }
 696 
 697 // Apply the given closure to each object in the space
 698 void CompactibleFreeListSpace::object_iterate(ObjectClosure* blk) {
 699   assert_lock_strong(freelistLock());
 700   NOT_PRODUCT(verify_objects_initialized());
 701   HeapWord *cur, *limit;
 702   size_t curSize;
 703   for (cur = bottom(), limit = end(); cur < limit;
 704        cur += curSize) {
 705     curSize = block_size(cur);
 706     if (block_is_obj(cur)) {
 707       blk->do_object(oop(cur));
 708     }
 709   }
 710 }
 711 
 712 void CompactibleFreeListSpace::object_iterate_mem(MemRegion mr,
 713                                                   UpwardsObjectClosure* cl) {
 714   assert_locked();
 715   NOT_PRODUCT(verify_objects_initialized());
 716   Space::object_iterate_mem(mr, cl);
 717 }
 718 
 719 // Callers of this iterator beware: The closure application should
 720 // be robust in the face of uninitialized objects and should (always)
 721 // return a correct size so that the next addr + size below gives us a
 722 // valid block boundary. [See for instance,
 723 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 724 // in ConcurrentMarkSweepGeneration.cpp.]
 725 HeapWord*
 726 CompactibleFreeListSpace::object_iterate_careful(ObjectClosureCareful* cl) {
 727   assert_lock_strong(freelistLock());
 728   HeapWord *addr, *last;
 729   size_t size;
 730   for (addr = bottom(), last  = end();
 731        addr < last; addr += size) {
 732     FreeChunk* fc = (FreeChunk*)addr;
 733     if (fc->isFree()) {
 734       // Since we hold the free list lock, which protects direct
 735       // allocation in this generation by mutators, a free object
 736       // will remain free throughout this iteration code.
 737       size = fc->size();
 738     } else {
 739       // Note that the object need not necessarily be initialized,
 740       // because (for instance) the free list lock does NOT protect
 741       // object initialization. The closure application below must
 742       // therefore be correct in the face of uninitialized objects.
 743       size = cl->do_object_careful(oop(addr));
 744       if (size == 0) {
 745         // An unparsable object found. Signal early termination.
 746         return addr;
 747       }
 748     }
 749   }
 750   return NULL;
 751 }
 752 
 753 // Callers of this iterator beware: The closure application should
 754 // be robust in the face of uninitialized objects and should (always)
 755 // return a correct size so that the next addr + size below gives us a
 756 // valid block boundary. [See for instance,
 757 // ScanMarkedObjectsAgainCarefullyClosure::do_object_careful()
 758 // in ConcurrentMarkSweepGeneration.cpp.]
 759 HeapWord*
 760 CompactibleFreeListSpace::object_iterate_careful_m(MemRegion mr,
 761   ObjectClosureCareful* cl) {
 762   assert_lock_strong(freelistLock());
 763   // Can't use used_region() below because it may not necessarily
 764   // be the same as [bottom(),end()); although we could
 765   // use [used_region().start(),round_to(used_region().end(),CardSize)),
 766   // that appears too cumbersome, so we just do the simpler check
 767   // in the assertion below.
 768   assert(!mr.is_empty() && MemRegion(bottom(),end()).contains(mr),
 769          "mr should be non-empty and within used space");
 770   HeapWord *addr, *end;
 771   size_t size;
 772   for (addr = block_start_careful(mr.start()), end  = mr.end();
 773        addr < end; addr += size) {
 774     FreeChunk* fc = (FreeChunk*)addr;
 775     if (fc->isFree()) {
 776       // Since we hold the free list lock, which protects direct
 777       // allocation in this generation by mutators, a free object
 778       // will remain free throughout this iteration code.
 779       size = fc->size();
 780     } else {
 781       // Note that the object need not necessarily be initialized,
 782       // because (for instance) the free list lock does NOT protect
 783       // object initialization. The closure application below must
 784       // therefore be correct in the face of uninitialized objects.
 785       size = cl->do_object_careful_m(oop(addr), mr);
 786       if (size == 0) {
 787         // An unparsable object found. Signal early termination.
 788         return addr;
 789       }
 790     }
 791   }
 792   return NULL;
 793 }
 794 
 795 
 796 HeapWord* CompactibleFreeListSpace::block_start(const void* p) const {
 797   NOT_PRODUCT(verify_objects_initialized());
 798   return _bt.block_start(p);
 799 }
 800 
 801 HeapWord* CompactibleFreeListSpace::block_start_careful(const void* p) const {
 802   return _bt.block_start_careful(p);
 803 }
 804 
 805 size_t CompactibleFreeListSpace::block_size(const HeapWord* p) const {
 806   NOT_PRODUCT(verify_objects_initialized());
 807   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 808   // This must be volatile, or else there is a danger that the compiler
 809   // will compile the code below into a sometimes-infinite loop, by keeping 
 810   // the value read the first time in a register.
 811   oop o = (oop)p;
 812   volatile oop* second_word_addr = o->klass_addr();
 813   while (true) {
 814     klassOop k = (klassOop)(*second_word_addr);
 815     // We must do this until we get a consistent view of the object.
 816     if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
 817       FreeChunk* fc = (FreeChunk*)p;
 818       volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
 819       size_t res = (*sz_addr);
 820       klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
 821       if (k == k2) {
 822         assert(res != 0, "Block size should not be 0");
 823         return res;
 824       }
 825     } else if (k != NULL) {
 826       assert(k->is_oop(true /* ignore mark word */), "Should really be klass oop.");
 827       assert(o->is_parsable(), "Should be parsable");
 828       assert(o->is_oop(true /* ignore mark word */), "Should be an oop.");
 829       size_t res = o->size_given_klass(k->klass_part());
 830       res = adjustObjectSize(res);
 831       assert(res != 0, "Block size should not be 0");
 832       return res;
 833     }
 834   }
 835 }
 836 
 837 // A variant of the above that uses the Printezis bits for
 838 // unparsable but allocated objects. This avoids any possible
 839 // stalls waiting for mutators to initialize objects, and is
 840 // thus potentially faster than the variant above. However,
 841 // this variant may return a zero size for a block that is
 842 // under mutation and for which a consistent size cannot be
 843 // inferred without stalling; see CMSCollector::block_size_if_printezis_bits().
 844 size_t CompactibleFreeListSpace::block_size_no_stall(HeapWord* p,
 845                                                      const CMSCollector* c)
 846 const {
 847   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 848   // This must be volatile, or else there is a danger that the compiler
 849   // will compile the code below into a sometimes-infinite loop, by keeping
 850   // the value read the first time in a register.
 851   oop o = (oop)p;
 852   volatile oop* second_word_addr = o->klass_addr();
 853   DEBUG_ONLY(uint loops = 0;)
 854   while (true) {
 855     klassOop k = (klassOop)(*second_word_addr);
 856     // We must do this until we get a consistent view of the object.
 857     if (FreeChunk::secondWordIndicatesFreeChunk((intptr_t)k)) {
 858       FreeChunk* fc = (FreeChunk*)p;
 859       volatile size_t* sz_addr = (volatile size_t*)(fc->size_addr());
 860       size_t res = (*sz_addr);
 861       klassOop k2 = (klassOop)(*second_word_addr);  // Read to confirm.
 862       if (k == k2) {
 863         assert(res != 0, "Block size should not be 0");
 864         assert(loops == 0, "Should be 0");
 865         return res;
 866       }
 867     } else if (k != NULL && o->is_parsable()) {
 868       assert(k->is_oop(), "Should really be klass oop.");
 869       assert(o->is_oop(), "Should be an oop");
 870       size_t res = o->size_given_klass(k->klass_part());
 871       res = adjustObjectSize(res);
 872       assert(res != 0, "Block size should not be 0");
 873       return res;
 874     } else {
 875       return c->block_size_if_printezis_bits(p);
 876     }
 877     assert(loops == 0, "Can loop at most once");
 878     DEBUG_ONLY(loops++;)
 879   }
 880 }
 881 
 882 size_t CompactibleFreeListSpace::block_size_nopar(const HeapWord* p) const {
 883   NOT_PRODUCT(verify_objects_initialized());
 884   assert(MemRegion(bottom(), end()).contains(p), "p not in space");
 885   FreeChunk* fc = (FreeChunk*)p;
 886   if (fc->isFree()) {
 887     return fc->size();
 888   } else {
 889     // Ignore mark word because this may be a recently promoted
 890     // object whose mark word is used to chain together grey
 891     // objects (the last one would have a null value).
 892     assert(oop(p)->is_oop(true), "Should be an oop");
 893     return adjustObjectSize(oop(p)->size());
 894   }
 895 }
 896 
 897 // This implementation assumes that the property of "being an object" is
 898 // stable.  But being a free chunk may not be (because of parallel
 899 // promotion).
 900 bool CompactibleFreeListSpace::block_is_obj(const HeapWord* p) const {
 901   FreeChunk* fc = (FreeChunk*)p;
 902   assert(is_in_reserved(p), "Should be in space");
 903   // When doing a mark-sweep-compact of the CMS generation, this
 904   // assertion may fail because prepare_for_compaction() uses
 905   // space that is garbage to maintain information on ranges of
 906   // live objects so that these live ranges can be moved as a whole.
 907   // Comment out this assertion until that problem can be solved
 908   // (i.e., the block start calculation may look at objects
 909   // at addresses below "p" in finding the object that contains "p",
 910   // and those objects (if garbage) may have been modified to hold
 911   // live range information).
 912   // assert(ParallelGCThreads > 0 || _bt.block_start(p) == p, "Should be a block boundary");
 913   klassOop k = oop(p)->klass();
 914   intptr_t ki = (intptr_t)k;
 915   if (FreeChunk::secondWordIndicatesFreeChunk(ki)) return false;
 916   if (k != NULL) {
 917     // Ignore mark word because it may have been used to
 918     // chain together promoted objects (the last one
 919     // would have a null value).
 920     assert(oop(p)->is_oop(true), "Should be an oop");
 921     return true;
 922   } else {
 923     return false;  // Was not an object at the start of collection.
 924   }
 925 }
 926 
 927 // Check if the object is alive. This fact is checked either by consulting
 928 // the main marking bitmap in the sweeping phase or, if it's a permanent
 929 // generation and we're not in the sweeping phase, by checking the
 930 // perm_gen_verify_bit_map where we store the "deadness" information if
 931 // we did not sweep the perm gen in the most recent previous GC cycle.
 932 bool CompactibleFreeListSpace::obj_is_alive(const HeapWord* p) const {
 933   assert (block_is_obj(p), "The address should point to an object");
 934 
 935   // If we're sweeping, we use object liveness information from the main bit map
 936   // for both perm gen and old gen.
 937   // We don't need to lock the bitmap (live_map or dead_map below), because
 938   // EITHER we are in the middle of the sweeping phase, and the
 939   // main marking bit map (live_map below) is locked,
 940   // OR we're in other phases and perm_gen_verify_bit_map (dead_map below)
 941   // is stable, because it's mutated only in the sweeping phase.
 942   if (_collector->abstract_state() == CMSCollector::Sweeping) {
 943     CMSBitMap* live_map = _collector->markBitMap();
 944     return live_map->isMarked((HeapWord*) p);
 945   } else {
 946     // If we're not currently sweeping and we haven't swept the perm gen in
 947     // the previous concurrent cycle then we may have dead but unswept objects
 948     // in the perm gen. In this case, we use the "deadness" information
 949     // that we had saved in perm_gen_verify_bit_map at the last sweep.
 950     if (!CMSClassUnloadingEnabled && _collector->_permGen->reserved().contains(p)) {
 951       if (_collector->verifying()) {
 952         CMSBitMap* dead_map = _collector->perm_gen_verify_bit_map();
 953         // Object is marked in the dead_map bitmap at the previous sweep
 954         // when we know that it's dead; if the bitmap is not allocated then
 955         // the object is alive.
 956         return (dead_map->sizeInBits() == 0) // bit_map has not been allocated
 957                || !dead_map->par_isMarked((HeapWord*) p);
 958       } else {
 959         return false; // We can't say for sure if it's live, so we say that it's dead.
 960       }
 961     }
 962   }
 963   return true;
 964 }
 965 
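// A simpler variant of block_is_obj() for contexts where the block structure
// is not being mutated in parallel; the free/object distinction can then be
// read directly from the FreeChunk header.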
 966 bool CompactibleFreeListSpace::block_is_obj_nopar(const HeapWord* p) const {
 967   FreeChunk* fc = (FreeChunk*)p;
 968   assert(is_in_reserved(p), "Should be in space");
 969   assert(_bt.block_start(p) == p, "Should be a block boundary");
 970   if (!fc->isFree()) {
 971     // Ignore mark word because it may have been used to
 972     // chain together promoted objects (the last one
 973     // would have a null value).
 974     assert(oop(p)->is_oop(true), "Should be an oop");
 975     return true;
 976   }
 977   return false;
 978 }
 979 
 980 // "MT-safe but not guaranteed MT-precise" (TM); you may get an
 981 // approximate answer if you don't hold the freelistlock when you call this.
 982 size_t CompactibleFreeListSpace::totalSizeInIndexedFreeLists() const {
 983   size_t size = 0;
 984   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
 985     debug_only(
 986       // We may be calling here without the lock in which case we
 987       // won't do this modest sanity check.
 988       if (freelistLock()->owned_by_self()) {
 989         size_t total_list_size = 0;
 990         for (FreeChunk* fc = _indexedFreeList[i].head(); fc != NULL;
 991           fc = fc->next()) {
 992           total_list_size += i;
 993         }
 994         assert(total_list_size == i * _indexedFreeList[i].count(),
 995                "Count in list is incorrect");
 996       }
 997     )
 998     size += i * _indexedFreeList[i].count();
 999   }
1000   return size;
1001 }
1002 
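// MT-safe allocation entry point: take the free list lock (without a
// safepoint check) and delegate to allocate().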
1003 HeapWord* CompactibleFreeListSpace::par_allocate(size_t size) {
1004   MutexLockerEx x(freelistLock(), Mutex::_no_safepoint_check_flag);
1005   return allocate(size);
1006 }
1007 
1008 HeapWord*
1009 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlockRemainder(size_t size) {
1010   return getChunkFromLinearAllocBlockRemainder(&_smallLinearAllocBlock, size);
1011 }
1012 
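// Allocation with the free list lock held; the size must already have been
// adjusted with adjustObjectSize().  Dispatches to the adaptive or the
// non-adaptive free list path.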
1013 HeapWord* CompactibleFreeListSpace::allocate(size_t size) {
1014   assert_lock_strong(freelistLock());
1015   HeapWord* res = NULL;
1016   assert(size == adjustObjectSize(size),
1017          "use adjustObjectSize() before calling into allocate()");
1018   
1019   if (_adaptive_freelists) {
1020     res = allocate_adaptive_freelists(size);
1021   } else {  // non-adaptive free lists
1022     res = allocate_non_adaptive_freelists(size);
1023   }
1024   
1025   if (res != NULL) {
1026     // check that res does lie in this space!
1027     assert(is_in_reserved(res), "Not in this space!");
1028     assert(is_aligned((void*)res), "alignment check");
1029 
1030     FreeChunk* fc = (FreeChunk*)res;
1031     fc->markNotFree();
1032     assert(!fc->isFree(), "shouldn't be marked free");
1033     assert(oop(fc)->klass() == NULL, "should look uninitialized");
1034     // Verify that the block offset table shows this to
1035     // be a single block, but not one which is unallocated.
1036     _bt.verify_single_block(res, size); 
1037     _bt.verify_not_unallocated(res, size);
1038     // mangle a just allocated object with a distinct pattern.
1039     debug_only(fc->mangleAllocated(size));
1040   }
1041   
1042   return res;
1043 }
1044 
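// Non-adaptive path: try the small linear allocation block first, then the
// indexed free lists for small sizes, and finally the dictionary.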
1045 HeapWord* CompactibleFreeListSpace::allocate_non_adaptive_freelists(size_t size) {
1046   HeapWord* res = NULL;
1047   // try and use linear allocation for smaller blocks
1048   if (size < _smallLinearAllocBlock._allocation_size_limit) {
1049     // if successful, the following also adjusts block offset table
1050     res = getChunkFromSmallLinearAllocBlock(size);
1051   }
1052   // Else triage to indexed lists for smaller sizes
1053   if (res == NULL) {
1054     if (size < SmallForDictionary) {
1055       res = (HeapWord*) getChunkFromIndexedFreeList(size);
1056     } else { 
1057       // else get it from the big dictionary; if even this doesn't
1058       // work we are out of luck.
1059       res = (HeapWord*)getChunkFromDictionaryExact(size);
1060     }
1061   }
1062 
1063   return res;
1064 }
1065 
1066 HeapWord* CompactibleFreeListSpace::allocate_adaptive_freelists(size_t size) {
1067   assert_lock_strong(freelistLock());
1068   HeapWord* res = NULL;
1069   assert(size == adjustObjectSize(size),
1070          "use adjustObjectSize() before calling into allocate()");
1071   
1072   // Strategy
1073   //   if small
1074   //     exact size from small object indexed list if small
1075   //     small or large linear allocation block (linAB) as appropriate
1076   //     take from lists of greater sized chunks
1077   //   else
1078   //     dictionary
1079   //     small or large linear allocation block if it has the space
1080   // Try allocating exact size from indexTable first
1081   if (size < IndexSetSize) {
1082     res = (HeapWord*) getChunkFromIndexedFreeList(size);
1083     if(res != NULL) {
1084       assert(res != (HeapWord*)_indexedFreeList[size].head(), 
1085         "Not removed from free list");
1086       // no block offset table adjustment is necessary on blocks in
1087       // the indexed lists.
1088 
1089     // Try allocating from the small LinAB
1090     } else if (size < _smallLinearAllocBlock._allocation_size_limit &&
1091         (res = getChunkFromSmallLinearAllocBlock(size)) != NULL) {
1092         // if successful, the above also adjusts block offset table
1093         // Note that this call will refill the LinAB to 
1094         // satisfy the request.  This is different than
1095         // evm.
1096         // Don't record chunk off a LinAB?  smallSplitBirth(size);
1097   
1098     } else {
1099       // Raid the exact free lists larger than size, even if they are not
1100       // overpopulated.
1101       res = (HeapWord*) getChunkFromGreater(size);
1102     }
1103   } else {
1104     // Big objects get allocated directly from the dictionary.
1105     res = (HeapWord*) getChunkFromDictionaryExact(size);
1106     if (res == NULL) {
1107       // Try hard not to fail since an allocation failure will likely
1108       // trigger a synchronous GC.  Try to get the space from the 
1109       // allocation blocks.
1110       res = getChunkFromSmallLinearAllocBlockRemainder(size);
1111     }
1112   }
1113   
1114   return res;
1115 }
1116 
1117 // A worst-case estimate of the space required (in HeapWords) to expand the heap
1118 // when promoting obj.
1119 size_t CompactibleFreeListSpace::expansionSpaceRequired(size_t obj_size) const {
1120   // Depending on the object size, expansion may require refilling either a
1121   // bigLAB or a smallLAB plus refilling a PromotionInfo object.  MinChunkSize
1122   // is added because the dictionary may over-allocate to avoid fragmentation.
1123   size_t space = obj_size;
1124   if (!_adaptive_freelists) {
1125     space = MAX2(space, _smallLinearAllocBlock._refillSize);
1126   }
1127   space += _promoInfo.refillSize() + 2 * MinChunkSize;
1128   return space;
1129 }
1130 
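// Satisfy a request for numWords by splitting a larger chunk: first look in
// the indexed free lists of strictly larger sizes, then fall back to the
// dictionary, returning the unused remainder to the free lists.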
1131 FreeChunk* CompactibleFreeListSpace::getChunkFromGreater(size_t numWords) {
1132   FreeChunk* ret;
1133 
1134   assert(numWords >= MinChunkSize, "Size is less than minimum");
1135   assert(linearAllocationWouldFail() || bestFitFirst(),
1136     "Should not be here");
1137 
1138   size_t i;
1139   size_t currSize = numWords + MinChunkSize;
1140   assert(currSize % MinObjAlignment == 0, "currSize should be aligned");
1141   for (i = currSize; i < IndexSetSize; i += IndexSetStride) {
1142     FreeList* fl = &_indexedFreeList[i];
1143     if (fl->head()) {
1144       ret = getFromListGreater(fl, numWords);
1145       assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1146       return ret;
1147     }
1148   }
1149 
1150   currSize = MAX2((size_t)SmallForDictionary,
1151                   (size_t)(numWords + MinChunkSize));
1152 
1153   /* Try to get a chunk that satisfies request, while avoiding
1154      fragmentation that can't be handled. */
1155   {
1156     ret =  dictionary()->getChunk(currSize);
1157     if (ret != NULL) {
1158       assert(ret->size() - numWords >= MinChunkSize,
1159              "Chunk is too small");
1160       _bt.allocated((HeapWord*)ret, ret->size());
1161       /* Carve returned chunk. */
1162       (void) splitChunkAndReturnRemainder(ret, numWords);
1163       /* Label this as no longer a free chunk. */
1164       assert(ret->isFree(), "This chunk should be free");
1165       ret->linkPrev(NULL);
1166     }
1167     assert(ret == NULL || ret->isFree(), "Should be returning a free chunk");
1168     return ret;
1169   }
1170   ShouldNotReachHere();
1171 }
1172 
1173 bool CompactibleFreeListSpace::verifyChunkInIndexedFreeLists(FreeChunk* fc) 
1174   const {
1175   assert(fc->size() < IndexSetSize, "Size of chunk is too large");
1176   return _indexedFreeList[fc->size()].verifyChunkInFreeLists(fc);
1177 }
1178 
1179 bool CompactibleFreeListSpace::verifyChunkInFreeLists(FreeChunk* fc) const {
1180   if (fc->size() >= IndexSetSize) {
1181     return dictionary()->verifyChunkInFreeLists(fc);
1182   } else {
1183     return verifyChunkInIndexedFreeLists(fc);
1184   }
1185 }
1186 
1187 #ifndef PRODUCT
1188 void CompactibleFreeListSpace::assert_locked() const {
1189   CMSLockVerifier::assert_locked(freelistLock(), parDictionaryAllocLock());
1190 }
1191 #endif
1192 
1193 FreeChunk* CompactibleFreeListSpace::allocateScratch(size_t size) {
1194   // In the parallel case, the main thread holds the free list lock
1195   // on behalf of the parallel threads.
1196   assert_locked();
1197   FreeChunk* fc;
1198   {
1199     // If GC is parallel, this might be called by several threads.
1200     // This should be rare enough that the locking overhead won't affect
1201     // the sequential code.
1202     MutexLockerEx x(parDictionaryAllocLock(),
1203                     Mutex::_no_safepoint_check_flag);
1204     fc = getChunkFromDictionary(size);
1205   }
1206   if (fc != NULL) {
1207     fc->dontCoalesce();
1208     assert(fc->isFree(), "Should be free, but not coalescable");
1209     // Verify that the block offset table shows this to
1210     // be a single block, but not one which is unallocated.
1211     _bt.verify_single_block((HeapWord*)fc, fc->size());
1212     _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
1213   }
1214   return fc;
1215 }
1216 
1217 oop CompactibleFreeListSpace::promote(oop obj, size_t obj_size, oop* ref) {
1218   assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");
1219   assert_locked();
1220 
  // If we are tracking promotions, first ensure space for the promotion
  // (including spooling space for saving the header if necessary);
  // then allocate and copy, and finally track the promoted info if needed.
  // When tracking (see PromotionInfo::track()), the mark word may be
  // displaced, in which case restoration of the mark word occurs in the
  // (oop_since_save_marks_)iterate phase.
1227   if (_promoInfo.tracking() && !_promoInfo.ensure_spooling_space()) {
1228     return NULL;
1229   }
1230   // Call the allocate(size_t, bool) form directly to avoid the
1231   // additional call through the allocate(size_t) form.  Having
  // the compiler inline the call is problematic because allocate(size_t)
1233   // is a virtual method.
1234   HeapWord* res = allocate(adjustObjectSize(obj_size));
1235   if (res != NULL) {
1236     Copy::aligned_disjoint_words((HeapWord*)obj, res, obj_size);
1237     // if we should be tracking promotions, do so.
1238     if (_promoInfo.tracking()) {
1239         _promoInfo.track((PromotedObject*)res);
1240     }
1241   }
1242   return oop(res);
1243 }
1244 
1245 HeapWord*
1246 CompactibleFreeListSpace::getChunkFromSmallLinearAllocBlock(size_t size) {
1247   assert_locked();
1248   assert(size >= MinChunkSize, "minimum chunk size");
1249   assert(size <  _smallLinearAllocBlock._allocation_size_limit, 
1250     "maximum from smallLinearAllocBlock");
1251   return getChunkFromLinearAllocBlock(&_smallLinearAllocBlock, size);
1252 }
1253 
1254 HeapWord*
1255 CompactibleFreeListSpace::getChunkFromLinearAllocBlock(LinearAllocBlock *blk,
1256                                                        size_t size) {
1257   assert_locked();
1258   assert(size >= MinChunkSize, "too small");
1259   HeapWord* res = NULL;
  // Try to do linear allocation from blk; bail out if the block is empty.
1261   if (blk->_word_size == 0) {
1262     // We have probably been unable to fill this either in the prologue or
1263     // when it was exhausted at the last linear allocation. Bail out until
1264     // next time.
1265     assert(blk->_ptr == NULL, "consistency check");
1266     return NULL;
1267   }
1268   assert(blk->_word_size != 0 && blk->_ptr != NULL, "consistency check");
1269   res = getChunkFromLinearAllocBlockRemainder(blk, size);
1270   if (res != NULL) return res;
1271 
1272   // about to exhaust this linear allocation block
1273   if (blk->_word_size == size) { // exactly satisfied
1274     res = blk->_ptr;
1275     _bt.allocated(res, blk->_word_size);
1276   } else if (size + MinChunkSize <= blk->_refillSize) {
    // Update _unallocated_block if the size is such that the chunk would be
    // returned to the indexed free list.  All other chunks in the indexed
    // free lists are allocated from the dictionary, so _unallocated_block
    // has already been adjusted for them.  Do it here so that the cost is
    // incurred uniformly for all chunks added back to the indexed free lists.
1282     if (blk->_word_size < SmallForDictionary) {
1283       _bt.allocated(blk->_ptr, blk->_word_size);
1284     }
1285     // Return the chunk that isn't big enough, and then refill below.
1286     addChunkToFreeLists(blk->_ptr, blk->_word_size);
1287     _bt.verify_single_block(blk->_ptr, (blk->_ptr + blk->_word_size));
1288     // Don't keep statistics on adding back chunk from a LinAB.
1289   } else {
1290     // A refilled block would not satisfy the request.
1291     return NULL;
1292   }
1293 
1294   blk->_ptr = NULL; blk->_word_size = 0;
1295   refillLinearAllocBlock(blk);
1296   assert(blk->_ptr == NULL || blk->_word_size >= size + MinChunkSize,
1297          "block was replenished");
1298   if (res != NULL) {
1299     splitBirth(size);
1300     repairLinearAllocBlock(blk);
1301   } else if (blk->_ptr != NULL) {
1302     res = blk->_ptr;
1303     size_t blk_size = blk->_word_size;
1304     blk->_word_size -= size;
1305     blk->_ptr  += size;
1306     splitBirth(size);
1307     repairLinearAllocBlock(blk);
1308     // Update BOT last so that other (parallel) GC threads see a consistent
1309     // view of the BOT and free blocks.
1310     // Above must occur before BOT is updated below.
1311     _bt.split_block(res, blk_size, size);  // adjust block offset table
1312   }
1313   return res;
1314 }
1315 
1316 HeapWord*  CompactibleFreeListSpace::getChunkFromLinearAllocBlockRemainder(
1317                                         LinearAllocBlock* blk, 
1318                                         size_t size) {
1319   assert_locked();
1320   assert(size >= MinChunkSize, "too small");
1321 
1322   HeapWord* res = NULL;
1323   // This is the common case.  Keep it simple.
1324   if (blk->_word_size >= size + MinChunkSize) {
1325     assert(blk->_ptr != NULL, "consistency check");
1326     res = blk->_ptr;
1327     // Note that the BOT is up-to-date for the linAB before allocation.  It
1328     // indicates the start of the linAB.  The split_block() updates the
1329     // BOT for the linAB after the allocation (indicates the start of the
1330     // next chunk to be allocated).
1331     size_t blk_size = blk->_word_size;
1332     blk->_word_size -= size;
1333     blk->_ptr  += size;
1334     splitBirth(size);
1335     repairLinearAllocBlock(blk);
1336     // Update BOT last so that other (parallel) GC threads see a consistent
1337     // view of the BOT and free blocks.
1338     // Above must occur before BOT is updated below.
1339     _bt.split_block(res, blk_size, size);  // adjust block offset table
1340     _bt.allocated(res, size);
1341   } 
1342   return res;
1343 }
1344 
1345 FreeChunk* 
1346 CompactibleFreeListSpace::getChunkFromIndexedFreeList(size_t size) {
1347   assert_locked();
1348   assert(size < SmallForDictionary, "just checking");
1349   FreeChunk* res;
1350   res = _indexedFreeList[size].getChunkAtHead();
1351   if (res == NULL) {
1352     res = getChunkFromIndexedFreeListHelper(size);
1353   }
1354   _bt.verify_not_unallocated((HeapWord*) res, size);
1355   return res;
1356 }
1357 
1358 FreeChunk*
1359 CompactibleFreeListSpace::getChunkFromIndexedFreeListHelper(size_t size) {
1360   assert_locked();
1361   FreeChunk* fc = NULL;
1362   if (size < SmallForDictionary) {
1363     assert(_indexedFreeList[size].head() == NULL ||
1364       _indexedFreeList[size].surplus() <= 0,
1365       "List for this size should be empty or under populated");
1366     // Try best fit in exact lists before replenishing the list
1367     if (!bestFitFirst() || (fc = bestFitSmall(size)) == NULL) {
1368       // Replenish list.
1369       //
1370       // Things tried that failed.
1371       //   Tried allocating out of the two LinAB's first before 
1372       // replenishing lists.  
1373       //   Tried small linAB of size 256 (size in indexed list)
1374       // and replenishing indexed lists from the small linAB.
1375       //
1376       FreeChunk* newFc = NULL;
1377       size_t replenish_size = CMSIndexedFreeListReplenish * size;
1378       if (replenish_size < SmallForDictionary) {
1379         // Do not replenish from an underpopulated size.
1380         if (_indexedFreeList[replenish_size].surplus() > 0 &&
1381             _indexedFreeList[replenish_size].head() != NULL) {
1382           newFc = 
1383             _indexedFreeList[replenish_size].getChunkAtHead();
1384         } else {
1385           newFc = bestFitSmall(replenish_size);
1386         }
1387       }
1388       if (newFc != NULL) {
1389         splitDeath(replenish_size);
1390       } else if (replenish_size > size) {
1391         assert(CMSIndexedFreeListReplenish > 1, "ctl pt invariant");
1392         newFc = 
1393           getChunkFromIndexedFreeListHelper(replenish_size);
1394       }
1395       if (newFc != NULL) {
1396         assert(newFc->size() == replenish_size, "Got wrong size");
1397         size_t i;
1398         FreeChunk *curFc, *nextFc;
1399         // carve up and link blocks 0, ..., CMSIndexedFreeListReplenish - 2
1400         // The last chunk is not added to the lists but is returned as the
1401         // free chunk.
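        // For example, if CMSIndexedFreeListReplenish is 4, the chunks at
        // word offsets 0, size and 2*size from newFc are put on
        // _indexedFreeList[size] by the loop, and the chunk at offset 3*size
        // is the one returned below.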
1402         for (curFc = newFc, nextFc = (FreeChunk*)((HeapWord*)curFc + size), 
1403              i = 0;
1404              i < (CMSIndexedFreeListReplenish - 1);
1405              curFc = nextFc, nextFc = (FreeChunk*)((HeapWord*)nextFc + size), 
1406              i++) {
1407           curFc->setSize(size);
1408           // Don't record this as a return in order to try and
1409           // determine the "returns" from a GC.
          _bt.verify_not_unallocated((HeapWord*) curFc, size);
1411           _indexedFreeList[size].returnChunkAtTail(curFc, false);
1412           _bt.mark_block((HeapWord*)curFc, size);
1413           splitBirth(size);
1414           // Don't record the initial population of the indexed list
1415           // as a split birth.
1416         }
1417 
1418         // check that the arithmetic was OK above
1419         assert((HeapWord*)nextFc == (HeapWord*)newFc + replenish_size,
1420           "inconsistency in carving newFc");
1421         curFc->setSize(size);
1422         _bt.mark_block((HeapWord*)curFc, size);
1423         splitBirth(size);
1424         return curFc;
1425       }
1426     }
1427   } else {
1428     // Get a free chunk from the free chunk dictionary to be returned to
1429     // replenish the indexed free list.
1430     fc = getChunkFromDictionaryExact(size);
1431   }
1432   assert(fc == NULL || fc->isFree(), "Should be returning a free chunk");
1433   return fc;
1434 }
1435 
1436 FreeChunk*
1437 CompactibleFreeListSpace::getChunkFromDictionary(size_t size) {
1438   assert_locked();
1439   FreeChunk* fc = _dictionary->getChunk(size);
1440   if (fc == NULL) {
1441     return NULL;
1442   }
1443   _bt.allocated((HeapWord*)fc, fc->size());
1444   if (fc->size() >= size + MinChunkSize) {
1445     fc = splitChunkAndReturnRemainder(fc, size);
1446   }
1447   assert(fc->size() >= size, "chunk too small");
1448   assert(fc->size() < size + MinChunkSize, "chunk too big");
1449   _bt.verify_single_block((HeapWord*)fc, fc->size());
1450   return fc;
1451 }
1452 
1453 FreeChunk*
1454 CompactibleFreeListSpace::getChunkFromDictionaryExact(size_t size) {
1455   assert_locked();
1456   FreeChunk* fc = _dictionary->getChunk(size);
1457   if (fc == NULL) {
1458     return fc;
1459   }
1460   _bt.allocated((HeapWord*)fc, fc->size());
1461   if (fc->size() == size) {
1462     _bt.verify_single_block((HeapWord*)fc, size);
1463     return fc;
1464   }
1465   assert(fc->size() > size, "getChunk() guarantee");
1466   if (fc->size() < size + MinChunkSize) {
1467     // Return the chunk to the dictionary and go get a bigger one.
1468     returnChunkToDictionary(fc);
1469     fc = _dictionary->getChunk(size + MinChunkSize); 
1470     if (fc == NULL) {
1471       return NULL;
1472     }
1473     _bt.allocated((HeapWord*)fc, fc->size());
1474   }
1475   assert(fc->size() >= size + MinChunkSize, "tautology");
1476   fc = splitChunkAndReturnRemainder(fc, size);
1477   assert(fc->size() == size, "chunk is wrong size");
1478   _bt.verify_single_block((HeapWord*)fc, size);
1479   return fc;
1480 }
1481 
1482 void
1483 CompactibleFreeListSpace::returnChunkToDictionary(FreeChunk* chunk) {
1484   assert_locked();
1485 
1486   size_t size = chunk->size();
1487   _bt.verify_single_block((HeapWord*)chunk, size);
1488   // adjust _unallocated_block downward, as necessary
1489   _bt.freed((HeapWord*)chunk, size);
1490   _dictionary->returnChunk(chunk);
1491 }
1492 
1493 void
1494 CompactibleFreeListSpace::returnChunkToFreeList(FreeChunk* fc) {
1495   assert_locked();
1496   size_t size = fc->size();
1497   _bt.verify_single_block((HeapWord*) fc, size);
1498   _bt.verify_not_unallocated((HeapWord*) fc, size);
1499   if (_adaptive_freelists) {
1500     _indexedFreeList[size].returnChunkAtTail(fc);
1501   } else {
1502     _indexedFreeList[size].returnChunkAtHead(fc);
1503   }
1504 }
1505 
1506 // Add chunk to end of last block -- if it's the largest
1507 // block -- and update BOT and census data. We would
1508 // of course have preferred to coalesce it with the
1509 // last block, but it's currently less expensive to find the
1510 // largest block than it is to find the last.
1511 void
1512 CompactibleFreeListSpace::addChunkToFreeListsAtEndRecordingStats(
1513   HeapWord* chunk, size_t     size) {
1514   // check that the chunk does lie in this space!
1515   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1516   assert_locked();
1517   // One of the parallel gc task threads may be here
1518   // whilst others are allocating.
1519   Mutex* lock = NULL;
1520   if (ParallelGCThreads != 0) {
1521     lock = &_parDictionaryAllocLock;
1522   }
1523   FreeChunk* ec;
1524   {
1525     MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1526     ec = dictionary()->findLargestDict();  // get largest block
1527     if (ec != NULL && ec->end() == chunk) {
1528       // It's a coterminal block - we can coalesce.
1529       size_t old_size = ec->size();
1530       coalDeath(old_size);
1531       removeChunkFromDictionary(ec);
1532       size += old_size;
1533     } else {
1534       ec = (FreeChunk*)chunk;
1535     }
1536   }
1537   ec->setSize(size);
1538   debug_only(ec->mangleFreed(size));
1539   if (size < SmallForDictionary) {
1540     lock = _indexedFreeListParLocks[size];
1541   }
1542   MutexLockerEx x(lock, Mutex::_no_safepoint_check_flag);
1543   addChunkAndRepairOffsetTable((HeapWord*)ec, size, true);
  // Record the birth under the lock, since recording involves manipulating
  // the list on which the chunk lives; if the chunk were allocated and were
  // the last one on its list, the list could go away.
1548   coalBirth(size);
1549 }
1550 
1551 void
1552 CompactibleFreeListSpace::addChunkToFreeLists(HeapWord* chunk,
1553                                               size_t     size) {
1554   // check that the chunk does lie in this space!
1555   assert(chunk != NULL && is_in_reserved(chunk), "Not in this space!");
1556   assert_locked();
1557   _bt.verify_single_block(chunk, size);
1558 
1559   FreeChunk* fc = (FreeChunk*) chunk;
1560   fc->setSize(size);
1561   debug_only(fc->mangleFreed(size));
1562   if (size < SmallForDictionary) {
1563     returnChunkToFreeList(fc);
1564   } else {
1565     returnChunkToDictionary(fc);
1566   }
1567 }
1568 
1569 void
1570 CompactibleFreeListSpace::addChunkAndRepairOffsetTable(HeapWord* chunk,
1571   size_t size, bool coalesced) {
1572   assert_locked();
1573   assert(chunk != NULL, "null chunk");
1574   if (coalesced) {
1575     // repair BOT
1576     _bt.single_block(chunk, size);
1577   }
1578   addChunkToFreeLists(chunk, size);
1579 }
1580 
1581 // We _must_ find the purported chunk on our free lists;
1582 // we assert if we don't.
1583 void
1584 CompactibleFreeListSpace::removeFreeChunkFromFreeLists(FreeChunk* fc) {
1585   size_t size = fc->size();
1586   assert_locked();
1587   debug_only(verifyFreeLists());
1588   if (size < SmallForDictionary) {
1589     removeChunkFromIndexedFreeList(fc);
1590   } else {
1591     removeChunkFromDictionary(fc);
1592   }
1593   _bt.verify_single_block((HeapWord*)fc, size);
1594   debug_only(verifyFreeLists());
1595 }
1596 
1597 void
1598 CompactibleFreeListSpace::removeChunkFromDictionary(FreeChunk* fc) {
1599   size_t size = fc->size();
1600   assert_locked();
1601   assert(fc != NULL, "null chunk");
1602   _bt.verify_single_block((HeapWord*)fc, size);
1603   _dictionary->removeChunk(fc);
1604   // adjust _unallocated_block upward, as necessary
1605   _bt.allocated((HeapWord*)fc, size);
1606 }
1607 
1608 void
1609 CompactibleFreeListSpace::removeChunkFromIndexedFreeList(FreeChunk* fc) {
1610   assert_locked();
1611   size_t size = fc->size();
1612   _bt.verify_single_block((HeapWord*)fc, size);
1613   NOT_PRODUCT(
1614     if (FLSVerifyIndexTable) {
1615       verifyIndexedFreeList(size);
1616     }
1617   )
1618   _indexedFreeList[size].removeChunk(fc);
1619   debug_only(fc->clearNext());
1620   debug_only(fc->clearPrev());
1621   NOT_PRODUCT(
1622     if (FLSVerifyIndexTable) {
1623       verifyIndexedFreeList(size);
1624     }
1625   )
1626 }
1627 
1628 FreeChunk* CompactibleFreeListSpace::bestFitSmall(size_t numWords) {
1629   /* A hint is the next larger size that has a surplus.
1630      Start search at a size large enough to guarantee that
1631      the excess is >= MIN_CHUNK. */
1632   size_t start = align_object_size(numWords + MinChunkSize);
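  /* Starting at numWords + MinChunkSize (suitably aligned) guarantees that a
     chunk taken from any list we consult leaves a remainder of at least
     MinChunkSize after getFromListGreater() carves out numWords. */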
1633   if (start < IndexSetSize) {
1634     FreeList* it   = _indexedFreeList;
1635     size_t    hint = _indexedFreeList[start].hint();
1636     while (hint < IndexSetSize) {
1637       assert(hint % MinObjAlignment == 0, "hint should be aligned");
1638       FreeList *fl = &_indexedFreeList[hint];
1639       if (fl->surplus() > 0 && fl->head() != NULL) {
1640         // Found a list with surplus, reset original hint
1641         // and split out a free chunk which is returned.
1642         _indexedFreeList[start].set_hint(hint);
1643         FreeChunk* res = getFromListGreater(fl, numWords);
1644         assert(res == NULL || res->isFree(), 
1645           "Should be returning a free chunk");
1646         return res;
1647       }
1648       hint = fl->hint(); /* keep looking */
1649     }
1650     /* None found. */
1651     it[start].set_hint(IndexSetSize);
1652   }
1653   return NULL;
1654 }
1655 
1656 /* Requires fl->size >= numWords + MinChunkSize */
1657 FreeChunk* CompactibleFreeListSpace::getFromListGreater(FreeList* fl,
1658   size_t numWords) {
1659   FreeChunk *curr = fl->head();
1660   size_t oldNumWords = curr->size();
1661   assert(numWords >= MinChunkSize, "Word size is too small");
1662   assert(curr != NULL, "List is empty");
1663   assert(oldNumWords >= numWords + MinChunkSize, 
1664         "Size of chunks in the list is too small");
1665  
1666   fl->removeChunk(curr);
1667   // recorded indirectly by splitChunkAndReturnRemainder - 
1668   // smallSplit(oldNumWords, numWords);
1669   FreeChunk* new_chunk = splitChunkAndReturnRemainder(curr, numWords);
1670   // Does anything have to be done for the remainder in terms of
1671   // fixing the card table?
1672   assert(new_chunk == NULL || new_chunk->isFree(), 
1673     "Should be returning a free chunk");
1674   return new_chunk;
1675 }
1676 
1677 FreeChunk*
1678 CompactibleFreeListSpace::splitChunkAndReturnRemainder(FreeChunk* chunk,
1679   size_t new_size) {
1680   assert_locked();
1681   size_t size = chunk->size();
1682   assert(size > new_size, "Split from a smaller block?");
1683   assert(is_aligned(chunk), "alignment problem");
1684   assert(size == adjustObjectSize(size), "alignment problem");
1685   size_t rem_size = size - new_size;
1686   assert(rem_size == adjustObjectSize(rem_size), "alignment problem");
1687   assert(rem_size >= MinChunkSize, "Free chunk smaller than minimum");
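  // The remainder becomes a new free chunk occupying the tail of the original
  // block, i.e. the word range [chunk + new_size, chunk + size); the caller
  // receives the leading new_size words.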
1688   FreeChunk* ffc = (FreeChunk*)((HeapWord*)chunk + new_size);
1689   assert(is_aligned(ffc), "alignment problem");
1690   ffc->setSize(rem_size);
1691   ffc->linkNext(NULL);
1692   ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
1693   // Above must occur before BOT is updated below.
1694   // adjust block offset table
1695   _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
1696   if (rem_size < SmallForDictionary) {
1697     bool is_par = (SharedHeap::heap()->n_par_threads() > 0);
1698     if (is_par) _indexedFreeListParLocks[rem_size]->lock();
1699     returnChunkToFreeList(ffc);
1700     split(size, rem_size);
1701     if (is_par) _indexedFreeListParLocks[rem_size]->unlock();
1702   } else {
1703     returnChunkToDictionary(ffc);
    split(size, rem_size);
1705   }
1706   chunk->setSize(new_size);
1707   return chunk;
1708 }
1709 
1710 void
1711 CompactibleFreeListSpace::sweep_completed() {
1712   // Now that space is probably plentiful, refill linear
1713   // allocation blocks as needed.
1714   refillLinearAllocBlocksIfNeeded();
1715 }
1716 
1717 void
1718 CompactibleFreeListSpace::gc_prologue() {
1719   assert_locked();
1720   if (PrintFLSStatistics != 0) {
1721     gclog_or_tty->print("Before GC:\n");
1722     reportFreeListStatistics();
1723   }
1724   refillLinearAllocBlocksIfNeeded();
1725 }
1726 
1727 void
1728 CompactibleFreeListSpace::gc_epilogue() {
1729   assert_locked();
1730   if (PrintGCDetails && Verbose && !_adaptive_freelists) {
1731     if (_smallLinearAllocBlock._word_size == 0)
1732       warning("CompactibleFreeListSpace(epilogue):: Linear allocation failure");
1733   }
1734   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1735   _promoInfo.stopTrackingPromotions();
1736   repairLinearAllocationBlocks();
1737   // Print Space's stats
1738   if (PrintFLSStatistics != 0) {
1739     gclog_or_tty->print("After GC:\n");
1740     reportFreeListStatistics();
1741   }
1742 }
1743 
1744 // Iteration support, mostly delegated from a CMS generation
1745 
1746 void CompactibleFreeListSpace::save_marks() {
1747   // mark the "end" of the used space at the time of this call;
1748   // note, however, that promoted objects from this point
1749   // on are tracked in the _promoInfo below.
1750   set_saved_mark_word(BlockOffsetArrayUseUnallocatedBlock ? 
1751                       unallocated_block() : end());
1752   // inform allocator that promotions should be tracked.
1753   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");
1754   _promoInfo.startTrackingPromotions();
1755 }
1756 
1757 bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
1758   assert(_promoInfo.tracking(), "No preceding save_marks?");
1759   guarantee(SharedHeap::heap()->n_par_threads() == 0,
1760             "Shouldn't be called (yet) during parallel part of gc.");
1761   return _promoInfo.noPromotions();
1762 }
1763 
1764 #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)           \
1765                                                                             \
1766 void CompactibleFreeListSpace::                                             \
1767 oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {              \
1768   assert(SharedHeap::heap()->n_par_threads() == 0,                          \
1769          "Shouldn't be called (yet) during parallel part of gc.");          \
1770   _promoInfo.promoted_oops_iterate##nv_suffix(blk);                         \
1771   /*                                                                        \
1772    * This also restores any displaced headers and removes the elements from \
1773    * the iteration set as they are processed, so that we have a clean slate \
1774    * at the end of the iteration. Note, thus, that if new objects are       \
1775    * promoted as a result of the iteration they are iterated over as well.  \
1776    */                                                                       \
1777   assert(_promoInfo.noPromotions(), "_promoInfo inconsistency");            \
1778 }
1779 
1780 ALL_SINCE_SAVE_MARKS_CLOSURES(CFLS_OOP_SINCE_SAVE_MARKS_DEFN)
1781 
1782 //////////////////////////////////////////////////////////////////////////////
1783 // We go over the list of promoted objects, removing each from the list,    
1784 // and applying the closure (this may, in turn, add more elements to  
1785 // the tail of the promoted list, and these newly added objects will 
1786 // also be processed) until the list is empty.                      
1787 // To aid verification and debugging, in the non-product builds
1788 // we actually forward _promoHead each time we process a promoted oop.
1789 // Note that this is not necessary in general (i.e. when we don't need to
1790 // call PromotionInfo::verify()) because oop_iterate can only add to the
1791 // end of _promoTail, and never needs to look at _promoHead.
1792 
1793 #define PROMOTED_OOPS_ITERATE_DEFN(OopClosureType, nv_suffix)               \
1794                                                                             \
1795 void PromotionInfo::promoted_oops_iterate##nv_suffix(OopClosureType* cl) {  \
1796   NOT_PRODUCT(verify());                                                    \
1797   PromotedObject *curObj, *nextObj;                                         \
1798   for (curObj = _promoHead; curObj != NULL; curObj = nextObj) {             \
1799     if ((nextObj = curObj->next()) == NULL) {                               \
1800       /* protect ourselves against additions due to closure application     \
1801          below by resetting the list.  */                                   \
1802       assert(_promoTail == curObj, "Should have been the tail");            \
1803       _promoHead = _promoTail = NULL;                                       \
1804     }                                                                       \
1805     if (curObj->hasDisplacedMark()) {                                       \
1806       /* restore displaced header */                                        \
1807       oop(curObj)->set_mark(nextDisplacedHeader());                         \
1808     } else {                                                                \
1809       /* restore prototypical header */                                     \
1810       oop(curObj)->init_mark();                                             \
1811     }                                                                       \
1812     /* The "promoted_mark" should now not be set */                         \
1813     assert(!curObj->hasPromotedMark(),                                      \
1814            "Should have been cleared by restoring displaced mark-word");    \
1815     NOT_PRODUCT(_promoHead = nextObj);                                      \
1816     if (cl != NULL) oop(curObj)->oop_iterate(cl);                           \
1817     if (nextObj == NULL) { /* start at head of list reset above */          \
1818       nextObj = _promoHead;                                                 \
1819     }                                                                       \
1820   }                                                                         \
1821   assert(noPromotions(), "post-condition violation");                       \
1822   assert(_promoHead == NULL && _promoTail == NULL, "emptied promoted list");\
1823   assert(_spoolHead == _spoolTail, "emptied spooling buffers");             \
1824   assert(_firstIndex == _nextIndex, "empty buffer");                        \
1825 }
1826 
1827 // This should have been ALL_SINCE_...() just like the others,
// but, because the body of the method above is somewhat longer,
1829 // the MSVC compiler cannot cope; as a workaround, we split the
1830 // macro into its 3 constituent parts below (see original macro
1831 // definition in specializedOopClosures.hpp).
1832 SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES_YOUNG(PROMOTED_OOPS_ITERATE_DEFN)
1833 PROMOTED_OOPS_ITERATE_DEFN(OopsInGenClosure,_v)
1834 
1835 
1836 void CompactibleFreeListSpace::object_iterate_since_last_GC(ObjectClosure* cl) {
1837   // ugghh... how would one do this efficiently for a non-contiguous space?
1838   guarantee(false, "NYI");
1839 }
1840 
1841 bool CompactibleFreeListSpace::linearAllocationWouldFail() {
1842   return _smallLinearAllocBlock._word_size == 0;
1843 }
1844 
1845 void CompactibleFreeListSpace::repairLinearAllocationBlocks() {
1846   // Fix up linear allocation blocks to look like free blocks
1847   repairLinearAllocBlock(&_smallLinearAllocBlock);
1848 }
1849 
1850 void CompactibleFreeListSpace::repairLinearAllocBlock(LinearAllocBlock* blk) {
1851   assert_locked();
1852   if (blk->_ptr != NULL) {
1853     assert(blk->_word_size != 0 && blk->_word_size >= MinChunkSize,
1854            "Minimum block size requirement");
1855     FreeChunk* fc = (FreeChunk*)(blk->_ptr);
1856     fc->setSize(blk->_word_size);
1857     fc->linkPrev(NULL);   // mark as free
1858     fc->dontCoalesce();
1859     assert(fc->isFree(), "just marked it free");
1860     assert(fc->cantCoalesce(), "just marked it uncoalescable");
1861   }
1862 }
1863 
1864 void CompactibleFreeListSpace::refillLinearAllocBlocksIfNeeded() {
1865   assert_locked();
1866   if (_smallLinearAllocBlock._ptr == NULL) {
1867     assert(_smallLinearAllocBlock._word_size == 0, 
1868       "Size of linAB should be zero if the ptr is NULL");
1869     // Reset the linAB refill and allocation size limit.
1870     _smallLinearAllocBlock.set(0, 0, 1024*SmallForLinearAlloc, SmallForLinearAlloc);
1871   }
1872   refillLinearAllocBlockIfNeeded(&_smallLinearAllocBlock);
1873 }
1874 
1875 void
1876 CompactibleFreeListSpace::refillLinearAllocBlockIfNeeded(LinearAllocBlock* blk) {
1877   assert_locked();
1878   assert((blk->_ptr == NULL && blk->_word_size == 0) ||
1879          (blk->_ptr != NULL && blk->_word_size >= MinChunkSize),
1880          "blk invariant");
1881   if (blk->_ptr == NULL) {
1882     refillLinearAllocBlock(blk);
1883   }
1884   if (PrintMiscellaneous && Verbose) {
1885     if (blk->_word_size == 0) {
1886       warning("CompactibleFreeListSpace(prologue):: Linear allocation failure");
1887     }
1888   }
1889 }
1890 
1891 void
1892 CompactibleFreeListSpace::refillLinearAllocBlock(LinearAllocBlock* blk) {
1893   assert_locked();
1894   assert(blk->_word_size == 0 && blk->_ptr == NULL,
1895          "linear allocation block should be empty");
1896   FreeChunk* fc;
1897   if (blk->_refillSize < SmallForDictionary && 
1898       (fc = getChunkFromIndexedFreeList(blk->_refillSize)) != NULL) {
1899     // A linAB's strategy might be to use small sizes to reduce
1900     // fragmentation but still get the benefits of allocation from a
1901     // linAB.
1902   } else {
1903     fc = getChunkFromDictionary(blk->_refillSize);
1904   }
1905   if (fc != NULL) {
1906     blk->_ptr  = (HeapWord*)fc;
1907     blk->_word_size = fc->size();
1908     fc->dontCoalesce();   // to prevent sweeper from sweeping us up
1909   }
1910 }
1911 
1912 // Support for compaction
1913 
1914 void CompactibleFreeListSpace::prepare_for_compaction(CompactPoint* cp) {
1915   SCAN_AND_FORWARD(cp,end,block_is_obj,block_size);
1916   // prepare_for_compaction() uses the space between live objects
1917   // so that later phase can skip dead space quickly.  So verification
1918   // of the free lists doesn't work after.
1919 }
1920 
1921 #define obj_size(q) adjustObjectSize(oop(q)->size())
1922 #define adjust_obj_size(s) adjustObjectSize(s)
1923 
1924 void CompactibleFreeListSpace::adjust_pointers() {
1925   // In other versions of adjust_pointers(), a bail out
1926   // based on the amount of live data in the generation
1927   // (i.e., if 0, bail out) may be used.
1928   // Cannot test used() == 0 here because the free lists have already
1929   // been mangled by the compaction.
1930 
1931   SCAN_AND_ADJUST_POINTERS(adjust_obj_size);
1932   // See note about verification in prepare_for_compaction().
1933 }
1934 
1935 void CompactibleFreeListSpace::compact() {
1936   SCAN_AND_COMPACT(obj_size);
1937 }
1938 
1939 // fragmentation_metric = 1 - [sum of (fbs**2) / (sum of fbs)**2]
1940 // where fbs is free block sizes
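// For example (illustrative numbers only): two free blocks of size 4 yield
// 1 - (16 + 16)/(4 + 4)^2 = 0.5, whereas a single free block of size 8 yields
// 1 - 64/64 = 0.0; the metric approaches 1.0 as free space fragments into
// many small pieces.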
1941 double CompactibleFreeListSpace::flsFrag() const {
1942   size_t itabFree = totalSizeInIndexedFreeLists();
1943   double frag = 0.0;
1944   size_t i;
1945 
1946   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1947     double sz  = i;
1948     frag      += _indexedFreeList[i].count() * (sz * sz);
1949   }
1950 
1951   double totFree = itabFree +
1952                    _dictionary->totalChunkSize(DEBUG_ONLY(freelistLock()));
1953   if (totFree > 0) {
1954     frag = ((frag + _dictionary->sum_of_squared_block_sizes()) / 
1955             (totFree * totFree));
1956     frag = (double)1.0  - frag;
1957   } else {
1958     assert(frag == 0.0, "Follows from totFree == 0");
1959   }
1960   return frag;
1961 }
1962 
1963 #define CoalSurplusPercent 1.05
1964 #define SplitSurplusPercent 1.10
1965 
1966 void CompactibleFreeListSpace::beginSweepFLCensus(
1967   float inter_sweep_current,
1968   float inter_sweep_estimate) {
1969   assert_locked();
1970   size_t i;
1971   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1972     FreeList* fl    = &_indexedFreeList[i];
1973     fl->compute_desired(inter_sweep_current, inter_sweep_estimate);
1974     fl->set_coalDesired((ssize_t)((double)fl->desired() * CoalSurplusPercent));
1975     fl->set_beforeSweep(fl->count());
1976     fl->set_bfrSurp(fl->surplus());
1977   }
1978   _dictionary->beginSweepDictCensus(CoalSurplusPercent,
1979                                     inter_sweep_current,
1980                                     inter_sweep_estimate);
1981 }
1982 
1983 void CompactibleFreeListSpace::setFLSurplus() {
1984   assert_locked();
1985   size_t i;
1986   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
1987     FreeList *fl = &_indexedFreeList[i];
1988     fl->set_surplus(fl->count() - 
1989                     (ssize_t)((double)fl->desired() * SplitSurplusPercent));
1990   }
1991 }
1992 
1993 void CompactibleFreeListSpace::setFLHints() {
1994   assert_locked();
1995   size_t i;
1996   size_t h = IndexSetSize;
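  // Walk the sizes from largest to smallest so that each list's hint ends up
  // pointing at the nearest strictly larger size that currently has a
  // surplus; a hint of IndexSetSize means no such size (see bestFitSmall()).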
1997   for (i = IndexSetSize - 1; i != 0; i -= IndexSetStride) {
1998     FreeList *fl = &_indexedFreeList[i];
1999     fl->set_hint(h);
2000     if (fl->surplus() > 0) {
2001       h = i;
2002     }
2003   }
2004 }
2005 
2006 void CompactibleFreeListSpace::clearFLCensus() {
2007   assert_locked();
  size_t i;
2009   for (i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
2010     FreeList *fl = &_indexedFreeList[i];
2011     fl->set_prevSweep(fl->count());
2012     fl->set_coalBirths(0);
2013     fl->set_coalDeaths(0);
2014     fl->set_splitBirths(0);
2015     fl->set_splitDeaths(0);  
2016   }
2017 }
2018 
2019 void CompactibleFreeListSpace::endSweepFLCensus(int sweepCt) {
2020   setFLSurplus();
2021   setFLHints();
2022   if (PrintGC && PrintFLSCensus > 0) {
2023     printFLCensus(sweepCt);
2024   }
2025   clearFLCensus();
2026   assert_locked();
2027   _dictionary->endSweepDictCensus(SplitSurplusPercent);
2028 }
2029 
2030 bool CompactibleFreeListSpace::coalOverPopulated(size_t size) {
2031   if (size < SmallForDictionary) {
2032     FreeList *fl = &_indexedFreeList[size];
2033     return (fl->coalDesired() < 0) ||
2034            ((int)fl->count() > fl->coalDesired());
2035   } else {
2036     return dictionary()->coalDictOverPopulated(size);
2037   }
2038 }
2039 
2040 void CompactibleFreeListSpace::smallCoalBirth(size_t size) {
2041   assert(size < SmallForDictionary, "Size too large for indexed list");
2042   FreeList *fl = &_indexedFreeList[size];
2043   fl->increment_coalBirths();
2044   fl->increment_surplus();
2045 }
2046 
2047 void CompactibleFreeListSpace::smallCoalDeath(size_t size) {
2048   assert(size < SmallForDictionary, "Size too large for indexed list");
2049   FreeList *fl = &_indexedFreeList[size];
2050   fl->increment_coalDeaths();
2051   fl->decrement_surplus();
2052 }
2053 
2054 void CompactibleFreeListSpace::coalBirth(size_t size) {
2055   if (size  < SmallForDictionary) {
2056     smallCoalBirth(size);
2057   } else {
2058     dictionary()->dictCensusUpdate(size, 
2059                                    false /* split */, 
2060                                    true /* birth */);
2061   }
2062 }
2063 
2064 void CompactibleFreeListSpace::coalDeath(size_t size) {
2065   if(size  < SmallForDictionary) {
2066     smallCoalDeath(size);
2067   } else {
2068     dictionary()->dictCensusUpdate(size, 
2069                                    false /* split */, 
2070                                    false /* birth */);
2071   }
2072 }
2073 
2074 void CompactibleFreeListSpace::smallSplitBirth(size_t size) {
2075   assert(size < SmallForDictionary, "Size too large for indexed list");
2076   FreeList *fl = &_indexedFreeList[size];
2077   fl->increment_splitBirths();
2078   fl->increment_surplus();
2079 }
2080 
2081 void CompactibleFreeListSpace::smallSplitDeath(size_t size) {
2082   assert(size < SmallForDictionary, "Size too large for indexed list");
2083   FreeList *fl = &_indexedFreeList[size];
2084   fl->increment_splitDeaths();
2085   fl->decrement_surplus();
2086 }
2087 
2088 void CompactibleFreeListSpace::splitBirth(size_t size) {
2089   if (size  < SmallForDictionary) {
2090     smallSplitBirth(size);
2091   } else {
2092     dictionary()->dictCensusUpdate(size, 
2093                                    true /* split */, 
2094                                    true /* birth */);
2095   }
2096 }
2097 
2098 void CompactibleFreeListSpace::splitDeath(size_t size) {
2099   if (size  < SmallForDictionary) {
2100     smallSplitDeath(size);
2101   } else {
2102     dictionary()->dictCensusUpdate(size, 
2103                                    true /* split */, 
2104                                    false /* birth */);
2105   }
2106 }
2107 
2108 void CompactibleFreeListSpace::split(size_t from, size_t to1) {
2109   size_t to2 = from - to1;
2110   splitDeath(from);
2111   splitBirth(to1);
2112   splitBirth(to2);
2113 }
2114 
2115 
2116 void CompactibleFreeListSpace::print() const {
2117   tty->print(" CompactibleFreeListSpace");
2118   Space::print();
2119 }
2120 
2121 void CompactibleFreeListSpace::prepare_for_verify() {
2122   assert_locked();
2123   repairLinearAllocationBlocks();
2124   // Verify that the SpoolBlocks look like free blocks of
2125   // appropriate sizes... To be done ...
2126 }
2127 
2128 class VerifyAllBlksClosure: public BlkClosure {
2129   const CompactibleFreeListSpace* _sp;
2130   const MemRegion                 _span;
2131 
2132  public:
2133   VerifyAllBlksClosure(const CompactibleFreeListSpace* sp,
2134     MemRegion span) :  _sp(sp), _span(span) { }
2135 
2136   size_t do_blk(HeapWord* addr) {
2137     size_t res;
2138     if (_sp->block_is_obj(addr)) {
2139       oop p = oop(addr);
2140       guarantee(p->is_oop(), "Should be an oop");
2141       res = _sp->adjustObjectSize(p->size());
2142       if (_sp->obj_is_alive(addr)) {
2143         p->verify();
2144       }
2145     } else {
2146       FreeChunk* fc = (FreeChunk*)addr;
2147       res = fc->size();
2148       if (FLSVerifyLists && !fc->cantCoalesce()) {
2149         guarantee(_sp->verifyChunkInFreeLists(fc),
2150                   "Chunk should be on a free list");
2151       }
2152     }
2153     guarantee(res != 0, "Livelock: no rank reduction!");
2154     return res;
2155   }
2156 };
2157 
2158 class VerifyAllOopsClosure: public OopClosure {
2159   const CMSCollector*             _collector;
2160   const CompactibleFreeListSpace* _sp;
2161   const MemRegion                 _span;
2162   const bool                      _past_remark;
2163   const CMSBitMap*                _bit_map;
2164 
2165  public:
2166   VerifyAllOopsClosure(const CMSCollector* collector,
2167     const CompactibleFreeListSpace* sp, MemRegion span,
2168     bool past_remark, CMSBitMap* bit_map) :
2169     OopClosure(), _collector(collector), _sp(sp), _span(span),
2170     _past_remark(past_remark), _bit_map(bit_map) { }
2171 
2172   void do_oop(oop* ptr) {
2173     oop p = *ptr;
2174     if (p != NULL) {
2175       if (_span.contains(p)) { // the interior oop points into CMS heap
2176         if (!_span.contains(ptr)) { // reference from outside CMS heap
2177           // Should be a valid object; the first disjunct below allows
2178           // us to sidestep an assertion in block_is_obj() that insists
2179           // that p be in _sp. Note that several generations (and spaces)
2180           // are spanned by _span (CMS heap) above.
2181           guarantee(!_sp->is_in_reserved(p) || _sp->block_is_obj((HeapWord*)p),
2182                     "Should be an object");
2183           guarantee(p->is_oop(), "Should be an oop");
2184           p->verify();
2185           if (_past_remark) {
2186             // Remark has been completed, the object should be marked
2187             _bit_map->isMarked((HeapWord*)p);
2188           }
2189         }
2190         else { // reference within CMS heap
2191           if (_past_remark) {
2192             // Remark has been completed -- so the referent should have
2193             // been marked, if referring object is.
2194             if (_bit_map->isMarked(_collector->block_start(ptr))) {
2195               guarantee(_bit_map->isMarked((HeapWord*)p), "Marking error?");
2196             }
2197           }
2198         }
2199       } else if (_sp->is_in_reserved(ptr)) {
2200         // the reference is from FLS, and points out of FLS
2201         guarantee(p->is_oop(), "Should be an oop");
2202         p->verify();
2203       }
2204     }
2205   }
2206 };
2207 
2208 void CompactibleFreeListSpace::verify(bool ignored) const {
2209   assert_lock_strong(&_freelistLock);
2210   verify_objects_initialized();
2211   MemRegion span = _collector->_span;
2212   bool past_remark = (_collector->abstract_state() ==
2213                       CMSCollector::Sweeping);
2214 
2215   ResourceMark rm;
2216   HandleMark  hm;
2217 
2218   // Check integrity of CFL data structures
2219   _promoInfo.verify();
2220   _dictionary->verify();
2221   if (FLSVerifyIndexTable) {
2222     verifyIndexedFreeLists();
2223   }
2224   // Check integrity of all objects and free blocks in space
2225   {
2226     VerifyAllBlksClosure cl(this, span);
2227     ((CompactibleFreeListSpace*)this)->blk_iterate(&cl);  // cast off const
2228   }
2229   // Check that all references in the heap to FLS
2230   // are to valid objects in FLS or that references in
2231   // FLS are to valid objects elsewhere in the heap
2232   if (FLSVerifyAllHeapReferences)
2233   {
2234     VerifyAllOopsClosure cl(_collector, this, span, past_remark,
2235       _collector->markBitMap());
2236     CollectedHeap* ch = Universe::heap();
2237     ch->oop_iterate(&cl);              // all oops in generations
2238     ch->permanent_oop_iterate(&cl);    // all oops in perm gen
2239   }
2240 
2241   if (VerifyObjectStartArray) {
2242     // Verify the block offset table
2243     _bt.verify();
2244   }
2245 }
2246 
2247 #ifndef PRODUCT
2248 void CompactibleFreeListSpace::verifyFreeLists() const {
2249   if (FLSVerifyLists) {
2250     _dictionary->verify();
2251     verifyIndexedFreeLists();
2252   } else {
2253     if (FLSVerifyDictionary) {
2254       _dictionary->verify();
2255     }
2256     if (FLSVerifyIndexTable) {
2257       verifyIndexedFreeLists();
2258     }
2259   }
2260 }
2261 #endif
2262 
2263 void CompactibleFreeListSpace::verifyIndexedFreeLists() const {
2264   size_t i = 0;
2265   for (; i < MinChunkSize; i++) {
2266     guarantee(_indexedFreeList[i].head() == NULL, "should be NULL");
2267   }
2268   for (; i < IndexSetSize; i++) {
2269     verifyIndexedFreeList(i);
2270   }
2271 }
2272 
2273 void CompactibleFreeListSpace::verifyIndexedFreeList(size_t size) const {
2274   guarantee(size % 2 == 0, "Odd slots should be empty");
2275   for (FreeChunk* fc = _indexedFreeList[size].head(); fc != NULL;
2276     fc = fc->next()) {
2277     guarantee(fc->size() == size, "Size inconsistency");
2278     guarantee(fc->isFree(), "!free?");
2279     guarantee(fc->next() == NULL || fc->next()->prev() == fc, "Broken list");
2280   }
2281 }
2282 
2283 #ifndef PRODUCT
2284 void CompactibleFreeListSpace::checkFreeListConsistency() const {
2285   assert(_dictionary->minSize() <= IndexSetSize,
2286     "Some sizes can't be allocated without recourse to"
2287     " linear allocation buffers");
2288   assert(MIN_TREE_CHUNK_SIZE*HeapWordSize == sizeof(TreeChunk),
2289     "else MIN_TREE_CHUNK_SIZE is wrong");
2290   assert((IndexSetStride == 2 && IndexSetStart == 2) ||
2291          (IndexSetStride == 1 && IndexSetStart == 1), "just checking");
2292   assert((IndexSetStride != 2) || (MinChunkSize % 2 == 0),
2293       "Some for-loops may be incorrectly initialized");
2294   assert((IndexSetStride != 2) || (IndexSetSize % 2 == 1),
2295       "For-loops that iterate over IndexSet with stride 2 may be wrong");
2296 }
2297 #endif
2298 
2299 void CompactibleFreeListSpace::printFLCensus(int sweepCt) const {
2300   assert_lock_strong(&_freelistLock);
2301   ssize_t bfrSurp     = 0;
2302   ssize_t surplus     = 0;
2303   ssize_t desired     = 0;
2304   ssize_t prevSweep   = 0;
2305   ssize_t beforeSweep = 0;
2306   ssize_t count       = 0;
2307   ssize_t coalBirths  = 0;
2308   ssize_t coalDeaths  = 0;
2309   ssize_t splitBirths = 0;
2310   ssize_t splitDeaths = 0;
2311   gclog_or_tty->print("end sweep# %d\n", sweepCt);
2312   gclog_or_tty->print("%4s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
2313              "%7s\t"    "%7s\t"      "%7s\t"      "%7s\t"      "%7s\t"
2314              "%7s\t"    "\n",
2315              "size",    "bfrsurp",   "surplus",   "desired",   "prvSwep",     
2316              "bfrSwep", "count",     "cBirths",   "cDeaths",   "sBirths",
2317              "sDeaths");
2318 
2319   size_t totalFree = 0;
2320   for (size_t i = IndexSetStart; i < IndexSetSize; i += IndexSetStride) {
    const FreeList *fl = &_indexedFreeList[i];
    totalFree += fl->count() * fl->size();
2323 
2324     gclog_or_tty->print("%4d\t"          "%7d\t"             "%7d\t"        "%7d\t"
2325                "%7d\t"          "%7d\t"             "%7d\t"        "%7d\t"
2326                "%7d\t"          "%7d\t"             "%7d\t"        "\n",
2327                fl->size(),       fl->bfrSurp(),     fl->surplus(), fl->desired(), 
2328                fl->prevSweep(),  fl->beforeSweep(), fl->count(),   fl->coalBirths(), 
2329                fl->coalDeaths(), fl->splitBirths(), fl->splitDeaths());
2330     bfrSurp     += fl->bfrSurp();
2331     surplus     += fl->surplus();
2332     desired     += fl->desired();
2333     prevSweep   += fl->prevSweep();
2334     beforeSweep += fl->beforeSweep();
2335     count       += fl->count();
2336     coalBirths  += fl->coalBirths();
2337     coalDeaths  += fl->coalDeaths();
2338     splitBirths += fl->splitBirths();
2339     splitDeaths += fl->splitDeaths();
2340   }                                                                                             
2341   gclog_or_tty->print("%4s\t"
2342             "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t"
2343             "%7d\t"      "%7d\t"     "%7d\t"        "%7d\t"       "%7d\t" "\n",
2344             "totl",
2345             bfrSurp,     surplus,     desired,     prevSweep,     beforeSweep,
2346             count,       coalBirths,  coalDeaths,  splitBirths,   splitDeaths);
2347   gclog_or_tty->print_cr("Total free in indexed lists %d words", totalFree);
2348   gclog_or_tty->print("growth: %8.5f  deficit: %8.5f\n",
2349     (double)(splitBirths+coalBirths-splitDeaths-coalDeaths)/
2350             (prevSweep != 0 ? (double)prevSweep : 1.0),
2351     (double)(desired - count)/(desired != 0 ? (double)desired : 1.0));
2352   _dictionary->printDictCensus();
2353 }
2354 
2355 // Return the next displaced header, incrementing the pointer and
2356 // recycling spool area as necessary.
2357 markOop PromotionInfo::nextDisplacedHeader() {
2358   assert(_spoolHead != NULL, "promotionInfo inconsistency");
2359   assert(_spoolHead != _spoolTail || _firstIndex < _nextIndex,
2360          "Empty spool space: no displaced header can be fetched");
2361   assert(_spoolHead->bufferSize > _firstIndex, "Off by one error at head?");
2362   markOop hdr = _spoolHead->displacedHdr[_firstIndex];
2363   // Spool forward
2364   if (++_firstIndex == _spoolHead->bufferSize) { // last location in this block
2365     // forward to next block, recycling this block into spare spool buffer
2366     SpoolBlock* tmp = _spoolHead->nextSpoolBlock;
2367     assert(_spoolHead != _spoolTail, "Spooling storage mix-up");
2368     _spoolHead->nextSpoolBlock = _spareSpool;
2369     _spareSpool = _spoolHead;
2370     _spoolHead = tmp;
2371     _firstIndex = 1;
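    // Slot 0 of each SpoolBlock holds a self-pointer rather than a displaced
    // header (see PromotionInfo::verify()), so spooling restarts at index 1.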
2372     NOT_PRODUCT(
2373       if (_spoolHead == NULL) {  // all buffers fully consumed
2374         assert(_spoolTail == NULL && _nextIndex == 1,
2375                "spool buffers processing inconsistency");
2376       }
2377     )
2378   } 
2379   return hdr;
2380 }
2381 
2382 void PromotionInfo::track(PromotedObject* trackOop) {
2383   track(trackOop, oop(trackOop)->klass());
2384 }
2385 
2386 void PromotionInfo::track(PromotedObject* trackOop, klassOop klassOfOop) {
2387   // make a copy of header as it may need to be spooled
2388   markOop mark = oop(trackOop)->mark();
2389   trackOop->clearNext();
2390   if (mark->must_be_preserved_for_cms_scavenge(klassOfOop)) {
2391     // save non-prototypical header, and mark oop
2392     saveDisplacedHeader(mark);
2393     trackOop->setDisplacedMark();
2394   } else {
2395     // we'd like to assert something like the following:
2396     // assert(mark == markOopDesc::prototype(), "consistency check");
2397     // ... but the above won't work because the age bits have not (yet) been
2398     // cleared. The remainder of the check would be identical to the
2399     // condition checked in must_be_preserved() above, so we don't really
2400     // have anything useful to check here!
2401   }
2402   if (_promoTail != NULL) {
2403     assert(_promoHead != NULL, "List consistency");
2404     _promoTail->setNext(trackOop);
2405     _promoTail = trackOop;
2406   } else {
2407     assert(_promoHead == NULL, "List consistency");
2408     _promoHead = _promoTail = trackOop;
2409   }
  // Mark as newly promoted, so we can skip over such objects
2411   // when scanning dirty cards
2412   assert(!trackOop->hasPromotedMark(), "Should not have been marked");
2413   trackOop->setPromotedMark();
2414 }
2415 
2416 // Save the given displaced header, incrementing the pointer and
2417 // obtaining more spool area as necessary.
2418 void PromotionInfo::saveDisplacedHeader(markOop hdr) {
2419   assert(_spoolHead != NULL && _spoolTail != NULL,
2420          "promotionInfo inconsistency");
2421   assert(_spoolTail->bufferSize > _nextIndex, "Off by one error at tail?");
2422   _spoolTail->displacedHdr[_nextIndex] = hdr;
2423   // Spool forward
2424   if (++_nextIndex == _spoolTail->bufferSize) { // last location in this block
2425     // get a new spooling block
2426     assert(_spoolTail->nextSpoolBlock == NULL, "tail should terminate spool list");
2427     _splice_point = _spoolTail;                   // save for splicing
2428     _spoolTail->nextSpoolBlock = getSpoolBlock(); // might fail
2429     _spoolTail = _spoolTail->nextSpoolBlock;      // might become NULL ...
2430     // ... but will attempt filling before next promotion attempt
2431     _nextIndex = 1;
2432   }
2433 }
2434 
2435 // Ensure that spooling space exists. Return false if spooling space
2436 // could not be obtained.
2437 bool PromotionInfo::ensure_spooling_space_work() {
2438   assert(!has_spooling_space(), "Only call when there is no spooling space");
2439   // Try and obtain more spooling space
2440   SpoolBlock* newSpool = getSpoolBlock();
2441   assert(newSpool == NULL ||
2442          (newSpool->bufferSize != 0 && newSpool->nextSpoolBlock == NULL),
2443         "getSpoolBlock() sanity check");
2444   if (newSpool == NULL) {
2445     return false;
2446   }
2447   _nextIndex = 1;
2448   if (_spoolTail == NULL) {
2449     _spoolTail = newSpool;
2450     if (_spoolHead == NULL) {
2451       _spoolHead = newSpool;
2452       _firstIndex = 1;
2453     } else {
2454       assert(_splice_point != NULL && _splice_point->nextSpoolBlock == NULL,
2455              "Splice point invariant");
2456       // Extra check that _splice_point is connected to list
2457       #ifdef ASSERT
2458       {
2459         SpoolBlock* blk = _spoolHead;
2460         for (; blk->nextSpoolBlock != NULL;
2461              blk = blk->nextSpoolBlock);
2462         assert(blk != NULL && blk == _splice_point,
2463                "Splice point incorrect");
2464       }
2465       #endif // ASSERT
2466       _splice_point->nextSpoolBlock = newSpool;
2467     }
2468   } else {
2469     assert(_spoolHead != NULL, "spool list consistency");
2470     _spoolTail->nextSpoolBlock = newSpool;
2471     _spoolTail = newSpool;
2472   }
2473   return true;
2474 }
2475 
2476 // Get a free spool buffer from the free pool, getting a new block
2477 // from the heap if necessary.
2478 SpoolBlock* PromotionInfo::getSpoolBlock() {
2479   SpoolBlock* res;
2480   if ((res = _spareSpool) != NULL) {
2481     _spareSpool = _spareSpool->nextSpoolBlock;
2482     res->nextSpoolBlock = NULL;
2483   } else {  // spare spool exhausted, get some from heap
2484     res = (SpoolBlock*)(space()->allocateScratch(refillSize()));
2485     if (res != NULL) {
2486       res->init();
2487     }
2488   }
2489   assert(res == NULL || res->nextSpoolBlock == NULL, "postcondition");
2490   return res;
2491 }
2492 
2493 void PromotionInfo::startTrackingPromotions() {
2494   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
2495          "spooling inconsistency?");
2496   _firstIndex = _nextIndex = 1;
2497   _tracking = true;
2498 }
2499 
2500 void PromotionInfo::stopTrackingPromotions() {
2501   assert(_spoolHead == _spoolTail && _firstIndex == _nextIndex,
2502          "spooling inconsistency?");
2503   _firstIndex = _nextIndex = 1;
2504   _tracking = false;
2505 }
2506 
2507 // When _spoolTail is not NULL, then the slot <_spoolTail, _nextIndex>
2508 // points to the next slot available for filling.
2509 // The set of slots holding displaced headers are then all those in the
2510 // right-open interval denoted by: 
2511 // 
2512 //    [ <_spoolHead, _firstIndex>, <_spoolTail, _nextIndex> )
2513 // 
2514 // When _spoolTail is NULL, then the set of slots with displaced headers
2515 // is all those starting at the slot <_spoolHead, _firstIndex> and
2516 // going up to the last slot of last block in the linked list.
// In this latter case, _splice_point points to the tail block of
2518 // this linked list of blocks holding displaced headers.
2519 void PromotionInfo::verify() const {
2520   // Verify the following:
2521   // 1. the number of displaced headers matches the number of promoted
2522   //    objects that have displaced headers
2523   // 2. each promoted object lies in this space
2524   debug_only(
2525     PromotedObject* junk = NULL;
2526     assert(junk->next_addr() == (void*)(oop(junk)->mark_addr()),
2527            "Offset of PromotedObject::_next is expected to align with "
2528            "  the OopDesc::_mark within OopDesc");
2529   )
2530   // FIXME: guarantee????
2531   guarantee(_spoolHead == NULL || _spoolTail != NULL ||
2532             _splice_point != NULL, "list consistency");
2533   guarantee(_promoHead == NULL || _promoTail != NULL, "list consistency");
2534   // count the number of objects with displaced headers
2535   size_t numObjsWithDisplacedHdrs = 0;
2536   for (PromotedObject* curObj = _promoHead; curObj != NULL; curObj = curObj->next()) {
2537     guarantee(space()->is_in_reserved((HeapWord*)curObj), "Containment");
2538     // the last promoted object may fail the mark() != NULL test of is_oop().
2539     guarantee(curObj->next() == NULL || oop(curObj)->is_oop(), "must be an oop");
2540     if (curObj->hasDisplacedMark()) {
2541       numObjsWithDisplacedHdrs++;
2542     }
2543   }
2544   // Count the number of displaced headers
2545   size_t numDisplacedHdrs = 0;
2546   for (SpoolBlock* curSpool = _spoolHead;
2547        curSpool != _spoolTail && curSpool != NULL;
2548        curSpool = curSpool->nextSpoolBlock) {
2549     // the first entry is just a self-pointer; indices 1 through
2550     // bufferSize - 1 are occupied (thus, bufferSize - 1 slots).
2551     guarantee((void*)curSpool->displacedHdr == (void*)&curSpool->displacedHdr,
2552               "first entry of displacedHdr should be self-referential");
2553     numDisplacedHdrs += curSpool->bufferSize - 1;
2554   }
2555   guarantee((_spoolHead == _spoolTail) == (numDisplacedHdrs == 0),
2556             "internal consistency");
2557   guarantee(_spoolTail != NULL || _nextIndex == 1,
2558             "Inconsistency between _spoolTail and _nextIndex");
2559   // We overcounted (_firstIndex-1) worth of slots in block
2560   // _spoolHead and we undercounted (_nextIndex-1) worth of
2561   // slots in block _spoolTail. We make an appropriate
2562   // adjustment by subtracting the first and adding the
2563   // second:  - (_firstIndex - 1) + (_nextIndex - 1) 
2564   numDisplacedHdrs += (_nextIndex - _firstIndex);
2565   guarantee(numDisplacedHdrs == numObjsWithDisplacedHdrs, "Displaced hdr count");
2566 }
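// Editor's worked example of the counting arithmetic in verify() above; the
// numbers are hypothetical. Suppose each SpoolBlock has bufferSize == 5
// (usable slots 1..4), the spool list is _spoolHead -> B -> _spoolTail,
// _firstIndex == 3 and _nextIndex == 2:
//
//   full-block loop:  _spoolHead and B contribute (bufferSize - 1) == 4 each,
//                     so numDisplacedHdrs == 8
//   adjustment:       numDisplacedHdrs += (_nextIndex - _firstIndex) == -1,
//                     so numDisplacedHdrs == 7
//
// which matches the slots actually holding displaced headers: slots 3..4 of
// _spoolHead (2), slots 1..4 of B (4), and slot 1 of _spoolTail (1).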
2567 
2568 
2569 CFLS_LAB::CFLS_LAB(CompactibleFreeListSpace* cfls) :
2570   _cfls(cfls)
2571 {
2572   _blocks_to_claim = CMSParPromoteBlocksToClaim;
2573   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2574        i < CompactibleFreeListSpace::IndexSetSize;
2575        i += CompactibleFreeListSpace::IndexSetStride) {
2576     _indexedFreeList[i].set_size(i);
2577   }
2578 }
2579 
2580 HeapWord* CFLS_LAB::alloc(size_t word_sz) {
2581   FreeChunk* res;
2582   word_sz = _cfls->adjustObjectSize(word_sz);
2583   if (word_sz >=  CompactibleFreeListSpace::IndexSetSize) {
2584     // This lock synchronizes with other large object allocations.
2585     MutexLockerEx x(_cfls->parDictionaryAllocLock(),
2586                     Mutex::_no_safepoint_check_flag);
2587     res = _cfls->getChunkFromDictionaryExact(word_sz);
2588     if (res == NULL) return NULL;
2589   } else {
2590     FreeList* fl = &_indexedFreeList[word_sz];
2592     if (fl->count() == 0) {
2594       // Attempt to refill this local free list.
2595       _cfls->par_get_chunk_of_blocks(word_sz, _blocks_to_claim, fl);
2596       // If it didn't work, give up.
2597       if (fl->count() == 0) return NULL;
2598     }
2599     res = fl->getChunkAtHead();
2600     assert(res != NULL, "Why was count non-zero?");
2601   }
2602   res->markNotFree();
2603   assert(!res->isFree(), "shouldn't be marked free");
2604   assert(oop(res)->klass() == NULL, "should look uninitialized");
2605   // mangle a just allocated object with a distinct pattern.
2606   debug_only(res->mangleAllocated(word_sz));
2607   return (HeapWord*)res;
2608 }
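// Editor's summary of the two allocation paths above (no new behavior is
// implied): requests of IndexSetSize words or more take
// parDictionaryAllocLock() and carve an exact-size chunk out of the shared
// dictionary, while smaller requests are served from this LAB's private
// _indexedFreeList without touching the shared locks, refilling it with up
// to _blocks_to_claim chunks via par_get_chunk_of_blocks() when it runs dry.
// In both cases the returned chunk is marked not-free and, in debug builds,
// mangled before being handed out as uninitialized object space.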
2609 
2610 void CFLS_LAB::retire() {
2611   for (size_t i = CompactibleFreeListSpace::IndexSetStart;
2612        i < CompactibleFreeListSpace::IndexSetSize;
2613        i += CompactibleFreeListSpace::IndexSetStride) {
2614     if (_indexedFreeList[i].count() > 0) {
2615       MutexLockerEx x(_cfls->_indexedFreeListParLocks[i],
2616                       Mutex::_no_safepoint_check_flag);
2617       _cfls->_indexedFreeList[i].prepend(&_indexedFreeList[i]);
2618       // Reset this list.
2619       _indexedFreeList[i] = FreeList();
2620       _indexedFreeList[i].set_size(i);
2621     }
2622   }
2623 }
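// Editor's sketch (not product code) of how a parallel GC worker might drive
// CFLS_LAB; "cms_space" and "word_size" are hypothetical names:
//
//   CFLS_LAB lab(cms_space);               // one LAB per worker thread
//   HeapWord* obj = lab.alloc(word_size);  // NULL if no chunk could be found
//   if (obj != NULL) {
//     ...                                  // copy/initialize the promoted object
//   }
//   lab.retire();                          // when the worker finishes, hand any
//                                          // unused local chunks back to the
//                                          // space's shared indexed free lists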
2624 
2625 void
2626 CompactibleFreeListSpace::
2627 par_get_chunk_of_blocks(size_t word_sz, size_t n, FreeList* fl) {
2628   assert(fl->count() == 0, "Precondition.");
2629   assert(word_sz < CompactibleFreeListSpace::IndexSetSize,
2630          "Precondition");
2631 
2632   // We'll try the indexed set for chunks of exactly word_sz words (the loop
2633   // below stops after k == 1); failing that, get a big chunk and split it up.
2634   int k = 1;
2635   size_t cur_sz = k * word_sz;
2636   bool found = false;
2637   while (cur_sz < CompactibleFreeListSpace::IndexSetSize && k == 1) {
2638     FreeList* gfl = &_indexedFreeList[cur_sz];
2639     FreeList fl_for_cur_sz;  // Empty.
2640     fl_for_cur_sz.set_size(cur_sz);
2641     {
2642       MutexLockerEx x(_indexedFreeListParLocks[cur_sz],
2643                       Mutex::_no_safepoint_check_flag);
2644       if (gfl->count() != 0) {
2645         size_t nn = MAX2(n/k, (size_t)1);
2646         gfl->getFirstNChunksFromList(nn, &fl_for_cur_sz);
2647         found = true;
2648       }
2649     }
2650     // Now transfer fl_for_cur_sz to fl.  Common case, we hope, is k = 1.
2651     if (found) {
2652       if (k == 1) {
2653         fl->prepend(&fl_for_cur_sz);
2654       } else {
2655         // Divide each block on fl_for_cur_sz up k ways.
2656         FreeChunk* fc;
2657         while ((fc = fl_for_cur_sz.getChunkAtHead()) != NULL) {
2658           // Must do this in reverse order, so that anybody attempting to
2659           // access the main chunk sees it as a single free block until we
2660           // change it.
2661           size_t fc_size = fc->size();
2662           for (int i = k-1; i >= 0; i--) {
2663             FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2664             ffc->setSize(word_sz);
2665             ffc->linkNext(NULL);
2666             ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2667             // Above must occur before BOT is updated below.
2668             // Splitting from the right: after the decrement below, fc_size == i * word_sz.
2669             _bt.mark_block((HeapWord*)ffc, word_sz);
2670             fc_size -= word_sz;
2671             _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2672             _bt.verify_single_block((HeapWord*)fc, fc_size);
2673             _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2674             // Push this on "fl".
2675             fl->returnChunkAtHead(ffc);
2676           }
2677           // TRAP
2678           assert(fl->tail()->next() == NULL, "List invariant.");
2679         }
2680       }
2681       return;
2682     }
2683     k++; cur_sz = k * word_sz;
2684   }
2685   // Otherwise, we'll split a block from the dictionary.
2686   FreeChunk* fc = NULL;
2687   FreeChunk* rem_fc = NULL;
2688   size_t rem;
2689   {
2690     MutexLockerEx x(parDictionaryAllocLock(),
2691                     Mutex::_no_safepoint_check_flag);
2692     while (n > 0) {
2693       fc = dictionary()->getChunk(MAX2(n * word_sz, 
2694                                   _dictionary->minSize()),
2695                                   FreeBlockDictionary::atLeast);
2696       if (fc != NULL) {
2697         _bt.allocated((HeapWord*)fc, fc->size());  // update _unallocated_blk
2698         dictionary()->dictCensusUpdate(fc->size(),
2699                                        true /*split*/,
2700                                        false /*birth*/);
2701         break;
2702       } else {
2703         n--;
2704       }
2705     }
2706     if (fc == NULL) return;
2707     // Otherwise, split up that block.
2708     size_t nn = fc->size() / word_sz;
2709     n = MIN2(nn, n);
2710     rem = fc->size() - n * word_sz;
2711     // If there is a remainder, and it's too small, allocate one fewer.
2712     if (rem > 0 && rem < MinChunkSize) {
2713       n--; rem += word_sz;
2714     }
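    // (Editor's note, hypothetical numbers: with word_sz == 8 and an 83-word
    // chunk, n starts as 10 and rem as 3; if MinChunkSize were 4, the
    // adjustment above would yield n == 9 and rem == 11, keeping the
    // remainder big enough to stand on its own as a free chunk.)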
2715     // First return the remainder, if any.
2716     // Note that we hold the lock until we decide if we're going to give
2717     // back the remainder to the dictionary, since a contending allocator
2718     // may otherwise see the heap as empty.  (We're willing to take that
2719     // hit if the block is a small block.)
2720     if (rem > 0) {
2721       size_t prefix_size = n * word_sz;
2722       rem_fc = (FreeChunk*)((HeapWord*)fc + prefix_size);
2723       rem_fc->setSize(rem);
2724       rem_fc->linkNext(NULL);
2725       rem_fc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2726       // Above must occur before BOT is updated below.
2727       _bt.split_block((HeapWord*)fc, fc->size(), prefix_size);
2728       if (rem >= IndexSetSize) {
2729         returnChunkToDictionary(rem_fc);
2730         dictionary()->dictCensusUpdate(fc->size(),
2731                                        true /*split*/,
2732                                        true /*birth*/);
2733         rem_fc = NULL;
2734       }
2735       // Otherwise, return it to the small list below.
2736     }
2737   }
2738   // 
2739   if (rem_fc != NULL) {
2740     MutexLockerEx x(_indexedFreeListParLocks[rem],
2741                     Mutex::_no_safepoint_check_flag);
2742     _bt.verify_not_unallocated((HeapWord*)rem_fc, rem_fc->size());
2743     _indexedFreeList[rem].returnChunkAtHead(rem_fc);
2744     smallSplitBirth(rem);
2745   }
2746 
2747   // Now do the splitting up.
2748   // Must do this in reverse order, so that anybody attempting to
2749   // access the main chunk sees it as a single free block until we
2750   // change it.
2751   size_t fc_size = n * word_sz;
2752   // All but first chunk in this loop
2753   for (ssize_t i = n-1; i > 0; i--) {
2754     FreeChunk* ffc = (FreeChunk*)((HeapWord*)fc + i * word_sz);
2755     ffc->setSize(word_sz);
2756     ffc->linkNext(NULL);
2757     ffc->linkPrev(NULL); // Mark as a free block for other (parallel) GC threads.
2758     // Above must occur before BOT is updated below.
2759     // Splitting from the right: after the decrement below, fc_size == i * word_sz.
2760     _bt.mark_block((HeapWord*)ffc, word_sz);
2761     fc_size -= word_sz;
2762     _bt.verify_not_unallocated((HeapWord*)ffc, ffc->size());
2763     _bt.verify_single_block((HeapWord*)ffc, ffc->size());
2764     _bt.verify_single_block((HeapWord*)fc, fc_size);
2765     // Push this on "fl".
2766     fl->returnChunkAtHead(ffc);
2767   }
2768   // First chunk
2769   fc->setSize(word_sz);
2770   fc->linkNext(NULL);
2771   fc->linkPrev(NULL);
2772   _bt.verify_not_unallocated((HeapWord*)fc, fc->size());
2773   _bt.verify_single_block((HeapWord*)fc, fc->size());
2774   fl->returnChunkAtHead(fc);
2775 
2776   {
2777     MutexLockerEx x(_indexedFreeListParLocks[word_sz],
2778                     Mutex::_no_safepoint_check_flag);
2779     ssize_t new_births = _indexedFreeList[word_sz].splitBirths() + n;
2780     _indexedFreeList[word_sz].set_splitBirths(new_births);
2781     ssize_t new_surplus = _indexedFreeList[word_sz].surplus() + n;
2782     _indexedFreeList[word_sz].set_surplus(new_surplus);
2783   }
2784 
2785   // TRAP
2786   assert(fl->tail()->next() == NULL, "List invariant.");
2787 }
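// Editor's worked example for the dictionary path of par_get_chunk_of_blocks()
// above; all numbers are hypothetical.  Say the caller asks for n == 16 chunks
// of word_sz == 10 words and the indexed set comes up empty:
//
//   getChunk(MAX2(160, minSize), atLeast)  returns, say, a 205-word chunk fc
//   nn = 205 / 10 = 20, so n = MIN2(20, 16) = 16
//   rem = 205 - 160 = 45, which is not below MinChunkSize, so n stays 16
//   rem_fc = fc + 160 words, of size 45; it is returned to the dictionary if
//            45 >= IndexSetSize, otherwise pushed on _indexedFreeList[45]
//   the 160-word prefix is then carved, right to left, into 16 chunks of 10
//   words each and pushed on the caller's list "fl"
//
// The right-to-left order matters: until each prefix chunk is resized, a
// concurrent reader of the block offset table still sees the leading portion
// of fc as a single valid free block.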
2788 
2789 // Set up the space's par_seq_tasks structure for work claiming
2790 // for parallel rescan. See CMSParRemarkTask where this is currently used.
2791 // XXX Need to suitably abstract and generalize this and the next
2792 // method into one.
2793 void
2794 CompactibleFreeListSpace::
2795 initialize_sequential_subtasks_for_rescan(int n_threads) {
2796   // The "size" of each task is fixed according to rescan_task_size.
2797   assert(n_threads > 0, "Unexpected n_threads argument");
2798   const size_t task_size = rescan_task_size();
2799   size_t n_tasks = (used_region().word_size() + task_size - 1)/task_size;
2800   assert((n_tasks == 0) == used_region().is_empty(), "n_tasks incorrect");
2801   assert(n_tasks == 0 ||
2802          ((used_region().start() + (n_tasks - 1)*task_size < used_region().end()) &&
2803           (used_region().start() + n_tasks*task_size >= used_region().end())),
2804          "n_tasks calculation incorrect");
2805 
2806   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2807   assert(!pst->valid(), "Clobbering existing data?");
2808   pst->set_par_threads(n_threads);
2809   pst->set_n_tasks((int)n_tasks);
2810 }
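// Editor's note (hypothetical numbers): the n_tasks computation above is a
// ceiling division.  With rescan_task_size() == 4096 words and a 10000-word
// used region, n_tasks == (10000 + 4095) / 4096 == 3: two full 4096-word
// tasks plus a final task covering the remaining 1808 words.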
2811 
2812 // Set up the space's par_seq_tasks structure for work claiming
2813 // for parallel concurrent marking. See CMSConcMarkTask where this is currently used.
2814 void
2815 CompactibleFreeListSpace::
2816 initialize_sequential_subtasks_for_marking(int n_threads,
2817                                            HeapWord* low) {
2818   // The "size" of each task is fixed according to marking_task_size.
2819   assert(n_threads > 0, "Unexpected n_threads argument");
2820   const size_t task_size = marking_task_size();
2821   assert(task_size > CardTableModRefBS::card_size_in_words &&
2822          (task_size %  CardTableModRefBS::card_size_in_words == 0),
2823          "Otherwise arithmetic below would be incorrect");
2824   MemRegion span = _gen->reserved();
2825   if (low != NULL) {
2826     if (span.contains(low)) {
2827       // Align low down to a card boundary so that
2828       // we can use block_offset_careful() on span boundaries.
2829       HeapWord* aligned_low = (HeapWord*)align_size_down((uintptr_t)low,
2830                                  CardTableModRefBS::card_size);
2831       // Clip span prefix at aligned_low
2832       span = span.intersection(MemRegion(aligned_low, span.end()));
2833     } else if (low > span.end()) {
2834       span = MemRegion(low, low);  // Null region
2835     } // else use entire span
2836   }
2837   assert(span.is_empty() ||
2838          ((uintptr_t)span.start() % CardTableModRefBS::card_size == 0),
2839          "span should start at a card boundary");
2840   size_t n_tasks = (span.word_size() + task_size - 1)/task_size;
2841   assert((n_tasks == 0) == span.is_empty(), "Inconsistency");
2842   assert(n_tasks == 0 ||
2843          ((span.start() + (n_tasks - 1)*task_size < span.end()) &&
2844           (span.start() + n_tasks*task_size >= span.end())),
2845          "n_tasks calculation incorrect");
2846   SequentialSubTasksDone* pst = conc_par_seq_tasks();
2847   assert(!pst->valid(), "Clobbering existing data?");
2848   pst->set_par_threads(n_threads);
2849   pst->set_n_tasks((int)n_tasks);
2850 }
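// Editor's note (hypothetical numbers): suppose the generation's reserved
// span is [0x1000, 0x9000), card_size is 512 bytes, and low == 0x2345.  Since
// low lies in the span it is aligned down to 0x2200, and the span used for
// task partitioning becomes [0x2200, 0x9000).  Had low been beyond 0x9000,
// an empty region would have been used instead.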
2851