src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

7331   }
7332   if (CMSTraceSweeper) {
7333     gclog_or_tty->print_cr("end of sweep with _limit = " PTR_FORMAT "\n================",
7334                            p2i(_limit));
7335   }
7336 }
7337 #endif  // PRODUCT
7338 
7339 void SweepClosure::initialize_free_range(HeapWord* freeFinger,
7340     bool freeRangeInFreeLists) {
7341   if (CMSTraceSweeper) {
7342     gclog_or_tty->print("---- Start free range at " PTR_FORMAT " with free block (%d)\n",
7343                p2i(freeFinger), freeRangeInFreeLists);
7344   }
7345   assert(!inFreeRange(), "Trampling existing free range");
7346   set_inFreeRange(true);
7347   set_lastFreeRangeCoalesced(false);
7348 
7349   set_freeFinger(freeFinger);
7350   set_freeRangeInFreeLists(freeRangeInFreeLists);
7351   if (CMSTestInFreeList) {
7352     if (freeRangeInFreeLists) {
7353       FreeChunk* fc = (FreeChunk*) freeFinger;
7354       assert(fc->is_free(), "A chunk on the free list should be free.");
7355       assert(fc->size() > 0, "Free range should have a size");
7356       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
7357     }
7358   }
7359 }
7360 
7361 // Note that the sweeper runs concurrently with mutators. Thus,
7362 // it is possible for direct allocation in this generation to happen
7363 // in the middle of the sweep. Note that the sweeper also coalesces
7364 // contiguous free blocks. Thus, unless the sweeper and the allocator
7365 // synchronize appropriately, freshly allocated blocks may get swept up.
7366 // This is accomplished by the sweeper locking the free lists while
7367 // it is sweeping. Thus blocks that are determined to be free are
7368 // indeed free. There is however one additional complication:
7369 // blocks that have been allocated since the final checkpoint and
7370 // mark will not have been marked and so would be treated as
7371 // unreachable and swept up. To prevent this, the allocator marks
7372 // the bit map when allocating during the sweep phase. This leads,
7373 // however, to a further complication -- objects may have been allocated
7374 // but not yet initialized -- in the sense that the header isn't yet
7375 // installed. The sweeper cannot then determine the size of the block
7376 // in order to skip over it. To deal with this case, we use a technique
7377 // (due to Printezis) to encode such uninitialized block sizes in the
7378 // bit map. Since the bit map uses one bit per HeapWord, but the
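
The Printezis encoding described in this comment can be illustrated with a toy bitmap. The following is a minimal sketch under assumed names (ToyBitMap, mark_uninitialized_block, and decode_block_size are hypothetical, not HotSpot APIs): the allocator sets bits at the first two words of an uninitialized block plus one at its last word, and the sweeper recovers the size from the distance to that end marker. This matches the asserts further down: size >= 3 keeps the end marker at start + size - 1 clear of the flag bit at start + 1, and no other bits appear in [start + 2, start + size).

#include <cassert>
#include <cstddef>
#include <vector>

// Hypothetical stand-in for the CMS mark bit map: one bit per heap word.
struct ToyBitMap {
  std::vector<bool> bits;
  explicit ToyBitMap(std::size_t words) : bits(words, false) {}
  void mark(std::size_t w)            { bits[w] = true; }
  bool is_marked(std::size_t w) const { return bits[w]; }
  // Index of the next marked word at or after w.
  std::size_t next_marked(std::size_t w) const {
    while (w < bits.size() && !bits[w]) { ++w; }
    return w;
  }
};

// Allocator side: the block at 'start' has no header yet, so its extent
// is encoded directly in the bit map (requires size >= 3).
void mark_uninitialized_block(ToyBitMap& bm, std::size_t start, std::size_t size) {
  assert(size >= 3 && "Necessary for Printezis marks to work");
  bm.mark(start);             // block is live
  bm.mark(start + 1);         // flag: header not installed, size encoded
  bm.mark(start + size - 1);  // end marker determines the size
}

// Sweeper side: recover the size of an uninitialized block so it can be
// skipped without reading its (absent) header.
std::size_t decode_block_size(const ToyBitMap& bm, std::size_t start) {
  assert(bm.is_marked(start) && bm.is_marked(start + 1));
  std::size_t end_marker = bm.next_marked(start + 2);
  return end_marker + 1 - start;
}

For example, a 5-word block at word 100 sets bits 100, 101, and 104; decode_block_size then yields 104 + 1 - 100 = 5.
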


7491 // is being considered for coalescing will be referred to as the
7492 // right-hand chunk.
7493 //
7494 // When making a decision on whether to coalesce a right-hand chunk with
7495 // the current left-hand chunk, the current count vs. the desired count
7496 // of the left-hand chunk is considered.  Also, if the right-hand chunk
7497 // is near the large chunk at the end of the heap (see
7498 // ConcurrentMarkSweepGeneration::isNearLargestChunk()), then the
7499 // left-hand chunk is coalesced.
7500 //
7501 // When making a decision about whether to split a chunk, the desired count
7502 // vs. the current count of the candidate to be split is also considered.
7503 // If the candidate is underpopulated (currently fewer chunks than desired),
7504 // a chunk of an overpopulated (currently more chunks than desired) size may
7505 // be chosen.  The "hint" associated with a free list, if non-null, points
7506 // to a free list which may be overpopulated.
7507 //
7508 
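
A rough model of the population test driving both decisions above (a sketch under assumed names; ToySizeClass, over_populated, and under_populated are illustrative, while the real bookkeeping lives in the adaptive free list statistics):

#include <cstddef>

// Hypothetical model of one size class in the free lists.
struct ToySizeClass {
  std::size_t current_count;  // chunks currently on this list
  std::size_t desired_count;  // demand-based target for this list
  int hint;                   // index of a possibly overpopulated size
                              // class to split from, or -1 if none
};

// Overpopulated: holding more chunks than demand calls for, so the list
// can afford to lose chunks to coalescing or to splitting.
inline bool over_populated(const ToySizeClass& sc) {
  return sc.current_count > sc.desired_count;
}

// Underpopulated: fewer chunks than desired, so replenishing it by
// splitting a chunk taken from an overpopulated class may pay off.
inline bool under_populated(const ToySizeClass& sc) {
  return sc.current_count < sc.desired_count;
}

Under FLSCoalescePolicy == 1 below, for instance, the left-hand and right-hand chunks are coalesced only when both of their size classes pass the overpopulation test.
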
7509 void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
7510   const size_t size = fc->size();
7511   // Chunks that cannot be coalesced are not in the
7512   // free lists.
7513   if (CMSTestInFreeList && !fc->cantCoalesce()) {
7514     assert(_sp->verify_chunk_in_free_list(fc),
7515            "free chunk should be in free lists");
7516   }
7517   // A chunk that is already free should not have been
7518   // marked in the bit map.
7519   HeapWord* const addr = (HeapWord*) fc;
7520   assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
7521   // Verify that the bit map has no bits marked between
7522   // addr and purported end of this block.
7523   _bitMap->verifyNoOneBitsInRange(addr + 1, addr + size);
7524 
7525   // Some chunks cannot be coalesced under any circumstances.
7526   // See the definition of cantCoalesce().
7527   if (!fc->cantCoalesce()) {
7528     // This chunk can potentially be coalesced.
7529     // All the work is done in do_post_free_or_garbage_chunk().
7530     do_post_free_or_garbage_chunk(fc, size);
7531     // Note that if the chunk is not coalescable (the else arm
7532     // below), we unconditionally flush, without needing to do
7533     // a "lookahead," as we do below.
7534     if (inFreeRange()) lookahead_and_flush(fc, size);
7535   } else {
7536     // Code path common to both original and adaptive free lists.


7603     assert(oop(addr)->klass_or_null() != NULL,
7604            "Should be an initialized object");
7605     // Ignore mark word because we are running concurrent with mutators
7606     assert(oop(addr)->is_oop(true), "live block should be an oop");
7607     // Verify that the bit map has no bits marked between
7608     // addr and purported end of this block.
7609     size = CompactibleFreeListSpace::adjustObjectSize(oop(addr)->size());
7610     assert(size >= 3, "Necessary for Printezis marks to work");
7611     assert(!_bitMap->isMarked(addr+1), "Tautology for this control point");
7612     DEBUG_ONLY(_bitMap->verifyNoOneBitsInRange(addr+2, addr+size);)
7613   }
7614   return size;
7615 }
7616 
7617 void SweepClosure::do_post_free_or_garbage_chunk(FreeChunk* fc,
7618                                                  size_t chunkSize) {
7619   // do_post_free_or_garbage_chunk() should only be called in the case
7620   // of the adaptive free list allocator.
7621   const bool fcInFreeLists = fc->is_free();
7622   assert((HeapWord*)fc <= _limit, "sweep invariant");
7623   if (CMSTestInFreeList && fcInFreeLists) {
7624     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
7625   }
7626 
7627   if (CMSTraceSweeper) {
7628     gclog_or_tty->print_cr("  -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")", p2i(fc), chunkSize);
7629   }
7630 
7631   HeapWord* const fc_addr = (HeapWord*) fc;
7632 
7633   bool coalesce = false;
7634   const size_t left  = pointer_delta(fc_addr, freeFinger());
7635   const size_t right = chunkSize;
7636   switch (FLSCoalescePolicy) {
7637     // numeric value forms a coalescing aggressiveness metric
7638     case 0:  { // never coalesce
7639       coalesce = false;
7640       break;
7641     }
7642     case 1: { // coalesce if left & right chunks on overpopulated lists
7643       coalesce = _sp->coalOverPopulated(left) &&
7644                  _sp->coalOverPopulated(right);
7645       break;


7658       break;
7659     }
7660     default:
7661      ShouldNotReachHere();
7662   }
7663 
7664   // Should the current free range be coalesced?
7665   // If the chunk is in a free range and either we decided to coalesce above
7666   // or the chunk is near the large block at the end of the heap
7667   // (isNearLargestChunk() returns true), then coalesce this chunk.
7668   const bool doCoalesce = inFreeRange()
7669                           && (coalesce || _g->isNearLargestChunk(fc_addr));
7670   if (doCoalesce) {
7671     // Coalesce the current free range on the left with the new
7672     // chunk on the right.  If either is on a free list,
7673     // it must be removed from the list and stashed in the closure.
7674     if (freeRangeInFreeLists()) {
7675       FreeChunk* const ffc = (FreeChunk*)freeFinger();
7676       assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
7677              "Size of free range is inconsistent with chunk size.");
7678       if (CMSTestInFreeList) {
7679         assert(_sp->verify_chunk_in_free_list(ffc),
7680                "Chunk is not in free lists");
7681       }
7682       _sp->coalDeath(ffc->size());
7683       _sp->removeFreeChunkFromFreeLists(ffc);
7684       set_freeRangeInFreeLists(false);
7685     }
7686     if (fcInFreeLists) {
7687       _sp->coalDeath(chunkSize);
7688       assert(fc->size() == chunkSize,
7689         "The chunk has the wrong size or is not in the free lists");
7690       _sp->removeFreeChunkFromFreeLists(fc);
7691     }
7692     set_lastFreeRangeCoalesced(true);
7693     print_free_block_coalesced(fc);
7694   } else {  // not in a free range and/or should not coalesce
7695     // Return the current free range and start a new one.
7696     if (inFreeRange()) {
7697       // In a free range but cannot coalesce with the right-hand chunk.
7698       // Put the current free range into the free lists.
7699       flush_cur_free_chunk(freeFinger(),
7700                            pointer_delta(fc_addr, freeFinger()));
7701     }


7730     if (CMSTraceSweeper) {
7731       gclog_or_tty->print_cr("_limit " PTR_FORMAT " reached or crossed by block "
7732                              "[" PTR_FORMAT "," PTR_FORMAT ") in space "
7733                              "[" PTR_FORMAT "," PTR_FORMAT ")",
7734                              p2i(_limit), p2i(fc), p2i(eob), p2i(_sp->bottom()), p2i(_sp->end()));
7735     }
7736     // Return the storage we are tracking back into the free lists.
7737     if (CMSTraceSweeper) {
7738       gclog_or_tty->print_cr("Flushing ... ");
7739     }
7740     assert(freeFinger() < eob, "Error");
7741     flush_cur_free_chunk(freeFinger(), pointer_delta(eob, freeFinger()));
7742   }
7743 }
7744 
7745 void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
7746   assert(inFreeRange(), "Should only be called if currently in a free range.");
7747   assert(size > 0,
7748     "A zero sized chunk cannot be added to the free lists.");
7749   if (!freeRangeInFreeLists()) {
7750     if (CMSTestInFreeList) {
7751       FreeChunk* fc = (FreeChunk*) chunk;
7752       fc->set_size(size);
7753       assert(!_sp->verify_chunk_in_free_list(fc),
7754              "chunk should not be in free lists yet");
7755     }
7756     if (CMSTraceSweeper) {
7757       gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
7758                     p2i(chunk), size);
7759     }
7760     // A new free range is going to be starting.  The current
7761     // free range has not been added to the free lists yet or
7762     // was removed so add it back.
7763     // If the current free range was coalesced, then the death
7764     // of the free range was recorded.  Record a birth now.
7765     if (lastFreeRangeCoalesced()) {
7766       _sp->coalBirth(size);
7767     }
7768     _sp->addChunkAndRepairOffsetTable(chunk, size,
7769             lastFreeRangeCoalesced());
7770   } else if (CMSTraceSweeper) {
7771     gclog_or_tty->print_cr("Already in free list: nothing to flush");
7772   }
7773   set_inFreeRange(false);
7774   set_freeRangeInFreeLists(false);
7775 }
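
Taken together, initialize_free_range(), do_post_free_or_garbage_chunk(), and flush_cur_free_chunk() implement a small state machine over (inFreeRange, freeFinger, lastFreeRangeCoalesced). The following condensed sketch (hypothetical names, free-list bookkeeping elided) shows the shape of that flow:

#include <cstddef>

// Condensed, hypothetical model of the sweep closure's free-range state.
struct ToySweepState {
  bool in_free_range = false;   // currently tracking a free range?
  std::size_t free_finger = 0;  // left end of the range being built
  bool coalesced = false;       // has the range absorbed another chunk?
};

// Begin tracking a new free range at 'start' (cf. initialize_free_range).
void start_range(ToySweepState& s, std::size_t start) {
  s.in_free_range = true;
  s.free_finger = start;
  s.coalesced = false;
}

// Return the range [s.free_finger, s.free_finger + size) to the free
// lists (cf. flush_cur_free_chunk) and leave the free-range state.
void flush_range(ToySweepState& s, std::size_t size) {
  // ... addChunkAndRepairOffsetTable(free_finger, size, coalesced) ...
  s.in_free_range = false;
}

// For each free or garbage chunk the sweeper encounters, either grow the
// current range over it or flush the range and start a new one at the
// chunk (cf. do_post_free_or_garbage_chunk).
void visit_chunk(ToySweepState& s, std::size_t addr, std::size_t size,
                 bool should_coalesce) {
  if (s.in_free_range && should_coalesce) {
    s.coalesced = true;  // range now extends through addr + size
  } else {
    if (s.in_free_range) {
      flush_range(s, addr - s.free_finger);
    }
    start_range(s, addr);
  }
}
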