src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp

*** 7346,7355 ****
--- 7346,7363 ----
    set_inFreeRange(true);
    set_lastFreeRangeCoalesced(false);
  
    set_freeFinger(freeFinger);
    set_freeRangeInFreeLists(freeRangeInFreeLists);
+   if (CMSTestInFreeList) {
+     if (freeRangeInFreeLists) {
+       FreeChunk* fc = (FreeChunk*) freeFinger;
+       assert(fc->is_free(), "A chunk on the free list should be free.");
+       assert(fc->size() > 0, "Free range should have a size");
+       assert(_sp->verify_chunk_in_free_list(fc), "Chunk is not in free lists");
+     }
+   }
  }
  
  // Note that the sweeper runs concurrently with mutators. Thus,
  // it is possible for direct allocation in this generation to happen
  // in the middle of the sweep. Note that the sweeper also coalesces
*** 7498,7508 ****
  // to a free list which may be overpopulated.
  //
  void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
    const size_t size = fc->size();
! 
    // a chunk that is already free, should not have been
    // marked in the bit map
    HeapWord* const addr = (HeapWord*) fc;
    assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
  
    // Verify that the bit map has no bits marked between
--- 7506,7521 ----
  // to a free list which may be overpopulated.
  //
  void SweepClosure::do_already_free_chunk(FreeChunk* fc) {
    const size_t size = fc->size();
!   // Chunks that cannot be coalesced are not in the
!   // free lists.
!   if (CMSTestInFreeList && !fc->cantCoalesce()) {
!     assert(_sp->verify_chunk_in_free_list(fc),
!            "free chunk should be in free lists");
!   }
    // a chunk that is already free, should not have been
    // marked in the bit map
    HeapWord* const addr = (HeapWord*) fc;
    assert(!_bitMap->isMarked(addr), "free chunk should be unmarked");
  
    // Verify that the bit map has no bits marked between
*** 7605,7614 ****
--- 7618,7630 ----
                                                   size_t chunkSize) {
    // do_post_free_or_garbage_chunk() should only be called in the case
    // of the adaptive free list allocator.
    const bool fcInFreeLists = fc->is_free();
    assert((HeapWord*)fc <= _limit, "sweep invariant");
+   if (CMSTestInFreeList && fcInFreeLists) {
+     assert(_sp->verify_chunk_in_free_list(fc), "free chunk is not in free lists");
+   }
  
    if (CMSTraceSweeper) {
      gclog_or_tty->print_cr(" -- pick up another chunk at " PTR_FORMAT " (" SIZE_FORMAT ")",
                             p2i(fc), chunkSize);
    }
*** 7657,7666 ****
--- 7673,7686 ----
      // it must be removed from the list and stashed in the closure.
      if (freeRangeInFreeLists()) {
        FreeChunk* const ffc = (FreeChunk*)freeFinger();
        assert(ffc->size() == pointer_delta(fc_addr, freeFinger()),
               "Size of free range is inconsistent with chunk size.");
+       if (CMSTestInFreeList) {
+         assert(_sp->verify_chunk_in_free_list(ffc),
+                "Chunk is not in free lists");
+       }
        _sp->coalDeath(ffc->size());
        _sp->removeFreeChunkFromFreeLists(ffc);
        set_freeRangeInFreeLists(false);
      }
      if (fcInFreeLists) {
*** 7725,7734 ****
--- 7745,7760 ----
  void SweepClosure::flush_cur_free_chunk(HeapWord* chunk, size_t size) {
    assert(inFreeRange(), "Should only be called if currently in a free range.");
    assert(size > 0, "A zero sized chunk cannot be added to the free lists.");
  
    if (!freeRangeInFreeLists()) {
+     if (CMSTestInFreeList) {
+       FreeChunk* fc = (FreeChunk*) chunk;
+       fc->set_size(size);
+       assert(!_sp->verify_chunk_in_free_list(fc),
+              "chunk should not be in free lists yet");
+     }
      if (CMSTraceSweeper) {
        gclog_or_tty->print_cr(" -- add free block " PTR_FORMAT " (" SIZE_FORMAT ") to free lists",
                               p2i(chunk), size);
      }
      // A new free range is going to be starting. The current
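
Note: every hunk above follows the same pattern: free-list consistency checks on the sweep path that are compiled in but executed only when the CMSTestInFreeList flag is set, so ordinary sweeps pay nothing for the verification. Below is a minimal standalone sketch of that pattern, not the HotSpot implementation; Chunk, Space, and the flag variable are simplified stand-ins for FreeChunk, CompactibleFreeListSpace, and the real develop flag.

// Sketch of the CMSTestInFreeList guard pattern: a flag-gated,
// potentially expensive membership check against the free lists.
#include <cassert>
#include <cstddef>
#include <algorithm>
#include <vector>

static bool CMSTestInFreeList_flag = true;  // stand-in for the develop flag

struct Chunk {
  std::size_t size;
  bool        is_free;
};

struct Space {
  std::vector<Chunk*> free_list;  // stand-in for the per-size free lists

  // Analogue of verify_chunk_in_free_list(): linear membership test.
  bool verify_chunk_in_free_list(Chunk* c) const {
    return std::find(free_list.begin(), free_list.end(), c) != free_list.end();
  }
};

// Analogue of SweepClosure::initialize_free_range(): when the caller claims
// the range already sits in the free lists, verify the claim under the flag.
void initialize_free_range(const Space* sp, Chunk* finger, bool in_free_lists) {
  if (CMSTestInFreeList_flag && in_free_lists) {
    assert(finger->is_free, "A chunk on the free list should be free.");
    assert(finger->size > 0, "Free range should have a size");
    assert(sp->verify_chunk_in_free_list(finger), "Chunk is not in free lists");
  }
  // ...normal free-range bookkeeping would follow here...
}

int main() {
  Space sp;
  Chunk c = {64, true};
  sp.free_list.push_back(&c);
  initialize_free_range(&sp, &c, true);  // all three checks pass
  return 0;
}

Keeping the guard outside the asserts (rather than folding the flag into each condition) means the membership walk itself, not just the assertion, is skipped in normal runs.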