src/share/vm/gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.cpp


*** 639,648 ****
--- 639,649 ----
  }
  
  class FreeListSpace_DCTOC : public Filtering_DCTOC {
    CompactibleFreeListSpace* _cfls;
    CMSCollector* _collector;
+   bool _parallel;
  protected:
    // Override.
  #define walk_mem_region_with_cl_DECL(ClosureType)                       \
    virtual void walk_mem_region_with_cl(MemRegion mr,                    \
                                         HeapWord* bottom, HeapWord* top, \
*** 659,685 ****
   public:
    FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                        CMSCollector* collector,
                        ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
!                       HeapWord* boundary) :
      Filtering_DCTOC(sp, cl, precision, boundary),
!     _cfls(sp), _collector(collector) {}
  };
  
  // We de-virtualize the block-related calls below, since we know that our
  // space is a CompactibleFreeListSpace.
  
  #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
  void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                     HeapWord* bottom,            \
                                                     HeapWord* top,               \
                                                     ClosureType* cl) {           \
!    bool is_par = GenCollectedHeap::heap()->n_par_threads() > 0;                 \
!    if (is_par) {                                                                \
!      assert(GenCollectedHeap::heap()->n_par_threads() ==                        \
!             GenCollectedHeap::heap()->workers()->active_workers(), "Mismatch"); \
       walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
     } else {                                                                     \
       walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
     }                                                                            \
  }                                                                               \
--- 660,684 ----
   public:
    FreeListSpace_DCTOC(CompactibleFreeListSpace* sp,
                        CMSCollector* collector,
                        ExtendedOopClosure* cl,
                        CardTableModRefBS::PrecisionStyle precision,
!                       HeapWord* boundary,
!                       bool parallel) :
      Filtering_DCTOC(sp, cl, precision, boundary),
!     _cfls(sp), _collector(collector), _parallel(parallel) {}
  };
  
  // We de-virtualize the block-related calls below, since we know that our
  // space is a CompactibleFreeListSpace.
  
  #define FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(ClosureType)          \
  void FreeListSpace_DCTOC::walk_mem_region_with_cl(MemRegion mr,                 \
                                                     HeapWord* bottom,            \
                                                     HeapWord* top,               \
                                                     ClosureType* cl) {           \
!    if (_parallel) {                                                             \
       walk_mem_region_with_cl_par(mr, bottom, top, cl);                          \
     } else {                                                                     \
       walk_mem_region_with_cl_nopar(mr, bottom, top, cl);                        \
     }                                                                            \
  }                                                                               \
*** 745,756 ****
  FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
  
  DirtyCardToOopClosure*
  CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
                                        CardTableModRefBS::PrecisionStyle precision,
!                                       HeapWord* boundary) {
!   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary);
  }
  
  // Note on locking for the space iteration functions:
  // since the collector's iteration activities are concurrent with
--- 744,756 ----
  FreeListSpace_DCTOC__walk_mem_region_with_cl_DEFN(FilteringClosure)
  
  DirtyCardToOopClosure*
  CompactibleFreeListSpace::new_dcto_cl(ExtendedOopClosure* cl,
                                        CardTableModRefBS::PrecisionStyle precision,
!                                       HeapWord* boundary,
!                                       bool parallel) {
!   return new FreeListSpace_DCTOC(this, _collector, cl, precision, boundary, parallel);
  }
  
  // Note on locking for the space iteration functions:
  // since the collector's iteration activities are concurrent with
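The two hunks above follow one pattern: the caller of new_dcto_cl() already knows whether the card-table scan runs in parallel, so it passes an explicit flag down into the closure instead of the closure re-deriving it from global heap state (n_par_threads()) at walk time. A minimal standalone sketch of that pattern, with hypothetical names (not HotSpot code):

    #include <cstdio>

    class Closure {
      bool _parallel;   // decided by the creator, fixed for the closure's lifetime
    public:
      explicit Closure(bool parallel) : _parallel(parallel) {}
      void walk() {
        if (_parallel) {
          std::printf("parallel walk\n");   // would take per-list locks, etc.
        } else {
          std::printf("serial walk\n");
        }
      }
    };

    // Factory mirrors new_dcto_cl(): the flag is threaded through, not rediscovered.
    Closure* new_closure(bool parallel) { return new Closure(parallel); }

    int main() {
      Closure* serial = new_closure(false);
      Closure* par    = new_closure(true);
      serial->walk();
      par->walk();
      delete serial;
      delete par;
      return 0;
    }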
*** 1895,1909 ****
    // adjust block offset table
    OrderAccess::storestore();
    assert(chunk->is_free() && ffc->is_free(), "Error");
    _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
    if (rem_sz < SmallForDictionary) {
!     bool is_par = (GenCollectedHeap::heap()->n_par_threads() > 0);
      if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
-     assert(!is_par ||
-            (GenCollectedHeap::heap()->n_par_threads() ==
-             GenCollectedHeap::heap()->workers()->active_workers()), "Mismatch");
      returnChunkToFreeList(ffc);
      split(size, rem_sz);
      if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
    } else {
      returnChunkToDictionary(ffc);
--- 1895,1907 ----
    // adjust block offset table
    OrderAccess::storestore();
    assert(chunk->is_free() && ffc->is_free(), "Error");
    _bt.split_block((HeapWord*)chunk, chunk->size(), new_size);
    if (rem_sz < SmallForDictionary) {
!     // The freeList lock is held, but multiple GC task threads might be executing in parallel.
!     bool is_par = Thread::current()->is_GC_task_thread();
      if (is_par) _indexedFreeListParLocks[rem_sz]->lock();
      returnChunkToFreeList(ffc);
      split(size, rem_sz);
      if (is_par) _indexedFreeListParLocks[rem_sz]->unlock();
    } else {
      returnChunkToDictionary(ffc);
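Here the parallel/serial decision is derived from the role of the current thread (is it a GC task thread?) rather than from a global count of parallel threads, which keeps the fine-grained per-size-class locking correct even when the global count is no longer maintained. A standalone sketch of the idea, with hypothetical names (not HotSpot code); a thread_local flag stands in for Thread::current()->is_GC_task_thread():

    #include <cstdio>
    #include <mutex>
    #include <thread>

    thread_local bool t_is_gc_worker = false;   // set when a GC worker thread starts
    std::mutex indexed_free_list_lock;          // stand-in for _indexedFreeListParLocks[rem_sz]

    void return_chunk_to_free_list() {
      // Only threads running as parallel workers need the finer-grained per-list lock.
      bool is_par = t_is_gc_worker;
      if (is_par) indexed_free_list_lock.lock();
      std::printf("chunk returned (%s)\n", is_par ? "parallel" : "serial");
      if (is_par) indexed_free_list_lock.unlock();
    }

    int main() {
      return_chunk_to_free_list();              // serial path on the main thread
      std::thread worker([] {
        t_is_gc_worker = true;                  // mark this thread as a GC worker
        return_chunk_to_free_list();            // takes the per-list lock
      });
      worker.join();
      return 0;
    }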
*** 1970,1990 ****
    _promoInfo.startTrackingPromotions();
  }
  
  bool CompactibleFreeListSpace::no_allocs_since_save_marks() {
    assert(_promoInfo.tracking(), "No preceding save_marks?");
-   assert(GenCollectedHeap::heap()->n_par_threads() == 0,
-          "Shouldn't be called if using parallel gc.");
    return _promoInfo.noPromotions();
  }
  
  #define CFLS_OOP_SINCE_SAVE_MARKS_DEFN(OopClosureType, nv_suffix)              \
                                                                                  \
  void CompactibleFreeListSpace::                                                 \
  oop_since_save_marks_iterate##nv_suffix(OopClosureType* blk) {                  \
-   assert(GenCollectedHeap::heap()->n_par_threads() == 0,                        \
-          "Shouldn't be called (yet) during parallel part of gc.");              \
    _promoInfo.promoted_oops_iterate##nv_suffix(blk);                             \
    /*                                                                            \
     * This also restores any displaced headers and removes the elements from     \
     * the iteration set as they are processed, so that we have a clean slate     \
     * at the end of the iteration. Note, thus, that if new objects are           \
--- 1968,1984 ----