
src/share/vm/gc/cms/concurrentMarkSweepGeneration.cpp


*** 181,191 ****
// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
!   CFLS_LAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
--- 181,191 ----
// This struct contains per-thread things necessary to support parallel
// young-gen collection.
class CMSParGCThreadState: public CHeapObj<mtGC> {
 public:
!   CompactibleFreeListSpaceLAB lab;
  PromotionInfo promo;

  // Constructor.
  CMSParGCThreadState(CompactibleFreeListSpace* cfls) : lab(cfls) {
    promo.setSpace(cfls);
*** 1108,1118 ****
// hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
!     log_trace(gc)("CMSCollector: collect because of explicit gc request (or gc_locker)");
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
--- 1108,1118 ----
// hip and should be fixed by untying them.
}

bool CMSCollector::shouldConcurrentCollect() {
  if (_full_gc_requested) {
!     log_trace(gc)("CMSCollector: collect because of explicit gc request (or GCLocker)");
    return true;
  }

  FreelistLocker x(this);
  // ------------------------------------------------------------------
*** 1267,1282 ****
                                       size_t size,
                                       bool   tlab)
{
  // The following "if" branch is present for defensive reasons.
  // In the current uses of this interface, it can be replaced with:
!   // assert(!GC_locker.is_active(), "Can't be called otherwise");
  // But I am not placing that assert here to allow future
  // generality in invoking this interface.
!   if (GC_locker::is_active()) {
!     // A consistency test for GC_locker
!     assert(GC_locker::needs_gc(), "Should have been set already");
    // Skip this foreground collection, instead
    // expanding the heap if necessary.
    // Need the free list locks for the call to free() in compute_new_size()
    compute_new_size();
    return;
--- 1267,1282 ----
                                       size_t size,
                                       bool   tlab)
{
  // The following "if" branch is present for defensive reasons.
  // In the current uses of this interface, it can be replaced with:
!   // assert(!GCLocker.is_active(), "Can't be called otherwise");
  // But I am not placing that assert here to allow future
  // generality in invoking this interface.
!   if (GCLocker::is_active()) {
!     // A consistency test for GCLocker
!     assert(GCLocker::needs_gc(), "Should have been set already");
    // Skip this foreground collection, instead
    // expanding the heap if necessary.
    // Need the free list locks for the call to free() in compute_new_size()
    compute_new_size();
    return;
*** 3270,3280 ****
    if (prev_obj < span.end()) {
      MemRegion my_span = MemRegion(prev_obj, span.end());
      // Do the marking work within a non-empty span --
      // the last argument to the constructor indicates whether the
      // iteration should be incremental with periodic yields.
!       Par_MarkFromRootsClosure cl(this, _collector, my_span,
                                  &_collector->_markBitMap,
                                  work_queue(i),
                                  &_collector->_markStack);
      _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
    } // else nothing to do for this task
--- 3270,3280 ----
    if (prev_obj < span.end()) {
      MemRegion my_span = MemRegion(prev_obj, span.end());
      // Do the marking work within a non-empty span --
      // the last argument to the constructor indicates whether the
      // iteration should be incremental with periodic yields.
!       ParMarkFromRootsClosure cl(this, _collector, my_span,
                                  &_collector->_markBitMap,
                                  work_queue(i),
                                  &_collector->_markStack);
      _collector->_markBitMap.iterate(&cl, my_span.start(), my_span.end());
    } // else nothing to do for this task
*** 3289,3310 ****
  // have been bumped up by the thread that claimed the last
  // task.
  pst->all_tasks_completed();
}

! class Par_ConcMarkingClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  CMSConcMarkingTask* _task;
  MemRegion _span;
  CMSBitMap* _bit_map;
  CMSMarkStack* _overflow_stack;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
!   Par_ConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
    MetadataAwareOopClosure(collector->ref_processor()),
    _collector(collector),
    _task(task),
    _span(collector->_span),
--- 3289,3310 ----
  // have been bumped up by the thread that claimed the last
  // task.
  pst->all_tasks_completed();
}

! class ParConcMarkingClosure: public MetadataAwareOopClosure {
 private:
  CMSCollector* _collector;
  CMSConcMarkingTask* _task;
  MemRegion _span;
  CMSBitMap* _bit_map;
  CMSMarkStack* _overflow_stack;
  OopTaskQueue* _work_queue;
 protected:
  DO_OOP_WORK_DEFN
 public:
!   ParConcMarkingClosure(CMSCollector* collector, CMSConcMarkingTask* task, OopTaskQueue* work_queue,
                         CMSBitMap* bit_map, CMSMarkStack* overflow_stack):
    MetadataAwareOopClosure(collector->ref_processor()),
    _collector(collector),
    _task(task),
    _span(collector->_span),
*** 3328,3338 ****
// the salient assumption here is that any references
// that are in these stolen objects being scanned must
// already have been initialized (else they would not have
// been published), so we do not need to check for
// uninitialized objects before pushing here.
! void Par_ConcMarkingClosure::do_oop(oop obj) {
  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
--- 3328,3338 ----
// the salient assumption here is that any references
// that are in these stolen objects being scanned must
// already have been initialized (else they would not have
// been published), so we do not need to check for
// uninitialized objects before pushing here.
! void ParConcMarkingClosure::do_oop(oop obj) {
  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  // Check if oop points into the CMS generation
  // and is not marked
  if (_span.contains(addr) && !_bit_map->isMarked(addr)) {
*** 3364,3377 ****
      } // Else, some other thread got there first
      do_yield_check();
    }
  }

! void Par_ConcMarkingClosure::do_oop(oop* p)       { Par_ConcMarkingClosure::do_oop_work(p); }
! void Par_ConcMarkingClosure::do_oop(narrowOop* p) { Par_ConcMarkingClosure::do_oop_work(p); }

! void Par_ConcMarkingClosure::trim_queue(size_t max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(new_oop->is_oop(), "Should be an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
--- 3364,3377 ----
      } // Else, some other thread got there first
      do_yield_check();
    }
  }

! void ParConcMarkingClosure::do_oop(oop* p)       { ParConcMarkingClosure::do_oop_work(p); }
! void ParConcMarkingClosure::do_oop(narrowOop* p) { ParConcMarkingClosure::do_oop_work(p); }

! void ParConcMarkingClosure::trim_queue(size_t max) {
  while (_work_queue->size() > max) {
    oop new_oop;
    if (_work_queue->pop_local(new_oop)) {
      assert(new_oop->is_oop(), "Should be an oop");
      assert(_bit_map->isMarked((HeapWord*)new_oop), "Grey object");
*** 3383,3393 ****
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
! void Par_ConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below.
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
--- 3383,3393 ----
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
! void ParConcMarkingClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below.
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
*** 3402,3412 ****
  OopTaskQueue* work_q = work_queue(i);
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);
  CMSMarkStack* ovflw = &(_collector->_markStack);
  int* seed = _collector->hash_seed(i);
!   Par_ConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
  while (true) {
    cl.trim_queue(0);
    assert(work_q->size() == 0, "Should have been emptied above");
    if (get_work_from_overflow_stack(ovflw, work_q)) {
      // Can't assert below because the work obtained from the
--- 3402,3412 ----
  OopTaskQueue* work_q = work_queue(i);
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);
  CMSMarkStack* ovflw = &(_collector->_markStack);
  int* seed = _collector->hash_seed(i);
!   ParConcMarkingClosure cl(_collector, this, work_q, bm, ovflw);
  while (true) {
    cl.trim_queue(0);
    assert(work_q->size() == 0, "Should have been emptied above");
    if (get_work_from_overflow_stack(ovflw, work_q)) {
      // Can't assert below because the work obtained from the
*** 4244,4254 ****
  HandleMark hm;

  // ---------- scan from roots --------------
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
!   Par_MarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));

  // ---------- young gen roots --------------
  {
    work_on_young_gen_roots(worker_id, &par_mri_cl);
    _timer.stop();
--- 4244,4254 ----
  HandleMark hm;

  // ---------- scan from roots --------------
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
!   ParMarkRefsIntoClosure par_mri_cl(_collector->_span, &(_collector->_markBitMap));

  // ---------- young gen roots --------------
  {
    work_on_young_gen_roots(worker_id, &par_mri_cl);
    _timer.stop();
*** 4310,4323 ****
  void work(uint worker_id);

 private:
  // ... of dirty cards in old space
  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
!                                   Par_MarkRefsIntoAndScanClosure* cl);

  // ... work stealing for the above
!   void do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl, int* seed);
};

class RemarkKlassClosure : public KlassClosure {
  KlassToOopClosure _cm_klass_closure;
 public:
--- 4310,4323 ----
  void work(uint worker_id);

 private:
  // ... of dirty cards in old space
  void do_dirty_card_rescan_tasks(CompactibleFreeListSpace* sp, int i,
!                                   ParMarkRefsIntoAndScanClosure* cl);

  // ... work stealing for the above
!   void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
};

class RemarkKlassClosure : public KlassClosure {
  KlassToOopClosure _cm_klass_closure;
 public:
*** 4359,4369 ****
  do_young_space_rescan(worker_id, cl, from_space, sca, sct);
  do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
}

// work_queue(i) is passed to the closure
! // Par_MarkRefsIntoAndScanClosure.  The "i" parameter
// also is passed to do_dirty_card_rescan_tasks() and to
// do_work_steal() to select the i-th task_queue.

void CMSParRemarkTask::work(uint worker_id) {
  elapsedTimer _timer;
--- 4359,4369 ----
  do_young_space_rescan(worker_id, cl, from_space, sca, sct);
  do_young_space_rescan(worker_id, cl, eden_space, eca, ect);
}

// work_queue(i) is passed to the closure
! // ParMarkRefsIntoAndScanClosure.  The "i" parameter
// also is passed to do_dirty_card_rescan_tasks() and to
// do_work_steal() to select the i-th task_queue.

void CMSParRemarkTask::work(uint worker_id) {
  elapsedTimer _timer;
*** 4371,4381 ****
  HandleMark hm;

  // ---------- rescan from roots --------------
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
!   Par_MarkRefsIntoAndScanClosure par_mrias_cl(_collector,
    _collector->_span, _collector->ref_processor(),
    &(_collector->_markBitMap),
    work_queue(worker_id));

  // Rescan young gen roots first since these are likely
--- 4371,4381 ----
  HandleMark hm;

  // ---------- rescan from roots --------------
  _timer.start();
  GenCollectedHeap* gch = GenCollectedHeap::heap();
!   ParMarkRefsIntoAndScanClosure par_mrias_cl(_collector,
    _collector->_span, _collector->ref_processor(),
    &(_collector->_markBitMap),
    work_queue(worker_id));

  // Rescan young gen roots first since these are likely
*** 4520,4530 ****
}

void
CMSParRemarkTask::do_dirty_card_rescan_tasks(
  CompactibleFreeListSpace* sp, int i,
!   Par_MarkRefsIntoAndScanClosure* cl) {
  // Until all tasks completed:
  // . claim an unclaimed task
  // . compute region boundaries corresponding to task claimed
  // . transfer dirty bits ct->mut for that region
  // . apply rescanclosure to dirty mut bits for that region
--- 4520,4530 ----
}

void
CMSParRemarkTask::do_dirty_card_rescan_tasks(
  CompactibleFreeListSpace* sp, int i,
!   ParMarkRefsIntoAndScanClosure* cl) {
  // Until all tasks completed:
  // . claim an unclaimed task
  // . compute region boundaries corresponding to task claimed
  // . transfer dirty bits ct->mut for that region
  // . apply rescanclosure to dirty mut bits for that region
*** 4612,4622 ****
  pst->all_tasks_completed(); // declare that i am done
}

// . see if we can share work_queues with ParNew? XXX
void
!   CMSParRemarkTask::do_work_steal(int i, Par_MarkRefsIntoAndScanClosure* cl,
                                  int* seed) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);
--- 4612,4622 ----
  pst->all_tasks_completed(); // declare that i am done
}

// . see if we can share work_queues with ParNew? XXX
void
!   CMSParRemarkTask::do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl,
                                  int* seed) {
  OopTaskQueue* work_q = work_queue(i);
  NOT_PRODUCT(int num_steals = 0;)
  oop obj_to_scan;
  CMSBitMap* bm = &(_collector->_markBitMap);
*** 5830,5860 ****
}

void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }

! Par_MarkRefsIntoClosure::Par_MarkRefsIntoClosure(
  MemRegion span, CMSBitMap* bitMap):
    _span(span),
    _bitMap(bitMap)
{
  assert(ref_processor() == NULL, "deliberately left NULL");
  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}

! void Par_MarkRefsIntoClosure::do_oop(oop obj) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  assert(obj->is_oop(), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    // this should be made more efficient
    _bitMap->par_mark(addr);
  }
}

! void Par_MarkRefsIntoClosure::do_oop(oop* p)       { Par_MarkRefsIntoClosure::do_oop_work(p); }
! void Par_MarkRefsIntoClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoClosure::do_oop_work(p); }

// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
    _span(span),
--- 5830,5860 ----
}

void MarkRefsIntoClosure::do_oop(oop* p)       { MarkRefsIntoClosure::do_oop_work(p); }
void MarkRefsIntoClosure::do_oop(narrowOop* p) { MarkRefsIntoClosure::do_oop_work(p); }

! ParMarkRefsIntoClosure::ParMarkRefsIntoClosure(
  MemRegion span, CMSBitMap* bitMap):
    _span(span),
    _bitMap(bitMap)
{
  assert(ref_processor() == NULL, "deliberately left NULL");
  assert(_bitMap->covers(_span), "_bitMap/_span mismatch");
}

! void ParMarkRefsIntoClosure::do_oop(oop obj) {
  // if p points into _span, then mark corresponding bit in _markBitMap
  assert(obj->is_oop(), "expected an oop");
  HeapWord* addr = (HeapWord*)obj;
  if (_span.contains(addr)) {
    // this should be made more efficient
    _bitMap->par_mark(addr);
  }
}

! void ParMarkRefsIntoClosure::do_oop(oop* p)       { ParMarkRefsIntoClosure::do_oop_work(p); }
! void ParMarkRefsIntoClosure::do_oop(narrowOop* p) { ParMarkRefsIntoClosure::do_oop_work(p); }

// A variant of the above, used for CMS marking verification.
MarkRefsIntoVerifyClosure::MarkRefsIntoVerifyClosure(
  MemRegion span, CMSBitMap* verification_bm, CMSBitMap* cms_bm):
    _span(span),
*** 5987,6008 ****
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

///////////////////////////////////////////////////////////
! // Par_MarkRefsIntoAndScanClosure: a parallel version of
//                                  MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
! Par_MarkRefsIntoAndScanClosure::Par_MarkRefsIntoAndScanClosure(
  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
  CMSBitMap* bit_map, OopTaskQueue* work_queue):
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _low_water_mark(MIN2((work_queue->max_elems()/4),
                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
!   _par_pushAndMarkClosure(collector, span, rp, bit_map, work_queue)
{
  // FIXME: Should initialize in base class constructor.
  assert(rp != NULL, "ref_processor shouldn't be NULL");
  set_ref_processor_internal(rp);
}
--- 5987,6008 ----
  _bit_map->lock()->lock_without_safepoint_check();
  _collector->startTimer();
}

///////////////////////////////////////////////////////////
! // ParMarkRefsIntoAndScanClosure: a parallel version of
//                                  MarkRefsIntoAndScanClosure
///////////////////////////////////////////////////////////
! ParMarkRefsIntoAndScanClosure::ParMarkRefsIntoAndScanClosure(
  CMSCollector* collector, MemRegion span, ReferenceProcessor* rp,
  CMSBitMap* bit_map, OopTaskQueue* work_queue):
  _span(span),
  _bit_map(bit_map),
  _work_queue(work_queue),
  _low_water_mark(MIN2((work_queue->max_elems()/4),
                       ((uint)CMSWorkQueueDrainThreshold * ParallelGCThreads))),
!   _parPushAndMarkClosure(collector, span, rp, bit_map, work_queue)
{
  // FIXME: Should initialize in base class constructor.
  assert(rp != NULL, "ref_processor shouldn't be NULL");
  set_ref_processor_internal(rp);
}
*** 6012,6022 ****
// the unmarked oops. The marks are made in the marking bit map and
// the work_queue is used for keeping the (newly) grey objects during
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
! void Par_MarkRefsIntoAndScanClosure::do_oop(oop obj) {
  if (obj != NULL) {
    // Ignore mark word because this could be an already marked oop
    // that may be chained at the end of the overflow list.
    assert(obj->is_oop(true), "expected an oop");
    HeapWord* addr = (HeapWord*)obj;
--- 6012,6022 ----
// the unmarked oops. The marks are made in the marking bit map and
// the work_queue is used for keeping the (newly) grey objects during
// the scan phase whence they are also available for stealing by parallel
// threads. Since the marking bit map is shared, updates are
// synchronized (via CAS).
! void ParMarkRefsIntoAndScanClosure::do_oop(oop obj) {
  if (obj != NULL) {
    // Ignore mark word because this could be an already marked oop
    // that may be chained at the end of the overflow list.
    assert(obj->is_oop(true), "expected an oop");
    HeapWord* addr = (HeapWord*)obj;
*** 6039,6050 ****
      } // Else, another thread claimed the object
    }
  }
}

! void Par_MarkRefsIntoAndScanClosure::do_oop(oop* p)       { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }
! void Par_MarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { Par_MarkRefsIntoAndScanClosure::do_oop_work(p); }

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
  oop p, MemRegion mr) {
--- 6039,6050 ----
      } // Else, another thread claimed the object
    }
  }
}

! void ParMarkRefsIntoAndScanClosure::do_oop(oop* p)       { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }
! void ParMarkRefsIntoAndScanClosure::do_oop(narrowOop* p) { ParMarkRefsIntoAndScanClosure::do_oop_work(p); }

// This closure is used to rescan the marked objects on the dirty cards
// in the mod union table and the card table proper.
size_t ScanMarkedObjectsAgainCarefullyClosure::do_object_careful_m(
  oop p, MemRegion mr) {
*** 6424,6434 ****
    do_yield_check();
  }
  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
}

! Par_MarkFromRootsClosure::Par_MarkFromRootsClosure(CMSConcMarkingTask* task,
                       CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map,
                       OopTaskQueue* work_queue,
                       CMSMarkStack* overflow_stack):
  _collector(collector),
--- 6424,6434 ----
    do_yield_check();
  }
  assert(_markStack->isEmpty(), "tautology, emphasizing post-condition");
}

! ParMarkFromRootsClosure::ParMarkFromRootsClosure(CMSConcMarkingTask* task,
                       CMSCollector* collector, MemRegion span,
                       CMSBitMap* bit_map,
                       OopTaskQueue* work_queue,
                       CMSMarkStack* overflow_stack):
  _collector(collector),
*** 6447,6457 ****
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

// Should revisit to see if this should be restructured for
// greater efficiency.
! bool Par_MarkFromRootsClosure::do_bit(size_t offset) {
  if (_skip_bits > 0) {
    _skip_bits--;
    return true;
  }
  // convert offset into a HeapWord*
--- 6447,6457 ----
  assert(_span.contains(_finger), "Out of bounds _finger?");
}

// Should revisit to see if this should be restructured for
// greater efficiency.
! bool ParMarkFromRootsClosure::do_bit(size_t offset) {
  if (_skip_bits > 0) {
    _skip_bits--;
    return true;
  }
  // convert offset into a HeapWord*
*** 6472,6482 ****
  }
  scan_oops_in_oop(addr);
  return true;
}

! void Par_MarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  assert(_bit_map->isMarked(ptr), "expected bit to be set");
  // Should we assert that our work queue is empty or
  // below some drain limit?
  assert(_work_queue->size() == 0,
         "should drain stack to limit stack usage");
--- 6472,6482 ----
  }
  scan_oops_in_oop(addr);
  return true;
}

! void ParMarkFromRootsClosure::scan_oops_in_oop(HeapWord* ptr) {
  assert(_bit_map->isMarked(ptr), "expected bit to be set");
  // Should we assert that our work queue is empty or
  // below some drain limit?
  assert(_work_queue->size() == 0,
         "should drain stack to limit stack usage");
--- 6472,6482 ----
*** 6522,6532 ****
    }

    // Note: the local finger doesn't advance while we drain
    // the stack below, but the global finger sure can and will.
    HeapWord** gfa = _task->global_finger_addr();
!     Par_PushOrMarkClosure pushOrMarkClosure(_collector,
                                            _span, _bit_map,
                                            _work_queue,
                                            _overflow_stack,
                                            _finger,
                                            gfa, this);
--- 6522,6532 ----
    }

    // Note: the local finger doesn't advance while we drain
    // the stack below, but the global finger sure can and will.
    HeapWord** gfa = _task->global_finger_addr();
!     ParPushOrMarkClosure pushOrMarkClosure(_collector,
                                            _span, _bit_map,
                                            _work_queue,
                                            _overflow_stack,
                                            _finger,
                                            gfa, this);
*** 6555,6565 ****
  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
}

// Yield in response to a request from VM Thread or
// from mutators.
! void Par_MarkFromRootsClosure::do_yield_work() {
  assert(_task != NULL, "sanity");
  _task->yield();
}

// A variant of the above used for verifying CMS marking work.
--- 6555,6565 ----
  assert(_work_queue->size() == 0, "tautology, emphasizing post-condition");
}

// Yield in response to a request from VM Thread or
// from mutators.
! void ParMarkFromRootsClosure::do_yield_work() {
  assert(_task != NULL, "sanity");
  _task->yield();
}

// A variant of the above used for verifying CMS marking work.
*** 6682,6699 ****
  _markStack(markStack),
  _finger(finger),
  _parent(parent)
{ }

! Par_PushOrMarkClosure::Par_PushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bit_map,
                     OopTaskQueue* work_queue,
                     CMSMarkStack* overflow_stack,
                     HeapWord* finger,
                     HeapWord** global_finger_addr,
!                      Par_MarkFromRootsClosure* parent) :
  MetadataAwareOopClosure(collector->ref_processor()),
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
--- 6682,6699 ----
  _markStack(markStack),
  _finger(finger),
  _parent(parent)
{ }

! ParPushOrMarkClosure::ParPushOrMarkClosure(CMSCollector* collector,
                     MemRegion span,
                     CMSBitMap* bit_map,
                     OopTaskQueue* work_queue,
                     CMSMarkStack* overflow_stack,
                     HeapWord* finger,
                     HeapWord** global_finger_addr,
!                      ParMarkFromRootsClosure* parent) :
  MetadataAwareOopClosure(collector->ref_processor()),
  _collector(collector),
  _whole_span(collector->_span),
  _span(span),
  _bit_map(bit_map),
*** 6727,6737 ****
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
! void Par_PushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below.
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
--- 6727,6737 ----
}

// Upon stack overflow, we discard (part of) the stack,
// remembering the least address amongst those discarded
// in CMSCollector's _restart_address.
! void ParPushOrMarkClosure::handle_stack_overflow(HeapWord* lost) {
  // We need to do this under a mutex to prevent other
  // workers from interfering with the work done below.
  MutexLockerEx ml(_overflow_stack->par_lock(),
                   Mutex::_no_safepoint_check_flag);
  // Remember the least grey address discarded
*** 6774,6784 ****
}

void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

! void Par_PushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
--- 6774,6784 ----
}

void PushOrMarkClosure::do_oop(oop* p)       { PushOrMarkClosure::do_oop_work(p); }
void PushOrMarkClosure::do_oop(narrowOop* p) { PushOrMarkClosure::do_oop_work(p); }

! void ParPushOrMarkClosure::do_oop(oop obj) {
  // Ignore mark word because we are running concurrent with mutators.
  assert(obj->is_oop_or_null(true), "Expected an oop or NULL at " PTR_FORMAT, p2i(obj));
  HeapWord* addr = (HeapWord*)obj;
  if (_whole_span.contains(addr) && !_bit_map->isMarked(addr)) {
    // Oop lies in _span and isn't yet grey or black
*** 6820,6831 ****
    }
    do_yield_check();
  }
}

! void Par_PushOrMarkClosure::do_oop(oop* p)       { Par_PushOrMarkClosure::do_oop_work(p); }
! void Par_PushOrMarkClosure::do_oop(narrowOop* p) { Par_PushOrMarkClosure::do_oop_work(p); }

PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                       MemRegion span,
                                       ReferenceProcessor* rp,
                                       CMSBitMap* bit_map,
--- 6820,6831 ----
    }
    do_yield_check();
  }
}

! void ParPushOrMarkClosure::do_oop(oop* p)       { ParPushOrMarkClosure::do_oop_work(p); }
! void ParPushOrMarkClosure::do_oop(narrowOop* p) { ParPushOrMarkClosure::do_oop_work(p); }

PushAndMarkClosure::PushAndMarkClosure(CMSCollector* collector,
                                       MemRegion span,
                                       ReferenceProcessor* rp,
                                       CMSBitMap* bit_map,
*** 6898,6908 ****
      }
    }
  }
}

! Par_PushAndMarkClosure::Par_PushAndMarkClosure(CMSCollector* collector,
                                               MemRegion span,
                                               ReferenceProcessor* rp,
                                               CMSBitMap* bit_map,
                                               OopTaskQueue* work_queue):
  MetadataAwareOopClosure(rp),
--- 6898,6908 ----
      }
    }
  }
}

! ParPushAndMarkClosure::ParPushAndMarkClosure(CMSCollector* collector,
                                               MemRegion span,
                                               ReferenceProcessor* rp,
                                               CMSBitMap* bit_map,
                                               OopTaskQueue* work_queue):
  MetadataAwareOopClosure(rp),
*** 6917,6927 ****
void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }

// Grey object rescan during second checkpoint phase --
// the parallel version.
! void Par_PushAndMarkClosure::do_oop(oop obj) {
  // In the assert below, we ignore the mark word because
  // this oop may point to an already visited object that is
  // on the overflow stack (in which case the mark word has
  // been hijacked for chaining into the overflow stack --
  // if this is the last object in the overflow stack then
--- 6917,6927 ----
void PushAndMarkClosure::do_oop(oop* p)       { PushAndMarkClosure::do_oop_work(p); }
void PushAndMarkClosure::do_oop(narrowOop* p) { PushAndMarkClosure::do_oop_work(p); }

// Grey object rescan during second checkpoint phase --
// the parallel version.
! void ParPushAndMarkClosure::do_oop(oop obj) {
  // In the assert below, we ignore the mark word because
  // this oop may point to an already visited object that is
  // on the overflow stack (in which case the mark word has
  // been hijacked for chaining into the overflow stack --
  // if this is the last object in the overflow stack then
*** 6957,6968 ****
      }
    } // Else, some other thread got there first
  }
}

! void Par_PushAndMarkClosure::do_oop(oop* p)       { Par_PushAndMarkClosure::do_oop_work(p); }
! void Par_PushAndMarkClosure::do_oop(narrowOop* p) { Par_PushAndMarkClosure::do_oop_work(p); }

void CMSPrecleanRefsYieldClosure::do_yield_work() {
  Mutex* bml = _collector->bitMapLock();
  assert_lock_strong(bml);
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),
--- 6957,6968 ----
      }
    } // Else, some other thread got there first
  }
}

! void ParPushAndMarkClosure::do_oop(oop* p)       { ParPushAndMarkClosure::do_oop_work(p); }
! void ParPushAndMarkClosure::do_oop(narrowOop* p) { ParPushAndMarkClosure::do_oop_work(p); }

void CMSPrecleanRefsYieldClosure::do_yield_work() {
  Mutex* bml = _collector->bitMapLock();
  assert_lock_strong(bml);
  assert(ConcurrentMarkSweepThread::cms_thread_has_cms_token(),