
src/share/vm/gc_implementation/g1/concurrentMark.cpp

rev 7522 : * * *
8066827: Remove ReferenceProcessor::clean_up_discovered_references()
Summary: Abandon rather than clean up discovered references.
Reviewed-by:
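
The two listings below are the old (rev 7522) and the proposed version of
this hunk; the only change in this excerpt is at line 974, where the
verification arguments are dropped from the enable_discovery() call:

  -  rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
  +  rp->enable_discovery();

Per the summary, discovered references are now abandoned rather than cleaned
up. As a rough illustration of what "abandoning" a discovered list means (a
toy model; Ref and abandon_discovered_list are invented for this sketch, not
HotSpot code): the list head is dropped and each link severed, with no
per-entry processing.

  #include <cassert>
  #include <cstddef>

  // Toy stand-in for a discovered reference: just the list link.
  struct Ref {
    Ref* discovered;   // next entry on the discovered list, or NULL
  };

  // "Abandon": walk the list only to sever links, so every entry looks
  // undiscovered again; nothing is examined, enqueued, or cleaned.
  static void abandon_discovered_list(Ref*& head) {
    Ref* r = head;
    head = NULL;
    while (r != NULL) {
      Ref* next = r->discovered;
      r->discovered = NULL;
      r = next;
    }
  }

  int main() {
    Ref b = { NULL };
    Ref a = { &b };
    Ref* head = &a;
    abandon_discovered_list(head);
    assert(head == NULL && a.discovered == NULL && b.discovered == NULL);
    return 0;
  }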


 954   // For each region note start of marking.
 955   NoteStartOfMarkHRClosure startcl;
 956   g1h->heap_region_iterate(&startcl);
 957 }
 958 
 959 
 960 void ConcurrentMark::checkpointRootsInitialPost() {
 961   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 962 
 963   // If we force an overflow during remark, the remark operation will
 964   // actually abort and we'll restart concurrent marking. If we always
 965   // force an overflow during remark we'll never actually complete the
 966   // marking phase. So, we initialize this here, at the start of the
 967   // cycle, so that the remaining overflow number will decrease at
 968   // every remark and we'll eventually not need to cause one.
 969   force_overflow_stw()->init();
 970 
 971   // Start Concurrent Marking weak-reference discovery.
 972   ReferenceProcessor* rp = g1h->ref_processor_cm();
 973   // enable ("weak") refs discovery
 974   rp->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
 975   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 976 
 977   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 978   // This is the start of the marking cycle. We expect all
 979   // threads to have SATB queues with active set to false.
 980   satb_mq_set.set_active_all_threads(true, /* new active value */
 981                                      false /* expected_active */);
 982 
 983   _root_regions.prepare_for_scan();
 984 
 985   // update_g1_committed() will be called at the end of an evac pause
 986   // when marking is on. So, it's also called at the end of the
 987   // initial-mark pause to update the heap end, if the heap expands
 988   // during it. No need to call it here.
 989 }
 990 
 991 /*
 992  * Notice that in the next two methods, we actually leave the STS
 993  * during the barrier sync and join it immediately afterwards. If we
 994  * do not do this, the following deadlock can occur: one thread could




 954   // For each region note start of marking.
 955   NoteStartOfMarkHRClosure startcl;
 956   g1h->heap_region_iterate(&startcl);
 957 }
 958 
 959 
 960 void ConcurrentMark::checkpointRootsInitialPost() {
 961   G1CollectedHeap*   g1h = G1CollectedHeap::heap();
 962 
 963   // If we force an overflow during remark, the remark operation will
 964   // actually abort and we'll restart concurrent marking. If we always
 965   // force an overflow during remark we'll never actually complete the
 966   // marking phase. So, we initialize this here, at the start of the
 967   // cycle, so that the remaining overflow number will decrease at
 968   // every remark and we'll eventually not need to cause one.
 969   force_overflow_stw()->init();
 970 
 971   // Start Concurrent Marking weak-reference discovery.
 972   ReferenceProcessor* rp = g1h->ref_processor_cm();
 973   // enable ("weak") refs discovery
 974   rp->enable_discovery();
 975   rp->setup_policy(false); // snapshot the soft ref policy to be used in this cycle
 976 
 977   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 978   // This is the start of the marking cycle. We expect all
 979   // threads to have SATB queues with active set to false.
 980   satb_mq_set.set_active_all_threads(true, /* new active value */
 981                                      false /* expected_active */);
 982 
 983   _root_regions.prepare_for_scan();
 984 
 985   // update_g1_committed() will be called at the end of an evac pause
 986   // when marking is on. So, it's also called at the end of the
 987   // initial-mark pause to update the heap end, if the heap expands
 988   // during it. No need to call it here.
 989 }
 990 
 991 /*
 992  * Notice that in the next two methods, we actually leave the STS
 993  * during the barrier sync and join it immediately afterwards. If we
 994  * do not do this, the following deadlock can occur: one thread could
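
The comment at lines 963-968 describes a counter that is initialized once
per cycle and decremented at each remark, so an overflow is forced only for
the first few remark attempts. A minimal sketch of that pattern, assuming a
simple countdown (the real settings object returned by force_overflow_stw()
differs in detail):

  #include <cassert>

  struct ForceOverflowSketch {
    int _remaining;                 // overflows still to be forced
    void init(int n) { _remaining = n; }
    // Queried once per remark: force an overflow while any remain.
    bool should_force() {
      if (_remaining > 0) {
        _remaining--;
        return true;
      }
      return false;
    }
  };

  int main() {
    ForceOverflowSketch f;
    f.init(2);                      // set once, at the start of the cycle
    assert(f.should_force());       // remark 1 aborts, marking restarts
    assert(f.should_force());       // remark 2 aborts, marking restarts
    assert(!f.should_force());      // remark 3 is allowed to complete
    return 0;
  }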

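Likewise, the set_active_all_threads(true, false) call at lines 980-981
flips every thread's SATB queue to active while checking the state it
expects to find. A toy version of that check-then-set pattern (all types
here are invented for illustration):

  #include <cassert>
  #include <cstddef>
  #include <vector>

  struct SATBQueueSketch {
    bool _active;
    SATBQueueSketch() : _active(false) { }   // queues start inactive
  };

  // Set every queue's active flag, asserting the value we expect to replace.
  static void set_active_all(std::vector<SATBQueueSketch>& qs,
                             bool new_value, bool expected) {
    for (size_t i = 0; i < qs.size(); i++) {
      assert(qs[i]._active == expected && "queue in unexpected state");
      qs[i]._active = new_value;
    }
  }

  int main() {
    std::vector<SATBQueueSketch> qs(4);
    set_active_all(qs, true /* new active value */, false /* expected_active */);
    for (size_t i = 0; i < qs.size(); i++) {
      assert(qs[i]._active);
    }
    return 0;
  }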
