src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 4973 : imported patch checkpointing.diff

*** 89,108 ****
  // Local to this file.
  
  class RefineCardTableEntryClosure: public CardTableEntryClosure {
    SuspendibleThreadSet* _sts;
    G1RemSet* _g1rs;
-   ConcurrentG1Refine* _cg1r;
    bool _concurrent;
  public:
    RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
!                               G1RemSet* g1rs,
!                               ConcurrentG1Refine* cg1r) :
!     _sts(sts), _g1rs(g1rs), _cg1r(cg1r), _concurrent(true)
    {}
  
    bool do_card_ptr(jbyte* card_ptr, int worker_i) {
!     bool oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
      // This path is executed by the concurrent refine or mutator threads,
      // concurrently, and so we do not care if card_ptr contains references
      // that point into the collection set.
      assert(!oops_into_cset, "should be");
--- 89,112 ----
  // Local to this file.
  
  class RefineCardTableEntryClosure: public CardTableEntryClosure {
    SuspendibleThreadSet* _sts;
    G1RemSet* _g1rs;
    bool _concurrent;
  public:
    RefineCardTableEntryClosure(SuspendibleThreadSet* sts,
!                               G1RemSet* g1rs) :
!     _sts(sts), _g1rs(g1rs), _concurrent(true)
    {}
  
    bool do_card_ptr(jbyte* card_ptr, int worker_i) {
!     bool oops_into_cset;
!     if (_concurrent) {
!       oops_into_cset = _g1rs->refine_card_without_check(card_ptr, worker_i, false);
!     } else {
!       assert(SafepointSynchronize::is_at_safepoint(), "only safe if at safepoint");
!       oops_into_cset = _g1rs->refine_card(card_ptr, worker_i, false);
!     }
      // This path is executed by the concurrent refine or mutator threads,
      // concurrently, and so we do not care if card_ptr contains references
      // that point into the collection set.
      assert(!oops_into_cset, "should be");
*** 114,123 ****
--- 118,160 ----
      return true;
    }
  
    void set_concurrent(bool b) { _concurrent = b; }
  };
  
+ class TransferDirtyCardsToRefinementClosure: public CardTableEntryClosure {
+ public:
+   class FlushTransferClosure: public VoidClosure {
+     friend class TransferDirtyCardsToRefinementClosure;
+     TransferDirtyCardsToRefinementClosure* _cl;
+   public:
+     void do_void() {
+       assert(_cl != NULL, "should be inited");
+       _cl->_dcq.flush();
+     }
+   };
+ 
+ private:
+   FlushTransferClosure _flush_cl;
+   DirtyCardQueue _dcq;
+ public:
+   TransferDirtyCardsToRefinementClosure(DirtyCardQueueSet& target) : _dcq(&target) {
+     _flush_cl._cl = this;
+   }
+ 
+   bool do_card_ptr(jbyte* card_ptr, int worker_i) {
+     assert(SafepointSynchronize::is_at_safepoint(), "only safe to transfer/clean cards at safepoint");
+     assert(!G1CollectedHeap::heap()->is_gc_active(), "should not get here if doing a gc");
+     _dcq.enqueue(card_ptr);
+     *card_ptr = CardTableModRefBS::clean_card_val();
+     return true;
+   }
+ 
+ public:
+   FlushTransferClosure* flush_cl() { return &_flush_cl; }
+ };
+ 
+ 
  class ClearLoggedCardTableEntryClosure: public CardTableEntryClosure {
    int _calls;
    G1CollectedHeap* _g1h;
    CardTableModRefBS* _ctbs;
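The hunk above is the core of the checkpointing change: instead of refining mutator-logged cards in place, the new closure hands each dirty card over to a queue owned by concurrent refinement and resets the card byte to clean, so a later mutator write dirties (and logs) the card again. Below is a minimal standalone sketch of that hand-off pattern; the types are simplified stand-ins invented for illustration, not HotSpot code.

    #include <cstddef>
    #include <vector>
    
    typedef signed char CardValue;           // stand-in for jbyte
    static const CardValue dirty_card = 0;   // stand-in for dirty_card_val()
    static const CardValue clean_card = -1;  // stand-in for clean_card_val()
    
    // Stand-in for the DirtyCardQueue feeding cards_ready_for_refinement().
    struct RefinementQueue {
      std::vector<CardValue*> cards;
      void enqueue(CardValue* card) { cards.push_back(card); }
    };
    
    // Analogue of TransferDirtyCardsToRefinementClosure::do_card_ptr():
    // hand the card to the refinement queue, then clean it so that later
    // mutator writes dirty (and log) it again.
    bool transfer_card(CardValue* card_ptr, RefinementQueue& target) {
      target.enqueue(card_ptr);
      *card_ptr = clean_card;
      return true;  // keep iterating over the buffer
    }
    
    int main() {
      CardValue card_table[4] = {dirty_card, clean_card, dirty_card, clean_card};
      RefinementQueue ready_for_refinement;
      for (size_t i = 0; i < 4; ++i) {
        if (card_table[i] == dirty_card) {
          transfer_card(&card_table[i], ready_for_refinement);
        }
      }
      return ready_for_refinement.cards.size() == 2 ? 0 : 1;
    }

Cleaning at transfer time is what makes the hand-off safe to repeat: a card re-dirtied by the mutator after the transfer is simply logged and transferred again on a later pass.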
*** 1937,1946 ****
--- 1974,1984 ----
    _evac_failure_scan_stack(NULL),
    _mark_in_progress(false),
    _cg1r(NULL), _summary_bytes_used(0),
    _g1mm(NULL),
    _refine_cte_cl(NULL),
+   _transfer_cte_cl(NULL),
    _full_collection(false),
    _free_list("Master Free List"),
    _secondary_free_list("Secondary Free List"),
    _old_set("Old Set"), _humongous_set("Master Humongous Set"),
*** 2141,2153 ****
    // Perform any initialization actions delegated to the policy.
    g1_policy()->init();
  
    _refine_cte_cl =
      new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
!                                     g1_rem_set(),
!                                     concurrent_g1_refine());
!   JavaThread::dirty_card_queue_set().set_closure(_refine_cte_cl);
  
    JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                                 SATB_Q_FL_lock,
                                                 G1SATBProcessCompletedThreshold,
                                                 Shared_SATB_Q_lock);
--- 2179,2196 ----
    // Perform any initialization actions delegated to the policy.
    g1_policy()->init();
  
    _refine_cte_cl =
      new RefineCardTableEntryClosure(ConcurrentG1RefineThread::sts(),
!                                     g1_rem_set());
!   _transfer_cte_cl =
!     new TransferDirtyCardsToRefinementClosure(concurrent_g1_refine()->cards_ready_for_refinement());
! 
!   concurrent_g1_refine()->set_card_refinement_closure(_refine_cte_cl);
!   concurrent_g1_refine()->set_flush_transfer_closure(_transfer_cte_cl->flush_cl());
! 
!   JavaThread::dirty_card_queue_set().set_closure(_transfer_cte_cl);
  
    JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                                 SATB_Q_FL_lock,
                                                 G1SATBProcessCompletedThreshold,
                                                 Shared_SATB_Q_lock);
*** 2331,2356 ****
    heap_region_iterate(&cl);
    guarantee(!cl.failures(), "all GC time stamps should have been reset");
  }
  #endif // PRODUCT
  
! void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
                                                   DirtyCardQueue* into_cset_dcq,
                                                   bool concurrent,
                                                   int worker_i) {
    // Clean cards in the hot card cache
    G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
    hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
  
-   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
    int n_completed_buffers = 0;
    while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
      n_completed_buffers++;
    }
!   g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
    dcqs.clear_n_completed_buffers();
    assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
  }
  
  // Computes the sum of the storage used by the various regions.
--- 2374,2409 ----
    heap_region_iterate(&cl);
    guarantee(!cl.failures(), "all GC time stamps should have been reset");
  }
  #endif // PRODUCT
  
! int G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
!                                                 CardTableEntryClosure* cleaned_cl,
                                                   DirtyCardQueue* into_cset_dcq,
                                                   bool concurrent,
                                                   int worker_i) {
    // Clean cards in the hot card cache
    G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
    hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
  
    int n_completed_buffers = 0;
+   if (worker_i == 0) {
+     concurrent_g1_refine()->flush_cards_in_buffers();
+   }
+   DirtyCardQueueSet& refinement_dcqs = concurrent_g1_refine()->cards_ready_for_refinement();
+   while (refinement_dcqs.apply_closure_to_completed_buffer(cleaned_cl, worker_i, 0, true)) {
+     n_completed_buffers++;
+   }
+ 
+   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
    while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
      n_completed_buffers++;
    }
!   dcqs.clear_n_completed_buffers();
    assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
+   return n_completed_buffers;
  }
  
  // Computes the sum of the storage used by the various regions.
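With the hunk above, iterate_dirty_card_closure drains two sources on each GC worker: buffers already transferred to refinement, whose cards were cleaned at hand-off time and are processed with cleaned_cl, and the raw mutator buffers, processed with cl. It also now returns the combined buffer count instead of recording the update-RS statistic itself, presumably so the caller can record it. A standalone sketch of the two-source drain, again with simplified stand-in types rather than HotSpot code:

    #include <cstddef>
    #include <deque>
    #include <vector>
    
    typedef signed char CardValue;
    typedef std::vector<CardValue*> Buffer;           // one completed buffer
    typedef bool (*CardClosure)(CardValue* card_ptr);
    
    // Stand-in for DirtyCardQueueSet: pop one completed buffer, apply the
    // closure to every card in it, report whether a buffer was taken.
    struct QueueSet {
      std::deque<Buffer> completed;
      bool apply_closure_to_completed_buffer(CardClosure cl) {
        if (completed.empty()) return false;
        Buffer buf = completed.front();
        completed.pop_front();
        for (size_t i = 0; i < buf.size(); ++i) cl(buf[i]);
        return true;
      }
    };
    
    static bool refine_cleaned(CardValue*) { return true; }  // cleaned_cl stand-in
    static bool refine_dirty(CardValue*)   { return true; }  // cl stand-in
    
    // Analogue of the reworked iterate_dirty_card_closure(): drain the
    // transferred-to-refinement buffers first, then the raw mutator buffers,
    // and return the total so the caller can record the statistic.
    int drain_dirty_cards(QueueSet& refinement_dcqs, QueueSet& mutator_dcqs) {
      int n_completed_buffers = 0;
      while (refinement_dcqs.apply_closure_to_completed_buffer(&refine_cleaned)) {
        n_completed_buffers++;
      }
      while (mutator_dcqs.apply_closure_to_completed_buffer(&refine_dirty)) {
        n_completed_buffers++;
      }
      return n_completed_buffers;
    }
    
    int main() {
      QueueSet refinement_dcqs, mutator_dcqs;
      refinement_dcqs.completed.push_back(Buffer());
      mutator_dcqs.completed.push_back(Buffer());
      return drain_dirty_cards(refinement_dcqs, mutator_dcqs) == 2 ? 0 : 1;
    }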
*** 3046,3055 ****
--- 3099,3113 ----
    } else {
      return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab_size);
    }
  }
  
+ void G1CollectedHeap::inform_non_gc_safepoint() {
+   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
+   while (dcqs.apply_closure_to_completed_buffer(0, 0, true));
+ }
+ 
  size_t G1CollectedHeap::max_capacity() const {
    return _g1_reserved.byte_size();
  }
  
  jlong G1CollectedHeap::millis_since_last_gc() {
*** 5815,5825 ****
    if (G1DeferredRSUpdate) {
      RedirtyLoggedCardTableEntryFastClosure redirty;
      dirty_card_queue_set().set_closure(&redirty);
      dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  
!     DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
      dcq.merge_bufferlists(&dirty_card_queue_set());
      assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
    }
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }
--- 5873,5883 ----
    if (G1DeferredRSUpdate) {
      RedirtyLoggedCardTableEntryFastClosure redirty;
      dirty_card_queue_set().set_closure(&redirty);
      dirty_card_queue_set().apply_closure_to_all_completed_buffers();
  
!     DirtyCardQueueSet& dcq = concurrent_g1_refine()->cards_ready_for_refinement();
      dcq.merge_bufferlists(&dirty_card_queue_set());
      assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
    }
    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
  }
*** 6355,6364 ****
--- 6413,6433 ----
  void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
    _refine_cte_cl->set_concurrent(concurrent);
  }
  
+ void G1CollectedHeap::set_use_transferring_cte_cl(bool transferring) {
+   CardTableEntryClosure* cte_cl = NULL;
+   if (transferring) {
+     cte_cl = _transfer_cte_cl;
+   } else {
+     cte_cl = _refine_cte_cl;
+   }
+   JavaThread::dirty_card_queue_set().set_closure(cte_cl);
+ }
+ 
+ 
  bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
    HeapRegion* hr = heap_region_containing(p);
    if (hr == NULL) {
      return false;
    } else {
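Finally, set_use_transferring_cte_cl switches which closure the mutator dirty card queue set runs when a buffer is processed: the transferring closure, or the original refining closure. A minimal standalone sketch of that switch (stand-in types only, not HotSpot code):

    #include <cassert>
    
    typedef signed char CardValue;
    
    // Stand-in for CardTableEntryClosure.
    struct CardTableEntryClosure {
      virtual bool do_card_ptr(CardValue* card_ptr, int worker_i) = 0;
      virtual ~CardTableEntryClosure() {}
    };
    
    // Stand-in for the queue set whose closure is being switched.
    struct QueueSet {
      CardTableEntryClosure* _closure;
      QueueSet() : _closure(0) {}
      void set_closure(CardTableEntryClosure* cl) { _closure = cl; }
    };
    
    struct RefineClosure : CardTableEntryClosure {    // _refine_cte_cl stand-in
      bool do_card_ptr(CardValue*, int) { return true; }
    };
    struct TransferClosure : CardTableEntryClosure {  // _transfer_cte_cl stand-in
      bool do_card_ptr(CardValue*, int) { return true; }
    };
    
    // Analogue of G1CollectedHeap::set_use_transferring_cte_cl(bool).
    void set_use_transferring_cte_cl(QueueSet& dcqs, bool transferring,
                                     TransferClosure* transfer_cl,
                                     RefineClosure* refine_cl) {
      CardTableEntryClosure* cte_cl = 0;
      if (transferring) {
        cte_cl = transfer_cl;
      } else {
        cte_cl = refine_cl;
      }
      dcqs.set_closure(cte_cl);
    }
    
    int main() {
      QueueSet dcqs;
      RefineClosure refine_cl;
      TransferClosure transfer_cl;
      set_use_transferring_cte_cl(dcqs, true, &transfer_cl, &refine_cl);
      assert(dcqs._closure == &transfer_cl);
      set_use_transferring_cte_cl(dcqs, false, &transfer_cl, &refine_cl);
      assert(dcqs._closure == &refine_cl);
      return 0;
    }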