src/share/vm/gc_implementation/g1/g1RemSet.cpp

Print this page
rev 4973 : imported patch checkpointing.diff

*** 249,259 **** // is during RSet updating within an evacuation pause. // In this case worker_i should be the id of a GC worker thread. assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker"); ! if (_g1rs->refine_card(card_ptr, worker_i, true)) { // 'card_ptr' contains references that point into the collection // set. We need to record the card in the DCQS // (G1CollectedHeap::into_cset_dirty_card_queue_set()) // that's used for that purpose. // --- 249,259 ---- // is during RSet updating within an evacuation pause. // In this case worker_i should be the id of a GC worker thread. assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker"); ! if (_g1rs->refine_card(card_ptr, worker_i, (_into_cset_dcq != NULL))) { // 'card_ptr' contains references that point into the collection // set. We need to record the card in the DCQS // (G1CollectedHeap::into_cset_dirty_card_queue_set()) // that's used for that purpose. //
*** 262,288 **** } return true; } }; ! void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i) { double start = os::elapsedTime(); // Apply the given closure to all remaining log entries. ! RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, into_cset_dcq); ! _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, into_cset_dcq, false, worker_i); // Now there should be no dirty cards. if (G1RSLogCheckCardTable) { CountNonCleanMemRegionClosure cl(_g1); _ct_bs->mod_card_iterate(&cl); // XXX This isn't true any more: keeping cards of young regions // marked dirty broke it. Need some reasonable fix. guarantee(cl.n() == 0, "Card table should be clean."); } _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0); } void G1RemSet::cleanupHRRS() { HeapRegionRemSet::cleanup(); } --- 262,323 ---- } return true; } }; ! class RefineTransferredCardsClosure: public CardTableEntryClosure { ! G1RemSet* _g1rs; ! DirtyCardQueue* _into_cset_dcq; ! public: ! RefineTransferredCardsClosure(G1CollectedHeap* g1h, ! DirtyCardQueue* into_cset_dcq) : ! _g1rs(g1h->g1_rem_set()), _into_cset_dcq(into_cset_dcq) ! {} ! bool do_card_ptr(jbyte* card_ptr, int worker_i) { ! // The only time we care about recording cards that ! // contain references that point into the collection set ! // is during RSet updating within an evacuation pause. ! // In this case worker_i should be the id of a GC worker thread. ! assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause"); ! assert(worker_i < (int) (ParallelGCThreads == 0 ? 1 : ParallelGCThreads), "should be a GC worker"); ! ! if (_g1rs->refine_card_without_check(card_ptr, worker_i, (_into_cset_dcq != NULL))) { ! // 'card_ptr' contains references that point into the collection ! // set. We need to record the card in the DCQS ! // (G1CollectedHeap::into_cset_dirty_card_queue_set()) ! // that's used for that purpose. ! // ! // Enqueue the card ! _into_cset_dcq->enqueue(card_ptr); ! } ! return true; ! } ! }; ! ! void G1RemSet::updateRS(DirtyCardQueue* into_cset_dcq, int worker_i, bool update_accounting) { double start = os::elapsedTime(); // Apply the given closure to all remaining log entries. ! RefineRecordRefsIntoCSCardTableEntryClosure into_cset_update_rs_cl(_g1, update_accounting ? into_cset_dcq : NULL); ! RefineTransferredCardsClosure update_cleaned_cards_rs_cl(_g1, update_accounting ? into_cset_dcq : NULL); ! int processed_buffers = _g1->iterate_dirty_card_closure(&into_cset_update_rs_cl, ! &update_cleaned_cards_rs_cl, ! into_cset_dcq, false, worker_i); // Now there should be no dirty cards. if (G1RSLogCheckCardTable) { CountNonCleanMemRegionClosure cl(_g1); _ct_bs->mod_card_iterate(&cl); // XXX This isn't true any more: keeping cards of young regions // marked dirty broke it. Need some reasonable fix. guarantee(cl.n() == 0, "Card table should be clean."); } + if (update_accounting) { + _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, processed_buffers); _g1p->phase_times()->record_update_rs_time(worker_i, (os::elapsedTime() - start) * 1000.0); + } } void G1RemSet::cleanupHRRS() { HeapRegionRemSet::cleanup(); }
*** 320,330 **** // race conditions when these two operations are done in parallel // and they are causing failures. When we resolve said race // conditions, we'll revert back to parallel remembered set // updating and scanning. See CRs 6677707 and 6677708. if (G1UseParallelRSetUpdating || (worker_i == 0)) { ! updateRS(&into_cset_dcq, worker_i); } else { _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0); _g1p->phase_times()->record_update_rs_time(worker_i, 0.0); } if (G1UseParallelRSetScanning || (worker_i == 0)) { --- 355,365 ---- // race conditions when these two operations are done in parallel // and they are causing failures. When we resolve said race // conditions, we'll revert back to parallel remembered set // updating and scanning. See CRs 6677707 and 6677708. if (G1UseParallelRSetUpdating || (worker_i == 0)) { ! updateRS(&into_cset_dcq, worker_i, /* update_accounting */ true); } else { _g1p->phase_times()->record_update_rs_processed_buffers(worker_i, 0); _g1p->phase_times()->record_update_rs_time(worker_i, 0.0); } if (G1UseParallelRSetScanning || (worker_i == 0)) {
*** 339,348 **** --- 374,384 ---- void G1RemSet::prepare_for_oops_into_collection_set_do() { cleanupHRRS(); ConcurrentG1Refine* cg1r = _g1->concurrent_g1_refine(); _g1->set_refine_cte_cl_concurrency(false); + _g1->set_use_transferring_cte_cl(false); DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set(); dcqs.concatenate_logs(); if (G1CollectedHeap::use_parallel_gc_threads()) { // Don't set the number of workers here. It will be set
*** 426,435 **** --- 462,472 ---- } FREE_C_HEAP_ARRAY(size_t, _cards_scanned, mtGC); _cards_scanned = NULL; // Cleanup after copy _g1->set_refine_cte_cl_concurrency(true); + _g1->set_use_transferring_cte_cl(true); // Set all cards back to clean. _g1->cleanUpCardTable(); DirtyCardQueueSet& into_cset_dcqs = _g1->into_cset_dirty_card_queue_set(); int into_cset_n_buffers = into_cset_dcqs.completed_buffers_num();
*** 535,544 **** --- 572,586 ---- // No need to return that this card contains refs that point // into the collection set. return false; } + return refine_card_without_check(card_ptr, worker_i, check_for_refs_into_cset); + } + + bool G1RemSet::refine_card_without_check(jbyte* card_ptr, int worker_i, + bool check_for_refs_into_cset) { // Construct the region representing the card. HeapWord* start = _ct_bs->addr_for(card_ptr); // And find the region containing it. HeapRegion* r = _g1->heap_region_containing(start); if (r == NULL) {
*** 731,740 **** --- 773,783 ---- #endif summary->print_on(gclog_or_tty); } + void G1RemSet::prepare_for_verify() { if (G1HRRSFlushLogBuffersOnVerify && (VerifyBeforeGC || VerifyAfterGC) && (!_g1->full_collection() || G1VerifyRSetsDuringFullGC)) { cleanupHRRS();
*** 747,757 **** G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); bool use_hot_card_cache = hot_card_cache->use_cache(); hot_card_cache->set_use_cache(false); DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set()); ! updateRS(&into_cset_dcq, 0); _g1->into_cset_dirty_card_queue_set().clear(); hot_card_cache->set_use_cache(use_hot_card_cache); assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); } --- 790,800 ---- G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache(); bool use_hot_card_cache = hot_card_cache->use_cache(); hot_card_cache->set_use_cache(false); DirtyCardQueue into_cset_dcq(&_g1->into_cset_dirty_card_queue_set()); ! updateRS(&into_cset_dcq, 0, /* update_accounting */ false); _g1->into_cset_dirty_card_queue_set().clear(); hot_card_cache->set_use_cache(use_hot_card_cache); assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed"); }