
src/share/vm/gc/g1/g1RemSet.cpp

rev 12906 : [mq]: gc_interface

*** 23,40 ****
   */
  
  #include "precompiled.hpp"
  #include "gc/g1/concurrentG1Refine.hpp"
  #include "gc/g1/dirtyCardQueue.hpp"
  #include "gc/g1/g1BlockOffsetTable.inline.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
  #include "gc/g1/g1FromCardCache.hpp"
  #include "gc/g1/g1GCPhaseTimes.hpp"
  #include "gc/g1/g1HotCardCache.hpp"
  #include "gc/g1/g1OopClosures.inline.hpp"
  #include "gc/g1/g1RemSet.inline.hpp"
- #include "gc/g1/g1SATBCardTableModRefBS.inline.hpp"
  #include "gc/g1/heapRegion.inline.hpp"
  #include "gc/g1/heapRegionManager.inline.hpp"
  #include "gc/g1/heapRegionRemSet.hpp"
  #include "gc/shared/gcTraceTime.inline.hpp"
  #include "memory/iterator.hpp"
--- 23,41 ----
   */
  
  #include "precompiled.hpp"
  #include "gc/g1/concurrentG1Refine.hpp"
  #include "gc/g1/dirtyCardQueue.hpp"
+ #include "gc/g1/g1BarrierSet.hpp"
  #include "gc/g1/g1BlockOffsetTable.inline.hpp"
+ #include "gc/g1/g1CardTable.inline.hpp"
  #include "gc/g1/g1CollectedHeap.inline.hpp"
  #include "gc/g1/g1FromCardCache.hpp"
  #include "gc/g1/g1GCPhaseTimes.hpp"
  #include "gc/g1/g1HotCardCache.hpp"
  #include "gc/g1/g1OopClosures.inline.hpp"
  #include "gc/g1/g1RemSet.inline.hpp"
  #include "gc/g1/heapRegion.inline.hpp"
  #include "gc/g1/heapRegionManager.inline.hpp"
  #include "gc/g1/heapRegionRemSet.hpp"
  #include "gc/shared/gcTraceTime.inline.hpp"
  #include "memory/iterator.hpp"
*** 70,89 ****
    }
  
    static size_t chunk_size() { return M; }
  
    void work(uint worker_id) {
!     G1SATBCardTableModRefBS* ct_bs = _g1h->g1_barrier_set();
  
      while (_cur_dirty_regions < _num_dirty_regions) {
        size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
        size_t max = MIN2(next + _chunk_length, _num_dirty_regions);
  
        for (size_t i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
          if (!r->is_survivor()) {
!           ct_bs->clear(MemRegion(r->bottom(), r->end()));
          }
        }
      }
    }
  };
--- 71,90 ----
    }
  
    static size_t chunk_size() { return M; }
  
    void work(uint worker_id) {
!     G1CardTable* ct = _g1h->g1_card_table();
  
      while (_cur_dirty_regions < _num_dirty_regions) {
        size_t next = Atomic::add(_chunk_length, &_cur_dirty_regions) - _chunk_length;
        size_t max = MIN2(next + _chunk_length, _num_dirty_regions);
  
        for (size_t i = next; i < max; i++) {
          HeapRegion* r = _g1h->region_at(_dirty_region_list[i]);
          if (!r->is_survivor()) {
!           ct->clear(MemRegion(r->bottom(), r->end()));
          }
        }
      }
    }
  };
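
For context, here is a minimal standalone sketch of the chunk-claiming idiom used in work() above. It uses std::atomic in place of HotSpot's Atomic::add, and all names (num_items, chunk_length, cur_item, worker) are illustrative stand-ins, not identifiers from the patch.

#include <algorithm>
#include <atomic>
#include <cstddef>
#include <cstdio>
#include <thread>
#include <vector>

static const size_t num_items    = 100;  // plays the role of _num_dirty_regions
static const size_t chunk_length = 8;    // plays the role of _chunk_length
static std::atomic<size_t> cur_item(0);  // plays the role of _cur_dirty_regions

static void worker(unsigned id) {
  while (cur_item.load() < num_items) {
    // fetch_add returns the value before the addition, so [next, max) is the
    // chunk this worker just claimed. (HotSpot's Atomic::add returns the new
    // value, hence the "- _chunk_length" in the original code.)
    size_t next = cur_item.fetch_add(chunk_length);
    size_t max  = std::min(next + chunk_length, num_items);
    for (size_t i = next; i < max; i++) {
      // A real worker would clear the card table range of region i here.
      std::printf("worker %u handles item %zu\n", id, i);
    }
  }
}

int main() {
  std::vector<std::thread> threads;
  for (unsigned id = 0; id < 4; id++) {
    threads.emplace_back(worker, id);
  }
  for (std::thread& t : threads) {
    t.join();
  }
  return 0;
}

Claiming a whole chunk per atomic operation keeps contention on the shared counter low while still balancing work across threads.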
*** 238,253 ****
  #endif
    }
  };
  
  G1RemSet::G1RemSet(G1CollectedHeap* g1,
!                    CardTableModRefBS* ct_bs,
                     G1HotCardCache* hot_card_cache) :
    _g1(g1),
    _scan_state(new G1RemSetScanState()),
    _conc_refine_cards(0),
!   _ct_bs(ct_bs),
    _g1p(_g1->g1_policy()),
    _hot_card_cache(hot_card_cache),
    _prev_period_summary(),
    _into_cset_dirty_card_queue_set(false)
  {
--- 239,254 ----
  #endif
    }
  };
  
  G1RemSet::G1RemSet(G1CollectedHeap* g1,
!                    G1CardTable* ct,
                     G1HotCardCache* hot_card_cache) :
    _g1(g1),
    _scan_state(new G1RemSetScanState()),
    _conc_refine_cards(0),
!   _ct(ct),
    _g1p(_g1->g1_policy()),
    _hot_card_cache(hot_card_cache),
    _prev_period_summary(),
    _into_cset_dirty_card_queue_set(false)
  {
*** 260,270 ****
                                               DirtyCardQ_CBL_mon,
                                               DirtyCardQ_FL_lock,
                                               -1, // never trigger processing
                                               -1, // no limit on length
                                               Shared_DirtyCardQ_lock,
!                                              &JavaThread::dirty_card_queue_set());
  }
  
  G1RemSet::~G1RemSet() {
    if (_scan_state != NULL) {
      delete _scan_state;
--- 261,271 ----
                                               DirtyCardQ_CBL_mon,
                                               DirtyCardQ_FL_lock,
                                               -1, // never trigger processing
                                               -1, // no limit on length
                                               Shared_DirtyCardQ_lock,
!                                              &G1BarrierSet::dirty_card_queue_set());
  }
  
  G1RemSet::~G1RemSet() {
    if (_scan_state != NULL) {
      delete _scan_state;
*** 299,326 ****
    _cards(0),
    _cards_done(0),
    _worker_i(worker_i) {
    _g1h = G1CollectedHeap::heap();
    _bot = _g1h->bot();
!   _ct_bs = _g1h->g1_barrier_set();
    _block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
  }
  
  void G1ScanRSClosure::scan_card(size_t index, HeapRegion *r) {
    // Stack allocate the DirtyCardToOopClosure instance
!   HeapRegionDCTOC cl(_g1h, r, _push_heap_cl, CardTableModRefBS::Precise);
  
    // Set the "from" region in the closure.
    _push_heap_cl->set_region(r);
    MemRegion card_region(_bot->address_for_index(index), BOTConstants::N_words);
    MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
    MemRegion mr = pre_gc_allocated.intersection(card_region);
  
!   if (!mr.is_empty() && !_ct_bs->is_card_claimed(index)) {
      // We make the card as "claimed" lazily (so races are possible
      // but they're benign), which reduces the number of duplicate
      // scans (the rsets of the regions in the cset can intersect).
!     _ct_bs->set_card_claimed(index);
      _cards_done++;
      cl.do_MemRegion(mr);
    }
  }
--- 300,327 ----
    _cards(0),
    _cards_done(0),
    _worker_i(worker_i) {
    _g1h = G1CollectedHeap::heap();
    _bot = _g1h->bot();
!   _ct = _g1h->g1_card_table();
    _block_size = MAX2<size_t>(G1RSetScanBlockSize, 1);
  }
  
  void G1ScanRSClosure::scan_card(size_t index, HeapRegion *r) {
    // Stack allocate the DirtyCardToOopClosure instance
!   HeapRegionDCTOC cl(_g1h, r, _push_heap_cl, G1CardTable::Precise);
  
    // Set the "from" region in the closure.
    _push_heap_cl->set_region(r);
    MemRegion card_region(_bot->address_for_index(index), BOTConstants::N_words);
    MemRegion pre_gc_allocated(r->bottom(), r->scan_top());
    MemRegion mr = pre_gc_allocated.intersection(card_region);
  
!   if (!mr.is_empty() && !_ct->is_card_claimed(index)) {
      // We make the card as "claimed" lazily (so races are possible
      // but they're benign), which reduces the number of duplicate
      // scans (the rsets of the regions in the cset can intersect).
!     _ct->set_card_claimed(index);
      _cards_done++;
      cl.do_MemRegion(mr);
    }
  }
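
For context, a standalone sketch of the lazy claim used in scan_card() above. The claim is only an optimization: if two threads race on the same card, the worst case is a duplicate scan, which is correct but redundant. Relaxed atomic flags stand in for the card table's claim bits, and every name below is an illustrative stand-in rather than something from the patch.

#include <atomic>
#include <cstddef>
#include <cstdio>

static const size_t num_cards = 1024;                   // illustrative card count
static std::atomic<unsigned char> claimed[num_cards];   // zero-initialized claim flags

static void scan_one_card(size_t index) {
  std::printf("scanning card %zu\n", index);  // placeholder for the real card scan
}

static void maybe_scan_card(size_t index) {
  // Plain check-then-store, no compare-and-swap: a racing thread can still see
  // the flag as clear and scan the same card a second time. That is benign,
  // and it keeps the common (uncontended) path cheap.
  if (claimed[index].load(std::memory_order_relaxed) == 0) {
    claimed[index].store(1, std::memory_order_relaxed);
    scan_one_card(index);
  }
}

int main() {
  maybe_scan_card(42);
  maybe_scan_card(42);  // filtered by the claim flag the first call set
  return 0;
}

Skipping the atomic read-modify-write trades a small amount of possible duplicate work for a cheaper claim on every card.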
*** 364,374 ****
        _scan_state->add_dirty_region(card_region->hrm_index());
  
        // If the card is dirty, then we will scan it during updateRS.
        if (!card_region->in_collection_set() &&
!           !_ct_bs->is_card_dirty(card_index)) {
          scan_card(card_index, card_region);
        }
      }
  
      if (_scan_state->set_iter_complete(region_idx)) {
        // Scan the strong code root list attached to the current region
--- 365,375 ----
        _scan_state->add_dirty_region(card_region->hrm_index());
  
        // If the card is dirty, then we will scan it during updateRS.
        if (!card_region->in_collection_set() &&
!           !_ct->is_card_dirty(card_index)) {
          scan_card(card_index, card_region);
        }
      }
  
      if (_scan_state->set_iter_complete(region_idx)) {
        // Scan the strong code root list attached to the current region
*** 467,477 ****
    return scan_rem_set(cl, heap_region_codeblobs, worker_i);;
  }
  
  void G1RemSet::prepare_for_oops_into_collection_set_do() {
    _g1->set_refine_cte_cl_concurrency(false);
!   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
    dcqs.concatenate_logs();
  
    _scan_state->reset();
  }
  
--- 468,478 ----
    return scan_rem_set(cl, heap_region_codeblobs, worker_i);;
  }
  
  void G1RemSet::prepare_for_oops_into_collection_set_do() {
    _g1->set_refine_cte_cl_concurrency(false);
!   DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
    dcqs.concatenate_logs();
  
    _scan_state->reset();
  }
  
*** 543,570 ****
  // false otherwise.
  bool G1RemSet::refine_card(jbyte* card_ptr,
                             uint worker_i,
                             G1ParPushHeapRSClosure* oops_in_heap_closure) {
!   assert(_g1->is_in_exact(_ct_bs->addr_for(card_ptr)),
           "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
           p2i(card_ptr),
!          _ct_bs->index_for(_ct_bs->addr_for(card_ptr)),
!          p2i(_ct_bs->addr_for(card_ptr)),
!          _g1->addr_to_region(_ct_bs->addr_for(card_ptr)));
  
    bool check_for_refs_into_cset = oops_in_heap_closure != NULL;
  
    // If the card is no longer dirty, nothing to do.
!   if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      // No need to return that this card contains refs that point
      // into the collection set.
      return false;
    }
  
    // Construct the region representing the card.
!   HeapWord* start = _ct_bs->addr_for(card_ptr);
    // And find the region containing it.
    HeapRegion* r = _g1->heap_region_containing(start);
  
    // This check is needed for some uncommon cases where we should
    // ignore the card.
  
--- 544,571 ----
  // false otherwise.
  bool G1RemSet::refine_card(jbyte* card_ptr,
                             uint worker_i,
                             G1ParPushHeapRSClosure* oops_in_heap_closure) {
!   assert(_g1->is_in_exact(_ct->addr_for(card_ptr)),
           "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
           p2i(card_ptr),
!          _ct->index_for(_ct->addr_for(card_ptr)),
!          p2i(_ct->addr_for(card_ptr)),
!          _g1->addr_to_region(_ct->addr_for(card_ptr)));
  
    bool check_for_refs_into_cset = oops_in_heap_closure != NULL;
  
    // If the card is no longer dirty, nothing to do.
!   if (*card_ptr != G1CardTable::dirty_card_val()) {
      // No need to return that this card contains refs that point
      // into the collection set.
      return false;
    }
  
    // Construct the region representing the card.
!   HeapWord* start = _ct->addr_for(card_ptr);
    // And find the region containing it.
    HeapRegion* r = _g1->heap_region_containing(start);
  
    // This check is needed for some uncommon cases where we should
    // ignore the card.
  
*** 621,631 ****
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
!     start = _ct_bs->addr_for(card_ptr);
      r = _g1->heap_region_containing(start);
  
      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card. The
      // region could have been freed while in the cache. The cset is
--- 622,632 ----
    if (card_ptr == NULL) {
      // There was no eviction. Nothing to do.
      return false;
    } else if (card_ptr != orig_card_ptr) {
      // Original card was inserted and an old card was evicted.
!     start = _ct->addr_for(card_ptr);
      r = _g1->heap_region_containing(start);
  
      // Check whether the region formerly in the cache should be
      // ignored, as discussed earlier for the original card. The
      // region could have been freed while in the cache. The cset is
*** 662,672 ****
    }
  
    // Okay to clean and process the card now. There are still some
    // stale card cases that may be detected by iteration and dealt with
    // as iteration failure.
!   *const_cast<volatile jbyte*>(card_ptr) = CardTableModRefBS::clean_card_val();
  
    // This fence serves two purposes. First, the card must be cleaned
    // before processing the contents. Second, we can't proceed with
    // processing until after the read of top, for synchronization with
    // possibly concurrent humongous object allocation. It's okay that
--- 663,673 ----
    }
  
    // Okay to clean and process the card now. There are still some
    // stale card cases that may be detected by iteration and dealt with
    // as iteration failure.
!   *const_cast<volatile jbyte*>(card_ptr) = G1CardTable::clean_card_val();
  
    // This fence serves two purposes. First, the card must be cleaned
    // before processing the contents. Second, we can't proceed with
    // processing until after the read of top, for synchronization with
    // possibly concurrent humongous object allocation. It's okay that
*** 674,684 ****
    // both set, in any order, to proceed.
    OrderAccess::fence();
  
    // Don't use addr_for(card_ptr + 1) which can ask for
    // a card beyond the heap.
!   HeapWord* end = start + CardTableModRefBS::card_size_in_words;
    MemRegion dirty_region(start, MIN2(scan_limit, end));
    assert(!dirty_region.is_empty(), "sanity");
  
    G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                   oops_in_heap_closure,
--- 675,685 ----
    // both set, in any order, to proceed.
    OrderAccess::fence();
  
    // Don't use addr_for(card_ptr + 1) which can ask for
    // a card beyond the heap.
!   HeapWord* end = start + G1CardTable::card_size_in_words;
    MemRegion dirty_region(start, MIN2(scan_limit, end));
    assert(!dirty_region.is_empty(), "sanity");
  
    G1UpdateRSOrPushRefOopClosure update_rs_oop_cl(_g1,
                                                   oops_in_heap_closure,
*** 697,712 ****
    // this we could incorrectly discard a non-stale card.
    if (!card_processed) {
      assert(!_g1->is_gc_active(), "Unparsable heap during GC");
      // The card might have gotten re-dirtied and re-enqueued while we
      // worked. (In fact, it's pretty likely.)
!     if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
!       *card_ptr = CardTableModRefBS::dirty_card_val();
        MutexLockerEx x(Shared_DirtyCardQ_lock,
                        Mutex::_no_safepoint_check_flag);
        DirtyCardQueue* sdcq =
!         JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
        sdcq->enqueue(card_ptr);
      }
    } else {
      _conc_refine_cards++;
    }
--- 698,713 ----
    // this we could incorrectly discard a non-stale card.
    if (!card_processed) {
      assert(!_g1->is_gc_active(), "Unparsable heap during GC");
      // The card might have gotten re-dirtied and re-enqueued while we
      // worked. (In fact, it's pretty likely.)
!     if (*card_ptr != G1CardTable::dirty_card_val()) {
!       *card_ptr = G1CardTable::dirty_card_val();
        MutexLockerEx x(Shared_DirtyCardQ_lock,
                        Mutex::_no_safepoint_check_flag);
        DirtyCardQueue* sdcq =
!         G1BarrierSet::dirty_card_queue_set().shared_dirty_card_queue();
        sdcq->enqueue(card_ptr);
      }
    } else {
      _conc_refine_cards++;
    }
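
For context, a standalone sketch of the "re-dirty and re-enqueue" fallback in the hunk above: a card that was cleaned but could not be processed must not be lost, so its dirty value is restored and it is handed back for a later refinement pass. All names and the single-threaded queue below are illustrative stand-ins; the real code re-enqueues on the shared dirty card queue under Shared_DirtyCardQ_lock.

#include <cstdio>
#include <queue>

typedef unsigned char card_t;
static const card_t clean_card_val = 0;
static const card_t dirty_card_val = 1;

static std::queue<card_t*> deferred_cards;  // stand-in for the shared dirty card queue

// Hypothetical stand-in for scanning the heap range a card covers; returning
// false models the "unparsable heap" case that forces the retry path.
static bool try_process_card(card_t* /*card*/) {
  return false;
}

static void refine_or_defer(card_t* card) {
  *card = clean_card_val;  // clean before scanning, as refine_card() does
  if (!try_process_card(card)) {
    // Only re-dirty if nothing re-dirtied (and re-enqueued) it in the meantime.
    if (*card != dirty_card_val) {
      *card = dirty_card_val;
      deferred_cards.push(card);
    }
  }
}

int main() {
  card_t card = dirty_card_val;
  refine_or_defer(&card);
  std::printf("deferred cards: %zu\n", deferred_cards.size());
  return 0;
}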
*** 761,771 ****
        (VerifyBeforeGC || VerifyAfterGC) &&
        (!_g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC)) {
      cleanupHRRS();
      _g1->set_refine_cte_cl_concurrency(false);
      if (SafepointSynchronize::is_at_safepoint()) {
!       DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
        dcqs.concatenate_logs();
      }
  
      bool use_hot_card_cache = _hot_card_cache->use_cache();
      _hot_card_cache->set_use_cache(false);
--- 762,772 ----
        (VerifyBeforeGC || VerifyAfterGC) &&
        (!_g1->collector_state()->full_collection() || G1VerifyRSetsDuringFullGC)) {
      cleanupHRRS();
      _g1->set_refine_cte_cl_concurrency(false);
      if (SafepointSynchronize::is_at_safepoint()) {
!       DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
        dcqs.concatenate_logs();
      }
  
      bool use_hot_card_cache = _hot_card_cache->use_cache();
      _hot_card_cache->set_use_cache(false);
*** 773,783 ****
      DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);
      update_rem_set(&into_cset_dcq, NULL, 0);
      _into_cset_dirty_card_queue_set.clear();
  
      _hot_card_cache->set_use_cache(use_hot_card_cache);
!     assert(JavaThread::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
    }
  }
  
  void G1RemSet::create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
    _card_live_data.create(workers, mark_bitmap);
--- 774,784 ----
      DirtyCardQueue into_cset_dcq(&_into_cset_dirty_card_queue_set);
      update_rem_set(&into_cset_dcq, NULL, 0);
      _into_cset_dirty_card_queue_set.clear();
  
      _hot_card_cache->set_use_cache(use_hot_card_cache);
!     assert(G1BarrierSet::dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
    }
  }
  
  void G1RemSet::create_card_live_data(WorkGang* workers, G1CMBitMap* mark_bitmap) {
    _card_live_data.create(workers, mark_bitmap);