< prev index next >

src/hotspot/share/gc/g1/g1RemSet.cpp

Print this page
rev 56302 : [mq]: 8231189-worker_i-renamings


 822 
 823   Tickspan _rem_set_opt_root_scan_time;
 824   Tickspan _rem_set_opt_trim_partially_time;
 825 
 826   void scan_opt_rem_set_roots(HeapRegion* r) {  // Scan the optional remembered set entries recorded for region r (per-worker, see do_heap_region).
 827     EventGCPhaseParallel event;  // JFR event covering this scan; committed below.
 828 
 829     G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);  // List of oop* recorded for this optional region.
 830 
 831     G1ScanCardClosure scan_cl(G1CollectedHeap::heap(), _pss);
 832     G1ScanRSForOptionalClosure cl(G1CollectedHeap::heap(), &scan_cl);
 833     _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->strong_oops());  // Apply closures; count references scanned.
 834     _opt_refs_memory_used += opt_rem_set_list->used_memory();  // Account memory held by the optional list.
 835 
 836     event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_scan_phase));  // Attribute event to this worker and scan phase.
 837   }
 838 
 839 public:
 840   G1ScanCollectionSetRegionClosure(G1RemSetScanState* scan_state,
 841                                    G1ParScanThreadState* pss,
 842                                    uint worker_i,                                     // id of the worker thread running this closure
 843                                    G1GCPhaseTimes::GCParPhases scan_phase,            // phase under which rem set scanning is reported
 844                                    G1GCPhaseTimes::GCParPhases code_roots_phase) :    // phase under which code root scanning is reported
 845     _pss(pss),
 846     _scan_state(scan_state),
 847     _scan_phase(scan_phase),
 848     _code_roots_phase(code_roots_phase),
 849     _worker_id(worker_i),
 850     _opt_refs_scanned(0),
 851     _opt_refs_memory_used(0),
 852     _strong_code_root_scan_time(),      // Tickspan counters start at zero.
 853     _strong_code_trim_partially_time(),
 854     _rem_set_opt_root_scan_time(),
 855     _rem_set_opt_trim_partially_time() { }
 856 
 857   bool do_heap_region(HeapRegion* r) {
 858     uint const region_idx = r->hrm_index();
 859 
 860     // The individual references for the optional remembered set are per-worker, so we
 861     // always need to scan them.
 862     if (r->has_index_in_opt_cset()) {
 863       G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_opt_root_scan_time, _rem_set_opt_trim_partially_time);
 864       scan_opt_rem_set_roots(r);
 865     }
 866 
 867     if (_scan_state->claim_collection_set_region(region_idx)) {
 868       EventGCPhaseParallel event;
 869 


1044       return false;
1045     }
1046 
1047     size_t merged_sparse() const { return _cl.merged_sparse(); }
1048     size_t merged_fine() const { return _cl.merged_fine(); }
1049     size_t merged_coarse() const { return _cl.merged_coarse(); }
1050   };
1051 
1052   // Visitor for the log buffer entries to merge them into the card table.
1053   class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure {
1054     G1RemSetScanState* _scan_state;
1055     G1CardTable* _ct;
1056 
1057     size_t _cards_dirty;
1058     size_t _cards_skipped;
1059   public:
1060     G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) :
1061       _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
1062     {}
1063 
1064     void do_card_ptr(CardValue* card_ptr, uint worker_i) {
1065       // The only time we care about recording cards that
1066       // contain references that point into the collection set
1067       // is during RSet updating within an evacuation pause.
1068       // In this case worker_id should be the id of a GC worker thread.
1069       assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
1070 
1071       uint const region_idx = _ct->region_idx_for(card_ptr);
1072 
1073       // The second clause must come after - the log buffers might contain cards to uncommited
1074       // regions.
1075       // This code may count duplicate entries in the log buffers (even if rare) multiple
1076       // times.
1077       if (_scan_state->contains_cards_to_process(region_idx) && (*card_ptr == G1CardTable::dirty_card_val())) {
1078         _scan_state->add_dirty_region(region_idx);
1079         _scan_state->set_chunk_dirty(_ct->index_for_cardvalue(card_ptr));
1080         _cards_dirty++;
1081       } else {
1082         // We may have had dirty cards in the (initial) collection set (or the
1083         // young regions which are always in the initial collection set). We do
1084         // not fix their cards here: we already added these regions to the set of


1246 
1247   // Set all cards back to clean.
1248   double start = os::elapsedTime();
1249   _scan_state->cleanup(_g1h->workers());
1250   phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
1251 }
1252 
1253 inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {  // Debug-only: verify card_ptr maps to an address in the committed heap. No-op in product builds.
1254 #ifdef ASSERT
1255   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1256   assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
1257          "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
1258          p2i(card_ptr),
1259          ct->index_for(ct->addr_for(card_ptr)),
1260          p2i(ct->addr_for(card_ptr)),
1261          g1h->addr_to_region(ct->addr_for(card_ptr)));
1262 #endif
1263 }
1264 
1265 void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
1266                                         uint worker_i) {
1267   assert(!_g1h->is_gc_active(), "Only call concurrently");
1268 
1269   // Construct the region representing the card.
1270   HeapWord* start = _ct->addr_for(card_ptr);
1271   // And find the region containing it.
1272   HeapRegion* r = _g1h->heap_region_containing_or_null(start);
1273 
1274   // If this is a (stale) card into an uncommitted region, exit.
1275   if (r == NULL) {
1276     return;
1277   }
1278 
1279   check_card_ptr(card_ptr, _ct);
1280 
1281   // If the card is no longer dirty, nothing to do.
1282   if (*card_ptr != G1CardTable::dirty_card_val()) {
1283     return;
1284   }
1285 
1286   // This check is needed for some uncommon cases where we should


1358 
1359   // Okay to clean and process the card now.  There are still some
1360   // stale card cases that may be detected by iteration and dealt with
1361   // as iteration failure.
1362   *const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val();
1363 
1364   // This fence serves two purposes.  First, the card must be cleaned
1365   // before processing the contents.  Second, we can't proceed with
1366   // processing until after the read of top, for synchronization with
1367   // possibly concurrent humongous object allocation.  It's okay that
1368   // reading top and reading type were racy wrto each other.  We need
1369   // both set, in any order, to proceed.
1370   OrderAccess::fence();
1371 
1372   // Don't use addr_for(card_ptr + 1) which can ask for
1373   // a card beyond the heap.
1374   HeapWord* end = start + G1CardTable::card_size_in_words;
1375   MemRegion dirty_region(start, MIN2(scan_limit, end));
1376   assert(!dirty_region.is_empty(), "sanity");
1377 
1378   G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_i);
1379   if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) {
1380     _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
1381     return;
1382   }
1383 
1384   // If unable to process the card then we encountered an unparsable
1385   // part of the heap (e.g. a partially allocated object, so only
1386   // temporarily a problem) while processing a stale card.  Despite
1387   // the card being stale, we can't simply ignore it, because we've
1388   // already marked the card cleaned, so taken responsibility for
1389   // ensuring the card gets scanned.
1390   //
1391   // However, the card might have gotten re-dirtied and re-enqueued
1392   // while we worked.  (In fact, it's pretty likely.)
1393   if (*card_ptr == G1CardTable::dirty_card_val()) {
1394     return;
1395   }
1396 
1397   // Re-dirty the card and enqueue in the *shared* queue.  Can't use
1398   // the thread-local queue, because that might be the queue that is




 822 
 823   Tickspan _rem_set_opt_root_scan_time;
 824   Tickspan _rem_set_opt_trim_partially_time;
 825 
 826   void scan_opt_rem_set_roots(HeapRegion* r) {  // Scan the optional remembered set entries recorded for region r (per-worker, see do_heap_region).
 827     EventGCPhaseParallel event;  // JFR event covering this scan; committed below.
 828 
 829     G1OopStarChunkedList* opt_rem_set_list = _pss->oops_into_optional_region(r);  // List of oop* recorded for this optional region.
 830 
 831     G1ScanCardClosure scan_cl(G1CollectedHeap::heap(), _pss);
 832     G1ScanRSForOptionalClosure cl(G1CollectedHeap::heap(), &scan_cl);
 833     _opt_refs_scanned += opt_rem_set_list->oops_do(&cl, _pss->closures()->strong_oops());  // Apply closures; count references scanned.
 834     _opt_refs_memory_used += opt_rem_set_list->used_memory();  // Account memory held by the optional list.
 835 
 836     event.commit(GCId::current(), _worker_id, G1GCPhaseTimes::phase_name(_scan_phase));  // Attribute event to this worker and scan phase.
 837   }
 838 
 839 public:
 840   G1ScanCollectionSetRegionClosure(G1RemSetScanState* scan_state,
 841                                    G1ParScanThreadState* pss,
 842                                    uint worker_id,                                    // id of the worker thread running this closure
 843                                    G1GCPhaseTimes::GCParPhases scan_phase,            // phase under which rem set scanning is reported
 844                                    G1GCPhaseTimes::GCParPhases code_roots_phase) :    // phase under which code root scanning is reported
 845     _pss(pss),
 846     _scan_state(scan_state),
 847     _scan_phase(scan_phase),
 848     _code_roots_phase(code_roots_phase),
 849     _worker_id(worker_id),
 850     _opt_refs_scanned(0),
 851     _opt_refs_memory_used(0),
 852     _strong_code_root_scan_time(),      // Tickspan counters start at zero.
 853     _strong_code_trim_partially_time(),
 854     _rem_set_opt_root_scan_time(),
 855     _rem_set_opt_trim_partially_time() { }
 856 
 857   bool do_heap_region(HeapRegion* r) {
 858     uint const region_idx = r->hrm_index();
 859 
 860     // The individual references for the optional remembered set are per-worker, so we
 861     // always need to scan them.
 862     if (r->has_index_in_opt_cset()) {
 863       G1EvacPhaseWithTrimTimeTracker timer(_pss, _rem_set_opt_root_scan_time, _rem_set_opt_trim_partially_time);
 864       scan_opt_rem_set_roots(r);
 865     }
 866 
 867     if (_scan_state->claim_collection_set_region(region_idx)) {
 868       EventGCPhaseParallel event;
 869 


1044       return false;
1045     }
1046 
1047     size_t merged_sparse() const { return _cl.merged_sparse(); }
1048     size_t merged_fine() const { return _cl.merged_fine(); }
1049     size_t merged_coarse() const { return _cl.merged_coarse(); }
1050   };
1051 
1052   // Visitor for the log buffer entries to merge them into the card table.
1053   class G1MergeLogBufferCardsClosure : public G1CardTableEntryClosure {
1054     G1RemSetScanState* _scan_state;
1055     G1CardTable* _ct;
1056 
1057     size_t _cards_dirty;
1058     size_t _cards_skipped;
1059   public:
1060     G1MergeLogBufferCardsClosure(G1CollectedHeap* g1h, G1RemSetScanState* scan_state) :
1061       _scan_state(scan_state), _ct(g1h->card_table()), _cards_dirty(0), _cards_skipped(0)
1062     {}
1063 
1064     void do_card_ptr(CardValue* card_ptr, uint worker_id) {
1065       // The only time we care about recording cards that
1066       // contain references that point into the collection set
1067       // is during RSet updating within an evacuation pause.
1068       // In this case worker_id should be the id of a GC worker thread.
1069       assert(SafepointSynchronize::is_at_safepoint(), "not during an evacuation pause");
1070 
1071       uint const region_idx = _ct->region_idx_for(card_ptr);
1072 
1073       // The second clause must come after - the log buffers might contain cards to uncommited
1074       // regions.
1075       // This code may count duplicate entries in the log buffers (even if rare) multiple
1076       // times.
1077       if (_scan_state->contains_cards_to_process(region_idx) && (*card_ptr == G1CardTable::dirty_card_val())) {
1078         _scan_state->add_dirty_region(region_idx);
1079         _scan_state->set_chunk_dirty(_ct->index_for_cardvalue(card_ptr));
1080         _cards_dirty++;
1081       } else {
1082         // We may have had dirty cards in the (initial) collection set (or the
1083         // young regions which are always in the initial collection set). We do
1084         // not fix their cards here: we already added these regions to the set of


1246 
1247   // Set all cards back to clean.
1248   double start = os::elapsedTime();
1249   _scan_state->cleanup(_g1h->workers());
1250   phase_times->record_clear_ct_time((os::elapsedTime() - start) * 1000.0);
1251 }
1252 
1253 inline void check_card_ptr(CardTable::CardValue* card_ptr, G1CardTable* ct) {  // Debug-only: verify card_ptr maps to an address in the committed heap. No-op in product builds.
1254 #ifdef ASSERT
1255   G1CollectedHeap* g1h = G1CollectedHeap::heap();
1256   assert(g1h->is_in_exact(ct->addr_for(card_ptr)),
1257          "Card at " PTR_FORMAT " index " SIZE_FORMAT " representing heap at " PTR_FORMAT " (%u) must be in committed heap",
1258          p2i(card_ptr),
1259          ct->index_for(ct->addr_for(card_ptr)),
1260          p2i(ct->addr_for(card_ptr)),
1261          g1h->addr_to_region(ct->addr_for(card_ptr)));
1262 #endif
1263 }
1264 
1265 void G1RemSet::refine_card_concurrently(CardValue* card_ptr,
1266                                         uint worker_id) {
1267   assert(!_g1h->is_gc_active(), "Only call concurrently");
1268 
1269   // Construct the region representing the card.
1270   HeapWord* start = _ct->addr_for(card_ptr);
1271   // And find the region containing it.
1272   HeapRegion* r = _g1h->heap_region_containing_or_null(start);
1273 
1274   // If this is a (stale) card into an uncommitted region, exit.
1275   if (r == NULL) {
1276     return;
1277   }
1278 
1279   check_card_ptr(card_ptr, _ct);
1280 
1281   // If the card is no longer dirty, nothing to do.
1282   if (*card_ptr != G1CardTable::dirty_card_val()) {
1283     return;
1284   }
1285 
1286   // This check is needed for some uncommon cases where we should


1358 
1359   // Okay to clean and process the card now.  There are still some
1360   // stale card cases that may be detected by iteration and dealt with
1361   // as iteration failure.
1362   *const_cast<volatile CardValue*>(card_ptr) = G1CardTable::clean_card_val();
1363 
1364   // This fence serves two purposes.  First, the card must be cleaned
1365   // before processing the contents.  Second, we can't proceed with
1366   // processing until after the read of top, for synchronization with
1367   // possibly concurrent humongous object allocation.  It's okay that
1368   // reading top and reading type were racy wrto each other.  We need
1369   // both set, in any order, to proceed.
1370   OrderAccess::fence();
1371 
1372   // Don't use addr_for(card_ptr + 1) which can ask for
1373   // a card beyond the heap.
1374   HeapWord* end = start + G1CardTable::card_size_in_words;
1375   MemRegion dirty_region(start, MIN2(scan_limit, end));
1376   assert(!dirty_region.is_empty(), "sanity");
1377 
1378   G1ConcurrentRefineOopClosure conc_refine_cl(_g1h, worker_id);
1379   if (r->oops_on_memregion_seq_iterate_careful<false>(dirty_region, &conc_refine_cl) != NULL) {
1380     _num_conc_refined_cards++; // Unsynchronized update, only used for logging.
1381     return;
1382   }
1383 
1384   // If unable to process the card then we encountered an unparsable
1385   // part of the heap (e.g. a partially allocated object, so only
1386   // temporarily a problem) while processing a stale card.  Despite
1387   // the card being stale, we can't simply ignore it, because we've
1388   // already marked the card cleaned, so taken responsibility for
1389   // ensuring the card gets scanned.
1390   //
1391   // However, the card might have gotten re-dirtied and re-enqueued
1392   // while we worked.  (In fact, it's pretty likely.)
1393   if (*card_ptr == G1CardTable::dirty_card_val()) {
1394     return;
1395   }
1396 
1397   // Re-dirty the card and enqueue in the *shared* queue.  Can't use
1398   // the thread-local queue, because that might be the queue that is


< prev index next >