1445 g1_rem_set()->cleanupHRRS();
1446 tear_down_region_lists();
1447
1448 // We may have added regions to the current incremental collection
1449 // set between the last GC or pause and now. We need to clear the
1450 // incremental collection set and then start rebuilding it afresh
1451 // after this full GC.
1452 abandon_collection_set(g1_policy()->inc_cset_head());
1453 g1_policy()->clear_incremental_cset();
1454 g1_policy()->stop_incremental_cset_building();
1455
1456 if (g1_policy()->in_young_gc_mode()) {
1457 empty_young_list();
1458 g1_policy()->set_full_young_gcs(true);
1459 }
1460
1461 // See the comment in G1CollectedHeap::ref_processing_init() about
1462 // how reference processing currently works in G1.
1463
1464 // Temporarily make reference _discovery_ single threaded (non-MT).
1465 ReferenceProcessorMTMutator rp_disc_ser(ref_processor(), false);
1466
1467 // Temporarily make refs discovery atomic
1468 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
1469
1470 // Temporarily clear _is_alive_non_header
1471 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
1472
1473 ref_processor()->enable_discovery();
1474 ref_processor()->setup_policy(do_clear_all_soft_refs);
1475
1476 // Do collection work
1477 {
1478 HandleMark hm; // Discard invalid handles created during gc
1479 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
1480 }
1481 assert(free_regions() == 0, "we should not have added any free regions");
1482 rebuild_region_lists();
1483
1484 _summary_bytes_used = recalculate_used();
1485
2202 // Reference processing in G1 currently works as follows:
2203 //
2204 // * There is only one reference processor instance that
2205 // 'spans' the entire heap. It is created by the code
2206 // below.
2207 // * Reference discovery is not enabled during an incremental
2208 // pause (see 6484982).
2209 //   * Discovered refs are not enqueued nor are they processed
2210 // during an incremental pause (see 6484982).
2211 // * Reference discovery is enabled at initial marking.
2212 // * Reference discovery is disabled and the discovered
2213 // references processed etc during remarking.
2214 // * Reference discovery is MT (see below).
2215 // * Reference discovery requires a barrier (see below).
2216 // * Reference processing is currently not MT (see 6608385).
2217 // * A full GC enables (non-MT) reference discovery and
2218 // processes any discovered references.
2219
2220 SharedHeap::ref_processing_init();
2221 MemRegion mr = reserved_region();
2222 _ref_processor = ReferenceProcessor::create_ref_processor(
2223 mr, // span
2224 false, // Reference discovery is not atomic
2225 true, // mt_discovery
2226 &_is_alive_closure, // is alive closure
2227 // for efficiency
2228 ParallelGCThreads,
2229 ParallelRefProcEnabled,
2230 true); // Setting next fields of discovered
2231 // lists requires a barrier.
2232 }
2233
// Heap capacity: the byte size of the currently committed region
// (_g1_committed) — presumably a MemRegion tracking committed space;
// note reserved_region() nearby covers the full reservation.
2234 size_t G1CollectedHeap::capacity() const {
2235   return _g1_committed.byte_size();
2236 }
2237
2238 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2239 DirtyCardQueue* into_cset_dcq,
2240 bool concurrent,
2241 int worker_i) {
2242 // Clean cards in the hot card cache
2243 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
2244
2245 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2246 int n_completed_buffers = 0;
2247 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2248 n_completed_buffers++;
2249 }
|
1445 g1_rem_set()->cleanupHRRS();
1446 tear_down_region_lists();
1447
1448 // We may have added regions to the current incremental collection
1449 // set between the last GC or pause and now. We need to clear the
1450 // incremental collection set and then start rebuilding it afresh
1451 // after this full GC.
1452 abandon_collection_set(g1_policy()->inc_cset_head());
1453 g1_policy()->clear_incremental_cset();
1454 g1_policy()->stop_incremental_cset_building();
1455
1456 if (g1_policy()->in_young_gc_mode()) {
1457 empty_young_list();
1458 g1_policy()->set_full_young_gcs(true);
1459 }
1460
1461 // See the comment in G1CollectedHeap::ref_processing_init() about
1462 // how reference processing currently works in G1.
1463
1464 // Temporarily make reference _discovery_ single threaded (non-MT).
1465 ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
1466
1467 // Temporarily make refs discovery atomic
1468 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
1469
1470 // Temporarily clear _is_alive_non_header
1471 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
1472
1473 ref_processor()->enable_discovery();
1474 ref_processor()->setup_policy(do_clear_all_soft_refs);
1475
1476 // Do collection work
1477 {
1478 HandleMark hm; // Discard invalid handles created during gc
1479 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
1480 }
1481 assert(free_regions() == 0, "we should not have added any free regions");
1482 rebuild_region_lists();
1483
1484 _summary_bytes_used = recalculate_used();
1485
2202 // Reference processing in G1 currently works as follows:
2203 //
2204 // * There is only one reference processor instance that
2205 // 'spans' the entire heap. It is created by the code
2206 // below.
2207 // * Reference discovery is not enabled during an incremental
2208 // pause (see 6484982).
2209 //   * Discovered refs are not enqueued nor are they processed
2210 // during an incremental pause (see 6484982).
2211 // * Reference discovery is enabled at initial marking.
2212 // * Reference discovery is disabled and the discovered
2213 // references processed etc during remarking.
2214 // * Reference discovery is MT (see below).
2215 // * Reference discovery requires a barrier (see below).
2216 // * Reference processing is currently not MT (see 6608385).
2217 // * A full GC enables (non-MT) reference discovery and
2218 // processes any discovered references.
2219
2220 SharedHeap::ref_processing_init();
2221 MemRegion mr = reserved_region();
2222 _ref_processor =
2223 new ReferenceProcessor(mr, // span
2224 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
2225 ParallelGCThreads, // degree of mt processing
2226 ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery
2227 MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
2228 false, // Reference discovery is not atomic
2229 &_is_alive_closure, // is alive closure for efficiency
2230 true); // Setting next fields of discovered
2231 // lists requires a barrier.
2232 }
2233
// Heap capacity: the byte size of the currently committed region
// (_g1_committed) — presumably a MemRegion tracking committed space;
// note reserved_region() nearby covers the full reservation.
2234 size_t G1CollectedHeap::capacity() const {
2235   return _g1_committed.byte_size();
2236 }
2237
2238 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2239 DirtyCardQueue* into_cset_dcq,
2240 bool concurrent,
2241 int worker_i) {
2242 // Clean cards in the hot card cache
2243 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
2244
2245 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2246 int n_completed_buffers = 0;
2247 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2248 n_completed_buffers++;
2249 }
|