25 #include "precompiled.hpp"
26 #include "code/icBuffer.hpp"
27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
35 #include "gc_implementation/g1/g1MarkSweep.hpp"
36 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
37 #include "gc_implementation/g1/g1RemSet.inline.hpp"
38 #include "gc_implementation/g1/heapRegionRemSet.hpp"
39 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
40 #include "gc_implementation/g1/vm_operations_g1.hpp"
41 #include "gc_implementation/shared/isGCActiveMark.hpp"
42 #include "memory/gcLocker.inline.hpp"
43 #include "memory/genOopClosures.inline.hpp"
44 #include "memory/generationSpec.hpp"
45 #include "oops/oop.inline.hpp"
46 #include "oops/oop.pcgc.inline.hpp"
47 #include "runtime/aprofiler.hpp"
48 #include "runtime/vmThread.hpp"
49
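// Set during heap initialization; objects of at least this many words
// (half a heap region, in the usual configuration) are allocated as
// humongous objects in dedicated regions.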
50 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
51
52 // Turn this macro on so that the contents of the young list (scan-only /
53 // to-be-collected) are printed at "strategic" points before, during,
54 // and after the collection --- this is useful for debugging.
55 #define YOUNG_LIST_VERBOSE 0
56 // CURRENT STATUS
57 // This file is under construction. Search for "FIXME".
58
59 // INVARIANTS/NOTES
60 //
61 // All allocation activity covered by the G1CollectedHeap interface is
62 // serialized by acquiring the HeapLock. This happens in mem_allocate
63 // and allocate_new_tlab, which are the "entry" points to the
64 // allocation code from the rest of the JVM. (Note that this does not
1227
1228 gc_prologue(true);
1229 increment_total_collections(true /* full gc */);
1230
1231 size_t g1h_prev_used = used();
1232 assert(used() == recalculate_used(), "Should be equal");
1233
1234 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
1235 HandleMark hm; // Discard invalid handles created during verification
1236 gclog_or_tty->print(" VerifyBeforeGC:");
1237 prepare_for_verify();
1238 Universe::verify(/* allow dirty */ true,
1239 /* silent */ false,
1240 /* option */ VerifyOption_G1UsePrevMarking);
1241
1242 }
1243 pre_full_gc_dump();
1244
1245 COMPILER2_PRESENT(DerivedPointerTable::clear());
1246
1247 // We want to discover references, but not process them yet.
1248 // This mode is disabled in
1249 // instanceRefKlass::process_discovered_references if the
1250 // generation does some collection work, or
1251 // instanceRefKlass::enqueue_discovered_references if the
1252 // generation returns without doing any work.
1253 ref_processor()->disable_discovery();
1254 ref_processor()->abandon_partial_discovery();
1255 ref_processor()->verify_no_references_recorded();
1256
1257 // Abandon current iterations of concurrent marking and concurrent
1258 // refinement, if any are in progress.
1259 concurrent_mark()->abort();
1260
1261 // Make sure we'll choose a new allocation region afterwards.
1262 release_mutator_alloc_region();
1263 abandon_gc_alloc_regions();
1264 g1_rem_set()->cleanupHRRS();
1265 tear_down_region_lists();
1266
1267 // We should call this after we retire any currently active alloc
1268 // regions so that all the ALLOC / RETIRE events are generated
1269 // before the start GC event.
1270 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1271
1272 // We may have added regions to the current incremental collection
1273 // set between the last GC or pause and now. We need to clear the
1274 // incremental collection set and then start rebuilding it afresh
1275 // after this full GC.
1276 abandon_collection_set(g1_policy()->inc_cset_head());
1277 g1_policy()->clear_incremental_cset();
1278 g1_policy()->stop_incremental_cset_building();
1279
1280 empty_young_list();
1281 g1_policy()->set_full_young_gcs(true);
1282
1283 // See the comment in G1CollectedHeap::ref_processing_init() about
1284 // how reference processing currently works in G1.
1285
1286 // Temporarily make reference _discovery_ single threaded (non-MT).
1287 ReferenceProcessorMTDiscoveryMutator rp_disc_ser(ref_processor(), false);
1288
1289 // Temporarily make refs discovery atomic
1290 ReferenceProcessorAtomicMutator rp_disc_atomic(ref_processor(), true);
1291
1292 // Temporarily clear _is_alive_non_header
1293 ReferenceProcessorIsAliveMutator rp_is_alive_null(ref_processor(), NULL);
1294
1295 ref_processor()->enable_discovery();
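// setup_policy() selects the soft reference clearing policy for this
// collection: clear all soft references if requested, otherwise apply
// the default soft reference policy.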
1296 ref_processor()->setup_policy(do_clear_all_soft_refs);
1297 // Do collection work
1298 {
1299 HandleMark hm; // Discard invalid handles created during gc
1300 G1MarkSweep::invoke_at_safepoint(ref_processor(), do_clear_all_soft_refs);
1301 }
1302 assert(free_regions() == 0, "we should not have added any free regions");
1303 rebuild_region_lists();
1304
1305 _summary_bytes_used = recalculate_used();
1306
1307 ref_processor()->enqueue_discovered_references();
1308
1309 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1310
1311 MemoryService::track_memory_usage();
1312
1313 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
1314 HandleMark hm; // Discard invalid handles created during verification
1315 gclog_or_tty->print(" VerifyAfterGC:");
1316 prepare_for_verify();
1317 Universe::verify(/* allow dirty */ false,
1318 /* silent */ false,
1319 /* option */ VerifyOption_G1UsePrevMarking);
1320
1321 }
1322 NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1323
1324 reset_gc_time_stamp();
1325 // Since everything potentially moved, we will clear all remembered
1326 // sets, and clear all cards. Later we will rebuild remembered
1327 // sets. We will also reset the GC time stamps of the regions.
1328 PostMCRemSetClearClosure rs_clear(mr_bs());
1329 heap_region_iterate(&rs_clear);
1330
1331 // Resize the heap if necessary. An explicit GC has no pending allocation request, so pass a word_size of 0.
1332 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1333
1334 if (_hr_printer.is_active()) {
1335 // We should do this after we potentially resize the heap so
1336 // that all the COMMIT / UNCOMMIT events are generated before
1337 // the end GC event.
1338
1339 PostCompactionPrinterClosure cl(hr_printer());
1340 heap_region_iterate(&cl);
1341
1342 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1755 tear_down_region_lists(); // We will rebuild them in a moment.
1756 shrink_helper(shrink_bytes);
1757 rebuild_region_lists();
1758
1759 _hrs.verify_optional();
1760 verify_region_sets_optional();
1761 }
1762
1763 // Public methods.
1764
1765 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1766 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1767 #endif // _MSC_VER
1768
1769
1770 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1771 SharedHeap(policy_),
1772 _g1_policy(policy_),
1773 _dirty_card_queue_set(false),
1774 _into_cset_dirty_card_queue_set(false),
1775 _is_alive_closure(this),
1776 _ref_processor(NULL),
1777 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1778 _bot_shared(NULL),
1779 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
1780 _evac_failure_scan_stack(NULL) ,
1781 _mark_in_progress(false),
1782 _cg1r(NULL), _summary_bytes_used(0),
1783 _refine_cte_cl(NULL),
1784 _full_collection(false),
1785 _free_list("Master Free List"),
1786 _secondary_free_list("Secondary Free List"),
1787 _humongous_set("Master Humongous Set"),
1788 _free_regions_coming(false),
1789 _young_list(new YoungList(this)),
1790 _gc_time_stamp(0),
1791 _retained_old_gc_alloc_region(NULL),
1792 _surviving_young_words(NULL),
1793 _full_collections_completed(0),
1794 _in_cset_fast_test(NULL),
1795 _in_cset_fast_test_base(NULL),
1796 _dirty_cards_region_list(NULL) {
2050 // require BOT updates or not and, if it doesn't, then a non-young
2051 // region will complain that it cannot support allocations without
2052 // BOT updates. So we'll tag the dummy region as young to avoid that.
2053 dummy_region->set_young();
2054 // Make sure it's full.
2055 dummy_region->set_top(dummy_region->end());
2056 G1AllocRegion::setup(this, dummy_region);
2057
2058 init_mutator_alloc_region();
2059
2060 // Create the monitoring and management support now so that the
2061 // values in the heap have been properly initialized.
2062 _g1mm = new G1MonitoringSupport(this, &_g1_storage);
2063
2064 return JNI_OK;
2065 }
2066
2067 void G1CollectedHeap::ref_processing_init() {
2068 // Reference processing in G1 currently works as follows:
2069 //
2070 // * There is only one reference processor instance that
2071 // 'spans' the entire heap. It is created by the code
2072 // below.
2073 // * Reference discovery is not enabled during an incremental
2074 // pause (see 6484982).
2075 // * Discovered refs are not enqueued nor are they processed
2076 // during an incremental pause (see 6484982).
2077 // * Reference discovery is enabled at initial marking.
2078 // * Reference discovery is disabled and the discovered
2079 // references processed etc during remarking.
2080 // * Reference discovery is MT (see below).
2081 // * Reference discovery requires a barrier (see below).
2082 // * Reference processing is currently not MT (see 6608385).
2083 // * A full GC enables (non-MT) reference discovery and
2084 // processes any discovered references.
2085
2086 SharedHeap::ref_processing_init();
2087 MemRegion mr = reserved_region();
2088 _ref_processor =
2089 new ReferenceProcessor(mr, // span
2090 ParallelRefProcEnabled && (ParallelGCThreads > 1), // mt processing
2091 (int) ParallelGCThreads, // degree of mt processing
2092 ParallelGCThreads > 1 || ConcGCThreads > 1, // mt discovery
2093 (int) MAX2(ParallelGCThreads, ConcGCThreads), // degree of mt discovery
2094 false, // Reference discovery is not atomic
2095 &_is_alive_closure, // is alive closure for efficiency
2096 true); // Setting next fields of discovered
2097 // lists requires a barrier.
2098 }
2099
2100 size_t G1CollectedHeap::capacity() const {
2101 return _g1_committed.byte_size();
2102 }
2103
2104 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2105 DirtyCardQueue* into_cset_dcq,
2106 bool concurrent,
2107 int worker_i) {
2108 // Clean cards in the hot card cache
2109 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
2110
2111 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2112 int n_completed_buffers = 0;
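// apply_closure_to_completed_buffer() claims and processes one completed
// buffer per call and returns false once none remain; with a stop_at
// threshold of 0 the loop below drains them all.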
2113 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2114 n_completed_buffers++;
2115 }
2116 g1_policy()->record_update_rs_processed_buffers(worker_i,
3100 "not a garbage-first heap");
3101 return _g1h;
3102 }
3103
3104 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3105 // always_do_update_barrier = false;
3106 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3107 // Call allocation profiler
3108 AllocationProfiler::iterate_since_last_gc();
3109 // Retire and fill TLABs so that the heap is parsable
3110 ensure_parsability(true);
3111 }
3112
3113 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
3114 // FIXME: what is this about?
3115 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3116 // is set.
3117 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3118 "derived pointer present"));
3119 // always_do_update_barrier = true;
3120 }
3121
3122 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3123 unsigned int gc_count_before,
3124 bool* succeeded) {
3125 assert_heap_not_locked_and_not_at_safepoint();
3126 g1_policy()->record_stop_world_start();
3127 VM_G1IncCollectionPause op(gc_count_before,
3128 word_size,
3129 false, /* should_initiate_conc_mark */
3130 g1_policy()->max_pause_time_ms(),
3131 GCCause::_g1_inc_collection_pause);
3132 VMThread::execute(&op);
3133
3134 HeapWord* result = op.result();
3135 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3136 assert(result == NULL || ret_succeeded,
3137 "the result should be NULL if the VM did not succeed");
3138 *succeeded = ret_succeeded;
3139
3337
3338 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3339 IsGCActiveMark x;
3340
3341 gc_prologue(false);
3342 increment_total_collections(false /* full gc */);
3343 increment_gc_time_stamp();
3344
3345 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
3346 HandleMark hm; // Discard invalid handles created during verification
3347 gclog_or_tty->print(" VerifyBeforeGC:");
3348 prepare_for_verify();
3349 Universe::verify(/* allow dirty */ false,
3350 /* silent */ false,
3351 /* option */ VerifyOption_G1UsePrevMarking);
3352
3353 }
3354
3355 COMPILER2_PRESENT(DerivedPointerTable::clear());
3356
3357 // Please see comment in G1CollectedHeap::ref_processing_init()
3358 // to see how reference processing currently works in G1.
3359 //
3360 // We want to turn off ref discovery, if necessary, and turn it back on
3361 // again later if we do. XXX Dubious: why is discovery disabled?
3362 bool was_enabled = ref_processor()->discovery_enabled();
3363 if (was_enabled) ref_processor()->disable_discovery();
3364
3365 // Forget the current alloc region (we might even choose it to be part
3366 // of the collection set!).
3367 release_mutator_alloc_region();
3368
3369 // We should call this after we retire the mutator alloc
3370 // region(s) so that all the ALLOC / RETIRE events are generated
3371 // before the start GC event.
3372 _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3373
3374 // The start time recorded below deliberately excludes the time spent
3375 // in the verification above.
3376 double start_time_sec = os::elapsedTime();
3377 size_t start_used_bytes = used();
3378
3379 #if YOUNG_LIST_VERBOSE
3380 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
3381 _young_list->print();
3382 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3383 #endif // YOUNG_LIST_VERBOSE
3560 // stamp here we invalidate all the GC time stamps on all the
3561 // regions and saved_mark_word() will simply return top() for
3562 // all the regions. This is a nicer way of ensuring this rather
3563 // than iterating over the regions and fixing them. In fact, the
3564 // GC time stamp increment here also ensures that
3565 // saved_mark_word() will return top() between pauses, i.e.,
3566 // during concurrent refinement. So we don't need the
3567 // is_gc_active() check to decide which top to use when
3568 // scanning cards (see CR 7039627).
3569 increment_gc_time_stamp();
3570
3571 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
3572 HandleMark hm; // Discard invalid handles created during verification
3573 gclog_or_tty->print(" VerifyAfterGC:");
3574 prepare_for_verify();
3575 Universe::verify(/* allow dirty */ true,
3576 /* silent */ false,
3577 /* option */ VerifyOption_G1UsePrevMarking);
3578 }
3579
3580 if (was_enabled) ref_processor()->enable_discovery();
3581
3582 {
3583 size_t expand_bytes = g1_policy()->expansion_amount();
3584 if (expand_bytes > 0) {
3585 size_t bytes_before = capacity();
3586 // No need for an ergo verbose message here,
3587 // expansion_amount() does this when it returns a value > 0.
3588 if (!expand(expand_bytes)) {
3589 // We failed to expand the heap so let's verify that
3590 // committed/uncommitted amount match the backing store
3591 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
3592 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
3593 }
3594 }
3595 }
3596
3597 // We should do this after we potentially expand the heap so
3598 // that all the COMMIT events are generated before the end GC
3599 // event, and after we retire the GC alloc regions so that all
3600 // RETIRE events are generated before the end GC event.
3711 assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
3712 assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
3713 _retained_old_gc_alloc_region = NULL;
3714 }
3715
3716 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
3717 _drain_in_progress = false;
3718 set_evac_failure_closure(cl);
3719 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
3720 }
3721
3722 void G1CollectedHeap::finalize_for_evac_failure() {
3723 assert(_evac_failure_scan_stack != NULL &&
3724 _evac_failure_scan_stack->length() == 0,
3725 "Postcondition");
3726 assert(!_drain_in_progress, "Postcondition");
3727 delete _evac_failure_scan_stack;
3728 _evac_failure_scan_stack = NULL;
3729 }
3730
3731 // *** Sequential G1 Evacuation
3732
3733 class G1IsAliveClosure: public BoolObjectClosure {
3734 G1CollectedHeap* _g1;
3735 public:
3736 G1IsAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3737 void do_object(oop p) { assert(false, "Do not call."); }
3738 bool do_object_b(oop p) {
3739 // It is reachable if it is outside the collection set, or is inside
3740 // and forwarded.
3741 return !_g1->obj_in_cs(p) || p->is_forwarded();
3742 }
3743 };
3744
3745 class G1KeepAliveClosure: public OopClosure {
3746 G1CollectedHeap* _g1;
3747 public:
3748 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
3749 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
3750 void do_oop( oop* p) {
3751 oop obj = *p;
3752 if (_g1->obj_in_cs(obj)) {
3753 assert( obj->is_forwarded(), "invariant" );
3754 *p = obj->forwardee();
3755 }
3756 }
3757 };
3758
3759 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
3760 private:
3761 G1CollectedHeap* _g1;
3762 DirtyCardQueue *_dcq;
3763 CardTableModRefBS* _ct_bs;
3764
3765 public:
3766 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
3767 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
3768
3769 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3770 virtual void do_oop( oop* p) { do_oop_work(p); }
3771 template <class T> void do_oop_work(T* p) {
3772 assert(_from->is_in_reserved(p), "paranoia");
3773 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
3774 !_from->is_survivor()) {
3775 size_t card_index = _ct_bs->index_for(p);
3776 if (_ct_bs->mark_card_deferred(card_index)) {
3777 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
3778 }
4158 assert(_g1h->obj_in_cs(p),
4159 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4160 } else {
4161 oop p = oopDesc::load_decode_heap_oop(ref);
4162 assert(_g1h->is_in_g1_reserved(p),
4163 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4164 }
4165 return true;
4166 }
4167
4168 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4169 if (ref.is_narrow()) {
4170 return verify_ref((narrowOop*) ref);
4171 } else {
4172 return verify_ref((oop*) ref);
4173 }
4174 }
4175 #endif // ASSERT
4176
4177 void G1ParScanThreadState::trim_queue() {
4178 StarTask ref;
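// deal_with_reference() may push newly discovered fields back onto the
// queues, so keep draining until both are observed empty.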
4179 do {
4180 // Drain the overflow stack first, so other threads can steal.
4181 while (refs()->pop_overflow(ref)) {
4182 deal_with_reference(ref);
4183 }
4184 while (refs()->pop_local(ref)) {
4185 deal_with_reference(ref);
4186 }
4187 } while (!refs()->is_empty());
4188 }
4189
4190 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
4191 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4192 _par_scan_state(par_scan_state),
4193 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4194 _mark_in_progress(_g1->mark_in_progress()) { }
4195
4196 template <class T> void G1ParCopyHelper::mark_object(T* p) {
4197 // This is called from do_oop_work for objects that are not
4198 // in the collection set. Objects in the collection set
4199 // are marked after they have been evacuated.
4200
4201 T heap_oop = oopDesc::load_heap_oop(p);
4202 if (!oopDesc::is_null(heap_oop)) {
4203 oop obj = oopDesc::decode_heap_oop(heap_oop);
4484 _terminator(workers, _queues),
4485 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
4486 _n_workers(workers)
4487 {}
4488
4489 RefToScanQueueSet* queues() { return _queues; }
4490
4491 RefToScanQueue *work_queue(int i) {
4492 return queues()->queue(i);
4493 }
4494
4495 void work(int i) {
4496 if (i >= _n_workers) return; // no work needed this round
4497
4498 double start_time_ms = os::elapsedTime() * 1000.0;
4499 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
4500
4501 ResourceMark rm;
4502 HandleMark hm;
4503
4504 G1ParScanThreadState pss(_g1h, i);
4505 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss);
4506 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss);
4507 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss);
4508
4509 pss.set_evac_closure(&scan_evac_cl);
4510 pss.set_evac_failure_closure(&evac_failure_cl);
4511 pss.set_partial_scan_closure(&partial_scan_cl);
4512
4513 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss);
4514 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss);
4515 G1ParScanHeapRSClosure only_scan_heap_rs_cl(_g1h, &pss);
4516 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);
4517
4518 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss);
4519 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss);
4520 G1ParScanAndMarkHeapRSClosure scan_mark_heap_rs_cl(_g1h, &pss);
4521
4522 OopsInHeapRegionClosure *scan_root_cl;
4523 OopsInHeapRegionClosure *scan_perm_cl;
4524
4525 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4526 scan_root_cl = &scan_mark_root_cl;
4527 scan_perm_cl = &scan_mark_perm_cl;
4528 } else {
4529 scan_root_cl = &only_scan_root_cl;
4530 scan_perm_cl = &only_scan_perm_cl;
4531 }
4532
4533 pss.start_strong_roots();
4534 _g1h->g1_process_strong_roots(/* not collecting perm */ false,
4535 SharedHeap::SO_AllClasses,
4536 scan_root_cl,
4537 &push_heap_rs_cl,
4538 scan_perm_cl,
4539 i);
4540 pss.end_strong_roots();
4541
4542 {
4543 double start = os::elapsedTime();
4544 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4545 evac.do_void();
4546 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4547 double term_ms = pss.term_time()*1000.0;
4548 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
4549 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
4550 }
4551 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4552 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4560 }
4561
4562 assert(pss.refs()->is_empty(), "should be empty");
4563 double end_time_ms = os::elapsedTime() * 1000.0;
4564 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
4565 }
4566 };
4567
4568 // *** Common G1 Evacuation Stuff
4569
4570 // This method is run in a GC worker.
4571
4572 void
4573 G1CollectedHeap::
4574 g1_process_strong_roots(bool collecting_perm_gen,
4575 SharedHeap::ScanningOption so,
4576 OopClosure* scan_non_heap_roots,
4577 OopsInHeapRegionClosure* scan_rs,
4578 OopsInGenClosure* scan_perm,
4579 int worker_i) {
4580 // First scan the strong roots, including the perm gen.
4581 double ext_roots_start = os::elapsedTime();
4582 double closure_app_time_sec = 0.0;
4583
4584 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4585 BufferingOopsInGenClosure buf_scan_perm(scan_perm);
4586 buf_scan_perm.set_generation(perm_gen());
4587
4588 // Walk the code cache w/o buffering, because StarTask cannot handle
4589 // unaligned oop locations.
4590 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true);
4591
4592 process_strong_roots(false, // no scoping; this is parallel code
4593 collecting_perm_gen, so,
4594 &buf_scan_non_heap_roots,
4595 &eager_scan_code_roots,
4596 &buf_scan_perm);
4597
4598 // Now the ref_processor roots.
4599 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4600 // We need to treat the discovered reference lists as roots and
4601 // keep entries (which are added by the marking threads) on them
4602 // live until they can be processed at the end of marking.
4603 ref_processor()->weak_oops_do(&buf_scan_non_heap_roots);
4604 }
4605
4606 // Finish up any enqueued closure apps (attributed as object copy time).
4607 buf_scan_non_heap_roots.done();
4608 buf_scan_perm.done();
4609
4610 double ext_roots_end = os::elapsedTime();
4611
4612 g1_policy()->reset_obj_copy_time(worker_i);
4613 double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
4614 buf_scan_non_heap_roots.closure_app_seconds();
4615 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4616
4617 double ext_root_time_ms =
4618 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4619
4620 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
4621
4622 // Scan strong roots in mark stack.
4623 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
4624 concurrent_mark()->oops_do(scan_non_heap_roots);
4625 }
4626 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
4627 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
4628
4629 // Now scan the complement of the collection set.
4630 if (scan_rs != NULL) {
4631 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
4632 }
4633
4634 _process_strong_tasks->all_tasks_completed();
4635 }
4636
4637 void
4638 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
4639 OopClosure* non_root_closure) {
4640 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
4641 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
4642 }
4643
4644 void G1CollectedHeap::evacuate_collection_set() {
4645 set_evacuation_failed(false);
4646
4647 g1_rem_set()->prepare_for_oops_into_collection_set_do();
4648 concurrent_g1_refine()->set_use_cache(false);
4649 concurrent_g1_refine()->clear_hot_cache_claimed_index();
4650
4651 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
4652 set_par_threads(n_workers);
4653 G1ParTask g1_par_task(this, n_workers, _task_queues);
4654
4655 init_for_evac_failure(NULL);
4656
4657 rem_set()->prepare_for_younger_refs_iterate(true);
4658
4659 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
4660 double start_par = os::elapsedTime();
4661 if (G1CollectedHeap::use_parallel_gc_threads()) {
4662 // The individual threads will set their evac-failure closures.
4663 StrongRootsScope srs(this);
4664 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
4665 workers()->run_task(&g1_par_task);
4666 } else {
4667 StrongRootsScope srs(this);
4668 g1_par_task.work(0);
4669 }
4670
4671 double par_time = (os::elapsedTime() - start_par) * 1000.0;
4672 g1_policy()->record_par_time(par_time);
4673 set_par_threads(0);
4674
4675 // Weak root processing.
4676 // Note: when JSR 292 is enabled and code blobs can contain
4677 // non-perm oops, then we will need to process the code blobs
4678 // here too.
4679 {
4680 G1IsAliveClosure is_alive(this);
4681 G1KeepAliveClosure keep_alive(this);
4682 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
4683 }
4684 release_gc_alloc_regions();
4685 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
4686
4687 concurrent_g1_refine()->clear_hot_cache();
4688 concurrent_g1_refine()->set_use_cache(true);
4689
4690 finalize_for_evac_failure();
4691
4692 // Must do this before removing self-forwarding pointers, which clears
4693 // the per-region evac-failure flags.
4694 concurrent_mark()->complete_marking_in_collection_set();
4695
4696 if (evacuation_failed()) {
4697 remove_self_forwarding_pointers();
4698 if (PrintGCDetails) {
4699 gclog_or_tty->print(" (to-space overflow)");
4700 } else if (PrintGC) {
4701 gclog_or_tty->print("--");
4702 }
4703 }
4704
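// With deferred RSet updates, the cards logged during evacuation are
// re-dirtied and the buffers merged back into the shared dirty card
// queue set; the remembered sets are then rebuilt from these cards by
// the refinement machinery.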
4705 if (G1DeferredRSUpdate) {
4706 RedirtyLoggedCardTableEntryFastClosure redirty;
4707 dirty_card_queue_set().set_closure(&redirty);
4708 dirty_card_queue_set().apply_closure_to_all_completed_buffers();
4709
4710 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
4711 dcq.merge_bufferlists(&dirty_card_queue_set());
4712 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
4713 }
4714 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
4715 }
4716
4717 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
4718 size_t* pre_used,
4719 FreeRegionList* free_list,
4720 HumongousRegionSet* humongous_proxy_set,
4721 HRRSCleanupTask* hrrs_cleanup_task,
4722 bool par) {
4723 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
4724 if (hr->isHumongous()) {
4885 G1ParCleanupCTTask cleanup_task(ct_bs, this);
4886
4887 if (ParallelGCThreads > 0) {
4888 set_par_threads(workers()->total_workers());
4889 workers()->run_task(&cleanup_task);
4890 set_par_threads(0);
4891 } else {
4892 while (_dirty_cards_region_list) {
4893 HeapRegion* r = _dirty_cards_region_list;
4894 cleanup_task.clear_cards(r);
4895 _dirty_cards_region_list = r->get_next_dirty_cards_region();
4896 if (_dirty_cards_region_list == r) {
4897 // The last region.
4898 _dirty_cards_region_list = NULL;
4899 }
4900 r->set_next_dirty_cards_region(NULL);
4901 }
4902 }
4903
4904 double elapsed = os::elapsedTime() - start;
4905 g1_policy()->record_clear_ct_time( elapsed * 1000.0);
4906 #ifndef PRODUCT
4907 if (G1VerifyCTCleanup || VerifyAfterGC) {
4908 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
4909 heap_region_iterate(&cleanup_verifier);
4910 }
4911 #endif
4912 }
4913
4914 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
4915 size_t pre_used = 0;
4916 FreeRegionList local_free_list("Local List for CSet Freeing");
4917
4918 double young_time_ms = 0.0;
4919 double non_young_time_ms = 0.0;
4920
4921 // Since the collection set is a superset of the young list,
4922 // all we need to do to clear the young list is clear its
4923 // head and length, and unlink any young regions in the code below.
4924 _young_list->clear();
4925
|
25 #include "precompiled.hpp"
26 #include "code/icBuffer.hpp"
27 #include "gc_implementation/g1/bufferingOopClosure.hpp"
28 #include "gc_implementation/g1/concurrentG1Refine.hpp"
29 #include "gc_implementation/g1/concurrentG1RefineThread.hpp"
30 #include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
31 #include "gc_implementation/g1/g1AllocRegion.inline.hpp"
32 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
33 #include "gc_implementation/g1/g1CollectorPolicy.hpp"
34 #include "gc_implementation/g1/g1ErgoVerbose.hpp"
35 #include "gc_implementation/g1/g1MarkSweep.hpp"
36 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
37 #include "gc_implementation/g1/g1RemSet.inline.hpp"
38 #include "gc_implementation/g1/heapRegionRemSet.hpp"
39 #include "gc_implementation/g1/heapRegionSeq.inline.hpp"
40 #include "gc_implementation/g1/vm_operations_g1.hpp"
41 #include "gc_implementation/shared/isGCActiveMark.hpp"
42 #include "memory/gcLocker.inline.hpp"
43 #include "memory/genOopClosures.inline.hpp"
44 #include "memory/generationSpec.hpp"
45 #include "memory/referenceProcessor.hpp"
46 #include "oops/oop.inline.hpp"
47 #include "oops/oop.pcgc.inline.hpp"
48 #include "runtime/aprofiler.hpp"
49 #include "runtime/vmThread.hpp"
50
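// Set during heap initialization; objects of at least this many words
// (half a heap region, in the usual configuration) are allocated as
// humongous objects in dedicated regions.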
51 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
52
53 // Turn this macro on so that the contents of the young list (scan-only /
54 // to-be-collected) are printed at "strategic" points before, during,
55 // and after the collection --- this is useful for debugging.
56 #define YOUNG_LIST_VERBOSE 0
57 // CURRENT STATUS
58 // This file is under construction. Search for "FIXME".
59
60 // INVARIANTS/NOTES
61 //
62 // All allocation activity covered by the G1CollectedHeap interface is
63 // serialized by acquiring the HeapLock. This happens in mem_allocate
64 // and allocate_new_tlab, which are the "entry" points to the
65 // allocation code from the rest of the JVM. (Note that this does not
1228
1229 gc_prologue(true);
1230 increment_total_collections(true /* full gc */);
1231
1232 size_t g1h_prev_used = used();
1233 assert(used() == recalculate_used(), "Should be equal");
1234
1235 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
1236 HandleMark hm; // Discard invalid handles created during verification
1237 gclog_or_tty->print(" VerifyBeforeGC:");
1238 prepare_for_verify();
1239 Universe::verify(/* allow dirty */ true,
1240 /* silent */ false,
1241 /* option */ VerifyOption_G1UsePrevMarking);
1242
1243 }
1244 pre_full_gc_dump();
1245
1246 COMPILER2_PRESENT(DerivedPointerTable::clear());
1247
1248 // Disable discovery and empty the discovered lists
1249 // for the CM ref processor.
1250 ref_processor_cm()->disable_discovery();
1251 ref_processor_cm()->abandon_partial_discovery();
1252 ref_processor_cm()->verify_no_references_recorded();
1253
1254 // Abandon current iterations of concurrent marking and concurrent
1255 // refinement, if any are in progress.
1256 concurrent_mark()->abort();
1257
1258 // Make sure we'll choose a new allocation region afterwards.
1259 release_mutator_alloc_region();
1260 abandon_gc_alloc_regions();
1261 g1_rem_set()->cleanupHRRS();
1262 tear_down_region_lists();
1263
1264 // We should call this after we retire any currently active alloc
1265 // regions so that all the ALLOC / RETIRE events are generated
1266 // before the start GC event.
1267 _hr_printer.start_gc(true /* full */, (size_t) total_collections());
1268
1269 // We may have added regions to the current incremental collection
1270 // set between the last GC or pause and now. We need to clear the
1271 // incremental collection set and then start rebuilding it afresh
1272 // after this full GC.
1273 abandon_collection_set(g1_policy()->inc_cset_head());
1274 g1_policy()->clear_incremental_cset();
1275 g1_policy()->stop_incremental_cset_building();
1276
1277 empty_young_list();
1278 g1_policy()->set_full_young_gcs(true);
1279
1280 // See the comments in g1CollectedHeap.hpp and
1281 // G1CollectedHeap::ref_processing_init() about
1282 // how reference processing currently works in G1.
1283
1284 // Temporarily make discovery by the STW ref processor single threaded (non-MT).
1285 ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
1286
1287 // Temporarily clear the STW ref processor's _is_alive_non_header field.
1288 ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
1289
1290 ref_processor_stw()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
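// setup_policy() selects the soft reference clearing policy for this
// collection: clear all soft references if requested, otherwise apply
// the default soft reference policy.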
1291 ref_processor_stw()->setup_policy(do_clear_all_soft_refs);
1292
1293 // Do collection work
1294 {
1295 HandleMark hm; // Discard invalid handles created during gc
1296 G1MarkSweep::invoke_at_safepoint(ref_processor_stw(), do_clear_all_soft_refs);
1297 }
1298
1299 assert(free_regions() == 0, "we should not have added any free regions");
1300 rebuild_region_lists();
1301
1302 _summary_bytes_used = recalculate_used();
1303
1304 // Enqueue any discovered reference objects that have
1305 // not been removed from the discovered lists.
1306 ref_processor_stw()->enqueue_discovered_references();
1307
1308 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
1309
1310 MemoryService::track_memory_usage();
1311
1312 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
1313 HandleMark hm; // Discard invalid handles created during verification
1314 gclog_or_tty->print(" VerifyAfterGC:");
1315 prepare_for_verify();
1316 Universe::verify(/* allow dirty */ false,
1317 /* silent */ false,
1318 /* option */ VerifyOption_G1UsePrevMarking);
1319
1320 }
1321
1322 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
1323 ref_processor_stw()->verify_no_references_recorded();
1324
1325 // Note: since we've just done a full GC, concurrent
1326 // marking is no longer active. Therefore we need not
1327 // re-enable reference discovery for the CM ref processor.
1328 // That will be done at the start of the next marking cycle.
1329 assert(!ref_processor_cm()->discovery_enabled(), "Postcondition");
1330 ref_processor_cm()->verify_no_references_recorded();
1331
1332 reset_gc_time_stamp();
1333 // Since everything potentially moved, we will clear all remembered
1334 // sets, and clear all cards. Later we will rebuild remembered
1335 // sets. We will also reset the GC time stamps of the regions.
1336 PostMCRemSetClearClosure rs_clear(mr_bs());
1337 heap_region_iterate(&rs_clear);
1338
1339 // Resize the heap if necessary. An explicit GC has no pending allocation request, so pass a word_size of 0.
1340 resize_if_necessary_after_full_collection(explicit_gc ? 0 : word_size);
1341
1342 if (_hr_printer.is_active()) {
1343 // We should do this after we potentially resize the heap so
1344 // that all the COMMIT / UNCOMMIT events are generated before
1345 // the end GC event.
1346
1347 PostCompactionPrinterClosure cl(hr_printer());
1348 heap_region_iterate(&cl);
1349
1350 _hr_printer.end_gc(true /* full */, (size_t) total_collections());
1763 tear_down_region_lists(); // We will rebuild them in a moment.
1764 shrink_helper(shrink_bytes);
1765 rebuild_region_lists();
1766
1767 _hrs.verify_optional();
1768 verify_region_sets_optional();
1769 }
1770
1771 // Public methods.
1772
1773 #ifdef _MSC_VER // the use of 'this' below gets a warning, make it go away
1774 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
1775 #endif // _MSC_VER
1776
1777
1778 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1779 SharedHeap(policy_),
1780 _g1_policy(policy_),
1781 _dirty_card_queue_set(false),
1782 _into_cset_dirty_card_queue_set(false),
1783 _is_alive_closure_cm(this),
1784 _is_alive_closure_stw(this),
1785 _ref_processor_cm(NULL),
1786 _ref_processor_stw(NULL),
1787 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1788 _bot_shared(NULL),
1789 _objs_with_preserved_marks(NULL), _preserved_marks_of_objs(NULL),
1790 _evac_failure_scan_stack(NULL) ,
1791 _mark_in_progress(false),
1792 _cg1r(NULL), _summary_bytes_used(0),
1793 _refine_cte_cl(NULL),
1794 _full_collection(false),
1795 _free_list("Master Free List"),
1796 _secondary_free_list("Secondary Free List"),
1797 _humongous_set("Master Humongous Set"),
1798 _free_regions_coming(false),
1799 _young_list(new YoungList(this)),
1800 _gc_time_stamp(0),
1801 _retained_old_gc_alloc_region(NULL),
1802 _surviving_young_words(NULL),
1803 _full_collections_completed(0),
1804 _in_cset_fast_test(NULL),
1805 _in_cset_fast_test_base(NULL),
1806 _dirty_cards_region_list(NULL) {
2060 // require BOT updates or not and, if it doesn't, then a non-young
2061 // region will complain that it cannot support allocations without
2062 // BOT updates. So we'll tag the dummy region as young to avoid that.
2063 dummy_region->set_young();
2064 // Make sure it's full.
2065 dummy_region->set_top(dummy_region->end());
2066 G1AllocRegion::setup(this, dummy_region);
2067
2068 init_mutator_alloc_region();
2069
2070 // Create the monitoring and management support now so that the
2071 // values in the heap have been properly initialized.
2072 _g1mm = new G1MonitoringSupport(this, &_g1_storage);
2073
2074 return JNI_OK;
2075 }
2076
2077 void G1CollectedHeap::ref_processing_init() {
2078 // Reference processing in G1 currently works as follows:
2079 //
2080 // * There are two reference processor instances. One is
2081 // used to record and process discovered references
2082 // during concurrent marking; the other is used to
2083 // record and process references during STW pauses
2084 // (both full and incremental).
2085 // * Both ref processors need to 'span' the entire heap as
2086 // the regions in the collection set may be dotted around.
2087 //
2088 // * For the concurrent marking ref processor:
2089 // * Reference discovery is enabled at initial marking.
2090 // * Reference discovery is disabled and the discovered
2091 // references processed etc during remarking.
2092 // * Reference discovery is MT (see below).
2093 // * Reference discovery requires a barrier (see below).
2094 // * Reference processing may or may not be MT
2095 // (depending on the value of ParallelRefProcEnabled
2096 // and ParallelGCThreads).
2097 // * A full GC disables reference discovery by the CM
2098 // ref processor and abandons any entries on its
2099 // discovered lists.
2100 //
2101 // * For the STW processor:
2102 // * Non-MT discovery is enabled at the start of a full GC.
2103 // * Processing and enqueueing during a full GC is non-MT.
2104 // * During a full GC, references are processed after marking.
2105 //
2106 // * Discovery (may or may not be MT) is enabled at the start
2107 // of an incremental evacuation pause.
2108 // * References are processed near the end of a STW evacuation pause.
2109 // * For both types of GC:
2110 // * Discovery is atomic - i.e. not concurrent.
2111 // * Reference discovery will not need a barrier.
2112
2113 SharedHeap::ref_processing_init();
2114 MemRegion mr = reserved_region();
2115
2116 // Concurrent Mark ref processor
2117 _ref_processor_cm =
2118 new ReferenceProcessor(mr, // span
2119 ParallelRefProcEnabled && (ParallelGCThreads > 1),
2120 // mt processing
2121 (int) ParallelGCThreads,
2122 // degree of mt processing
2123 (ParallelGCThreads > 1) || (ConcGCThreads > 1),
2124 // mt discovery
2125 (int) MAX2(ParallelGCThreads, ConcGCThreads),
2126 // degree of mt discovery
2127 false,
2128 // Reference discovery is not atomic
2129 &_is_alive_closure_cm,
2130 // is alive closure
2131 // (for efficiency/performance)
2132 true);
2133 // Setting next fields of discovered
2134 // lists requires a barrier.
2135
2136 // STW ref processor
2137 _ref_processor_stw =
2138 new ReferenceProcessor(mr, // span
2139 ParallelRefProcEnabled && (ParallelGCThreads > 1),
2140 // mt processing
2141 MAX2((int)ParallelGCThreads, 1),
2142 // degree of mt processing
2143 (ParallelGCThreads > 1),
2144 // mt discovery
2145 MAX2((int)ParallelGCThreads, 1),
2146 // degree of mt discovery
2147 true,
2148 // Reference discovery is atomic
2149 &_is_alive_closure_stw,
2150 // is alive closure
2151 // (for efficiency/performance)
2152 false);
2153 // Setting next fields of discovered
2154 // lists requires a barrier.
2155 }
2156
2157 size_t G1CollectedHeap::capacity() const {
2158 return _g1_committed.byte_size();
2159 }
2160
2161 void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl,
2162 DirtyCardQueue* into_cset_dcq,
2163 bool concurrent,
2164 int worker_i) {
2165 // Clean cards in the hot card cache
2166 concurrent_g1_refine()->clean_up_cache(worker_i, g1_rem_set(), into_cset_dcq);
2167
2168 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2169 int n_completed_buffers = 0;
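// apply_closure_to_completed_buffer() claims and processes one completed
// buffer per call and returns false once none remain; with a stop_at
// threshold of 0 the loop below drains them all.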
2170 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2171 n_completed_buffers++;
2172 }
2173 g1_policy()->record_update_rs_processed_buffers(worker_i,
3157 "not a garbage-first heap");
3158 return _g1h;
3159 }
3160
3161 void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
3162 // always_do_update_barrier = false;
3163 assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
3164 // Call allocation profiler
3165 AllocationProfiler::iterate_since_last_gc();
3166 // Retire and fill TLABs so that the heap is parsable
3167 ensure_parsability(true);
3168 }
3169
3170 void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
3171 // FIXME: what is this about?
3172 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3173 // is set.
3174 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3175 "derived pointer present"));
3176 // always_do_update_barrier = true;
3177
3178 // We have just completed a GC. Update the soft reference
3179 // policy with the new heap occupancy.
3180 Universe::update_heap_info_at_gc();
3181 }
3182
3183 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3184 unsigned int gc_count_before,
3185 bool* succeeded) {
3186 assert_heap_not_locked_and_not_at_safepoint();
3187 g1_policy()->record_stop_world_start();
3188 VM_G1IncCollectionPause op(gc_count_before,
3189 word_size,
3190 false, /* should_initiate_conc_mark */
3191 g1_policy()->max_pause_time_ms(),
3192 GCCause::_g1_inc_collection_pause);
3193 VMThread::execute(&op);
3194
3195 HeapWord* result = op.result();
3196 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3197 assert(result == NULL || ret_succeeded,
3198 "the result should be NULL if the VM did not succeed");
3199 *succeeded = ret_succeeded;
3200
3398
3399 { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3400 IsGCActiveMark x;
3401
3402 gc_prologue(false);
3403 increment_total_collections(false /* full gc */);
3404 increment_gc_time_stamp();
3405
3406 if (VerifyBeforeGC && total_collections() >= VerifyGCStartAt) {
3407 HandleMark hm; // Discard invalid handles created during verification
3408 gclog_or_tty->print(" VerifyBeforeGC:");
3409 prepare_for_verify();
3410 Universe::verify(/* allow dirty */ false,
3411 /* silent */ false,
3412 /* option */ VerifyOption_G1UsePrevMarking);
3413
3414 }
3415
3416 COMPILER2_PRESENT(DerivedPointerTable::clear());
3417
3418 // Please see comment in g1CollectedHeap.hpp and
3419 // G1CollectedHeap::ref_processing_init() to see how
3420 // reference processing currently works in G1.
3421
3422 // Enable discovery in the STW reference processor
3423 ref_processor_stw()->enable_discovery(true /*verify_disabled*/,
3424 true /*verify_no_refs*/);
3425
3426 {
3427 // We want to temporarily turn off discovery by the
3428 // CM ref processor, if necessary, and turn it back on
3429 // again later if we do. Using a scoped
3430 // NoRefDiscovery object will do this.
3431 NoRefDiscovery no_cm_discovery(ref_processor_cm());
3432
3433 // Forget the current alloc region (we might even choose it to be part
3434 // of the collection set!).
3435 release_mutator_alloc_region();
3436
3437 // We should call this after we retire the mutator alloc
3438 // region(s) so that all the ALLOC / RETIRE events are generated
3439 // before the start GC event.
3440 _hr_printer.start_gc(false /* full */, (size_t) total_collections());
3441
3442 // The start time recorded below deliberately excludes the time spent
3443 // in the verification above.
3444 double start_time_sec = os::elapsedTime();
3445 size_t start_used_bytes = used();
3446
3447 #if YOUNG_LIST_VERBOSE
3448 gclog_or_tty->print_cr("\nBefore recording pause start.\nYoung_list:");
3449 _young_list->print();
3450 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3451 #endif // YOUNG_LIST_VERBOSE
3628 // stamp here we invalidate all the GC time stamps on all the
3629 // regions and saved_mark_word() will simply return top() for
3630 // all the regions. This is a nicer way of ensuring this rather
3631 // than iterating over the regions and fixing them. In fact, the
3632 // GC time stamp increment here also ensures that
3633 // saved_mark_word() will return top() between pauses, i.e.,
3634 // during concurrent refinement. So we don't need the
3635 // is_gc_active() check to decide which top to use when
3636 // scanning cards (see CR 7039627).
3637 increment_gc_time_stamp();
3638
3639 if (VerifyAfterGC && total_collections() >= VerifyGCStartAt) {
3640 HandleMark hm; // Discard invalid handles created during verification
3641 gclog_or_tty->print(" VerifyAfterGC:");
3642 prepare_for_verify();
3643 Universe::verify(/* allow dirty */ true,
3644 /* silent */ false,
3645 /* option */ VerifyOption_G1UsePrevMarking);
3646 }
3647
3648 assert(!ref_processor_stw()->discovery_enabled(), "Postcondition");
3649 ref_processor_stw()->verify_no_references_recorded();
3650
3651 // CM reference discovery will be re-enabled if necessary.
3652 }
3653
3654 {
3655 size_t expand_bytes = g1_policy()->expansion_amount();
3656 if (expand_bytes > 0) {
3657 size_t bytes_before = capacity();
3658 // No need for an ergo verbose message here,
3659 // expansion_amount() does this when it returns a value > 0.
3660 if (!expand(expand_bytes)) {
3661 // We failed to expand the heap so let's verify that
3662 // committed/uncommitted amount match the backing store
3663 assert(capacity() == _g1_storage.committed_size(), "committed size mismatch");
3664 assert(max_capacity() == _g1_storage.reserved_size(), "reserved size mismatch");
3665 }
3666 }
3667 }
3668
3669 // We should do this after we potentially expand the heap so
3670 // that all the COMMIT events are generated before the end GC
3671 // event, and after we retire the GC alloc regions so that all
3672 // RETIRE events are generated before the end GC event.
3783 assert(_survivor_gc_alloc_region.get() == NULL, "pre-condition");
3784 assert(_old_gc_alloc_region.get() == NULL, "pre-condition");
3785 _retained_old_gc_alloc_region = NULL;
3786 }
3787
3788 void G1CollectedHeap::init_for_evac_failure(OopsInHeapRegionClosure* cl) {
3789 _drain_in_progress = false;
3790 set_evac_failure_closure(cl);
3791 _evac_failure_scan_stack = new (ResourceObj::C_HEAP) GrowableArray<oop>(40, true);
3792 }
3793
3794 void G1CollectedHeap::finalize_for_evac_failure() {
3795 assert(_evac_failure_scan_stack != NULL &&
3796 _evac_failure_scan_stack->length() == 0,
3797 "Postcondition");
3798 assert(!_drain_in_progress, "Postcondition");
3799 delete _evac_failure_scan_stack;
3800 _evac_failure_scan_stack = NULL;
3801 }
3802
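// Deferred remembered set update closure: instead of updating the RSet
// directly, mark the card covering the updated field as deferred and log
// it on a dirty card queue to be processed after evacuation.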
3803 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
3804 private:
3805 G1CollectedHeap* _g1;
3806 DirtyCardQueue *_dcq;
3807 CardTableModRefBS* _ct_bs;
3808
3809 public:
3810 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
3811 _g1(g1), _ct_bs((CardTableModRefBS*)_g1->barrier_set()), _dcq(dcq) {}
3812
3813 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
3814 virtual void do_oop( oop* p) { do_oop_work(p); }
3815 template <class T> void do_oop_work(T* p) {
3816 assert(_from->is_in_reserved(p), "paranoia");
3817 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
3818 !_from->is_survivor()) {
3819 size_t card_index = _ct_bs->index_for(p);
3820 if (_ct_bs->mark_card_deferred(card_index)) {
3821 _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
3822 }
4202 assert(_g1h->obj_in_cs(p),
4203 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4204 } else {
4205 oop p = oopDesc::load_decode_heap_oop(ref);
4206 assert(_g1h->is_in_g1_reserved(p),
4207 err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, ref, intptr_t(p)));
4208 }
4209 return true;
4210 }
4211
4212 bool G1ParScanThreadState::verify_task(StarTask ref) const {
4213 if (ref.is_narrow()) {
4214 return verify_ref((narrowOop*) ref);
4215 } else {
4216 return verify_ref((oop*) ref);
4217 }
4218 }
4219 #endif // ASSERT
4220
4221 void G1ParScanThreadState::trim_queue() {
4222 assert(_evac_cl != NULL, "not set");
4223 assert(_evac_failure_cl != NULL, "not set");
4224 assert(_partial_scan_cl != NULL, "not set");
4225
4226 StarTask ref;
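// deal_with_reference() may push newly discovered fields back onto the
// queues, so keep draining until both are observed empty.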
4227 do {
4228 // Drain the overflow stack first, so other threads can steal.
4229 while (refs()->pop_overflow(ref)) {
4230 deal_with_reference(ref);
4231 }
4232
4233 while (refs()->pop_local(ref)) {
4234 deal_with_reference(ref);
4235 }
4236 } while (!refs()->is_empty());
4237 }
4238
4239 G1ParClosureSuper::G1ParClosureSuper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
4240 _g1(g1), _g1_rem(_g1->g1_rem_set()), _cm(_g1->concurrent_mark()),
4241 _par_scan_state(par_scan_state),
4242 _during_initial_mark(_g1->g1_policy()->during_initial_mark_pause()),
4243 _mark_in_progress(_g1->mark_in_progress()) { }
4244
4245 template <class T> void G1ParCopyHelper::mark_object(T* p) {
4246 // This is called from do_oop_work for objects that are not
4247 // in the collection set. Objects in the collection set
4248 // are marked after they have been evacuated.
4249
4250 T heap_oop = oopDesc::load_heap_oop(p);
4251 if (!oopDesc::is_null(heap_oop)) {
4252 oop obj = oopDesc::decode_heap_oop(heap_oop);
4533 _terminator(workers, _queues),
4534 _stats_lock(Mutex::leaf, "parallel G1 stats lock", true),
4535 _n_workers(workers)
4536 {}
4537
4538 RefToScanQueueSet* queues() { return _queues; }
4539
4540 RefToScanQueue *work_queue(int i) {
4541 return queues()->queue(i);
4542 }
4543
4544 void work(int i) {
4545 if (i >= _n_workers) return; // no work needed this round
4546
4547 double start_time_ms = os::elapsedTime() * 1000.0;
4548 _g1h->g1_policy()->record_gc_worker_start_time(i, start_time_ms);
4549
4550 ResourceMark rm;
4551 HandleMark hm;
4552
4553 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4554
4555 G1ParScanThreadState pss(_g1h, i);
4556 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, rp);
4557 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);
4558 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, rp);
4559
4560 pss.set_evac_closure(&scan_evac_cl);
4561 pss.set_evac_failure_closure(&evac_failure_cl);
4562 pss.set_partial_scan_closure(&partial_scan_cl);
4563
4564 G1ParScanExtRootClosure only_scan_root_cl(_g1h, &pss, rp);
4565 G1ParScanPermClosure only_scan_perm_cl(_g1h, &pss, rp);
4566
4567 G1ParScanAndMarkExtRootClosure scan_mark_root_cl(_g1h, &pss, rp);
4568 G1ParScanAndMarkPermClosure scan_mark_perm_cl(_g1h, &pss, rp);
4569
4570 OopClosure* scan_root_cl = &only_scan_root_cl;
4571 OopsInHeapRegionClosure* scan_perm_cl = &only_scan_perm_cl;
4572
4573 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4574 // We also need to mark copied objects.
4575 scan_root_cl = &scan_mark_root_cl;
4576 scan_perm_cl = &scan_mark_perm_cl;
4577 }
4578
4579 // The following closure is used to scan RSets looking for reference
4580 // fields that point into the collection set. The actual field iteration
4581 // is performed by a FilterIntoCSClosure, whose do_oop method calls the
4582 // do_oop method of the following closure.
4583 // Therefore we want to record the reference processor in the
4584 // FilterIntoCSClosure. To do so we record the STW reference
4585 // processor into the following closure and pass it to the
4586 // FilterIntoCSClosure in HeapRegionDCTOC::walk_mem_region_with_cl.
4587 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss, rp);
4588
4589 pss.start_strong_roots();
4590 _g1h->g1_process_strong_roots(/* not collecting perm */ false,
4591 SharedHeap::SO_AllClasses,
4592 scan_root_cl,
4593 &push_heap_rs_cl,
4594 scan_perm_cl,
4595 i);
4596 pss.end_strong_roots();
4597
4598 {
4599 double start = os::elapsedTime();
4600 G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
4601 evac.do_void();
4602 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4603 double term_ms = pss.term_time()*1000.0;
4604 _g1h->g1_policy()->record_obj_copy_time(i, elapsed_ms-term_ms);
4605 _g1h->g1_policy()->record_termination(i, term_ms, pss.term_attempts());
4606 }
4607 _g1h->g1_policy()->record_thread_age_table(pss.age_table());
4608 _g1h->update_surviving_young_words(pss.surviving_young_words()+1);
4616 }
4617
4618 assert(pss.refs()->is_empty(), "should be empty");
4619 double end_time_ms = os::elapsedTime() * 1000.0;
4620 _g1h->g1_policy()->record_gc_worker_end_time(i, end_time_ms);
4621 }
4622 };
4623
4624 // *** Common G1 Evacuation Stuff
4625
4626 // This method is run in a GC worker.
4627
4628 void
4629 G1CollectedHeap::
4630 g1_process_strong_roots(bool collecting_perm_gen,
4631 SharedHeap::ScanningOption so,
4632 OopClosure* scan_non_heap_roots,
4633 OopsInHeapRegionClosure* scan_rs,
4634 OopsInGenClosure* scan_perm,
4635 int worker_i) {
4636
4637 // First scan the strong roots, including the perm gen.
4638 double ext_roots_start = os::elapsedTime();
4639 double closure_app_time_sec = 0.0;
4640
4641 BufferingOopClosure buf_scan_non_heap_roots(scan_non_heap_roots);
4642 BufferingOopsInGenClosure buf_scan_perm(scan_perm);
4643 buf_scan_perm.set_generation(perm_gen());
4644
4645 // Walk the code cache w/o buffering, because StarTask cannot handle
4646 // unaligned oop locations.
4647 CodeBlobToOopClosure eager_scan_code_roots(scan_non_heap_roots, /*do_marking=*/ true);
4648
4649 process_strong_roots(false, // no scoping; this is parallel code
4650 collecting_perm_gen, so,
4651 &buf_scan_non_heap_roots,
4652 &eager_scan_code_roots,
4653 &buf_scan_perm);
4654
4655 // Now the CM ref_processor roots.
4656 if (!_process_strong_tasks->is_task_claimed(G1H_PS_refProcessor_oops_do)) {
4657 // We need to treat the discovered reference lists of the
4658 // concurrent mark ref processor as roots and keep entries
4659 // (which are added by the marking threads) on them live
4660 // until they can be processed at the end of marking.
4661 ref_processor_cm()->weak_oops_do(&buf_scan_non_heap_roots);
4662 }
4663
4664 // Finish up any enqueued closure apps (attributed as object copy time).
4665 buf_scan_non_heap_roots.done();
4666 buf_scan_perm.done();
4667
4668 double ext_roots_end = os::elapsedTime();
4669
4670 g1_policy()->reset_obj_copy_time(worker_i);
4671 double obj_copy_time_sec = buf_scan_perm.closure_app_seconds() +
4672 buf_scan_non_heap_roots.closure_app_seconds();
4673 g1_policy()->record_obj_copy_time(worker_i, obj_copy_time_sec * 1000.0);
4674
4675 double ext_root_time_ms =
4676 ((ext_roots_end - ext_roots_start) - obj_copy_time_sec) * 1000.0;
4677
4678 g1_policy()->record_ext_root_scan_time(worker_i, ext_root_time_ms);
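  // For example (illustrative numbers only): if the external root walk
  // took 10.0 ms of wall time, of which 4.0 ms was spent inside the
  // buffered closures, we record 4.0 ms of object copy time and 6.0 ms
  // of ext root scan time for this worker.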
4679
4680 // Scan strong roots in mark stack.
4681 if (!_process_strong_tasks->is_task_claimed(G1H_PS_mark_stack_oops_do)) {
4682 concurrent_mark()->oops_do(scan_non_heap_roots);
4683 }
4684 double mark_stack_scan_ms = (os::elapsedTime() - ext_roots_end) * 1000.0;
4685 g1_policy()->record_mark_stack_scan_time(worker_i, mark_stack_scan_ms);
4686
4687 // Now scan the complement of the collection set.
4688 if (scan_rs != NULL) {
4689 g1_rem_set()->oops_into_collection_set_do(scan_rs, worker_i);
4690 }
4691
4692 _process_strong_tasks->all_tasks_completed();
4693 }
4694
4695 void
4696 G1CollectedHeap::g1_process_weak_roots(OopClosure* root_closure,
4697 OopClosure* non_root_closure) {
4698 CodeBlobToOopClosure roots_in_blobs(root_closure, /*do_marking=*/ false);
4699 SharedHeap::process_weak_roots(root_closure, &roots_in_blobs, non_root_closure);
4700 }
4701
4702 // Weak Reference Processing support
4703
4704 // An always "is_alive" closure that is used to preserve referents.
4705 // If the object is non-null then it's alive. Used in the preservation
4706 // of referent objects that are pointed to by reference objects
4707 // discovered by the CM ref processor.
4708 class G1AlwaysAliveClosure: public BoolObjectClosure {
4709 G1CollectedHeap* _g1;
4710 public:
4711 G1AlwaysAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
4712 void do_object(oop p) { assert(false, "Do not call."); }
4713   bool do_object_b(oop p) {
4714     return p != NULL;
4715   }
4719 };
4720
4721 bool G1STWIsAliveClosure::do_object_b(oop p) {
4722 // An object is reachable if it is outside the collection set,
4723 // or is inside and copied.
4724 return !_g1->obj_in_cs(p) || p->is_forwarded();
4725 }
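// Summarizing the predicate above (informal):
//   - outside the collection set        -> alive
//   - in the CSet and forwarded         -> alive (copied, or self-forwarded
//                                          after an evacuation failure)
//   - in the CSet and not forwarded     -> dead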
4726
4727 // Non Copying Keep Alive closure
4728 class G1KeepAliveClosure: public OopClosure {
4729 G1CollectedHeap* _g1;
4730 public:
4731 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
4732 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
4733 void do_oop( oop* p) {
4734 oop obj = *p;
4735
4736 if (_g1->obj_in_cs(obj)) {
4737 assert( obj->is_forwarded(), "invariant" );
4738 *p = obj->forwardee();
4739 }
4740 }
4741 };
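// Note: unlike the copying variant below, this closure never copies an
// object; it only fixes up pointers to objects that were already copied
// (forwarded) during the pause. It is used for the JNI weak roots in
// evacuate_collection_set(), after all copying has been done.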
4742
4743 // Copying Keep Alive closure - can be called from both
4744 // serial and parallel code as long as different worker
4745 // threads utilize different G1ParScanThreadState instances
4746 // and different queues.
4747
4748 class G1CopyingKeepAliveClosure: public OopClosure {
4749 G1CollectedHeap* _g1h;
4750 OopClosure* _copy_non_heap_obj_cl;
4751 OopsInHeapRegionClosure* _copy_perm_obj_cl;
4752 G1ParScanThreadState* _par_scan_state;
4753
4754 public:
4755 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
4756 OopClosure* non_heap_obj_cl,
4757 OopsInHeapRegionClosure* perm_obj_cl,
4758 G1ParScanThreadState* pss):
4759 _g1h(g1h),
4760 _copy_non_heap_obj_cl(non_heap_obj_cl),
4761 _copy_perm_obj_cl(perm_obj_cl),
4762 _par_scan_state(pss)
4763 {}
4764
4765 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
4766 virtual void do_oop( oop* p) { do_oop_work(p); }
4767
4768 template <class T> void do_oop_work(T* p) {
4769 oop obj = oopDesc::load_decode_heap_oop(p);
4770
4771 if (_g1h->obj_in_cs(obj)) {
4772 // If the referent object has been forwarded (either copied
4773 // to a new location or to itself in the event of an
4774 // evacuation failure) then we need to update the reference
4775 // field and, if both reference and referent are in the G1
4776 // heap, update the RSet for the referent.
4777 //
4778 // If the referent has not been forwarded then we have to keep
4779       // it alive by policy. Therefore we have to copy the referent.
4780 //
4781 // If the reference field is in the G1 heap then we can push
4782 // on the PSS queue. When the queue is drained (after each
4783       // phase of reference processing) the object and its followers
4784 // will be copied, the reference field set to point to the
4785 // new location, and the RSet updated. Otherwise we need to
4786       // use the non-heap or perm closures directly to copy the
4787       // referent object and update the pointer, while avoiding
4788 // updating the RSet.
4789
4790 if (_g1h->is_in_g1_reserved(p)) {
4791 _par_scan_state->push_on_queue(p);
4792 } else {
4793 // The reference field is not in the G1 heap.
4794 if (_g1h->perm_gen()->is_in(p)) {
4795 _copy_perm_obj_cl->do_oop(p);
4796 } else {
4797 _copy_non_heap_obj_cl->do_oop(p);
4798 }
4799 }
4800 }
4801 }
4802 };
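// Informal dispatch summary for the closure above, keyed on where the
// reference field p itself lives:
//   in the G1 reserved heap  -> push p on the PSS queue (RSet updated later)
//   in the perm gen          -> apply the perm copy closure directly
//   anywhere else            -> apply the non-heap copy closure directly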
4803
4804 // Serial drain queue closure. Called as the 'complete_gc'
4805 // closure for each discovered list in some of the
4806 // reference processing phases.
4807
4808 class G1STWDrainQueueClosure: public VoidClosure {
4809 protected:
4810 G1CollectedHeap* _g1h;
4811 G1ParScanThreadState* _par_scan_state;
4812
4813 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4814
4815 public:
4816 G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
4817 _g1h(g1h),
4818 _par_scan_state(pss)
4819 { }
4820
4821 void do_void() {
4822 G1ParScanThreadState* const pss = par_scan_state();
4823 pss->trim_queue();
4824 }
4825 };
4826
4827 // Parallel Reference Processing closures
4828
4829 // Implementation of AbstractRefProcTaskExecutor for parallel reference
4830 // processing during G1 evacuation pauses.
4831
4832 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
4833 private:
4834 G1CollectedHeap* _g1h;
4835 RefToScanQueueSet* _queues;
4836 WorkGang* _workers;
4837 int _active_workers;
4838
4839 public:
4840 G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
4841 WorkGang* workers,
4842 RefToScanQueueSet *task_queues,
4843 int n_workers) :
4844 _g1h(g1h),
4845 _queues(task_queues),
4846 _workers(workers),
4847 _active_workers(n_workers)
4848 {
4849 assert(n_workers > 0, "shouldn't call this otherwise");
4850 }
4851
4852   // Executes the given task using the heap's GC worker threads.
4853 virtual void execute(ProcessTask& task);
4854 virtual void execute(EnqueueTask& task);
4855 };
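// The two execute() overloads above wrap the given reference processing
// task in an AbstractGangTask proxy (see below) and run it on the work
// gang, bracketed by set_par_threads() calls.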
4856
4857 // Gang task for possibly parallel reference processing
4858
4859 class G1STWRefProcTaskProxy: public AbstractGangTask {
4860 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
4861 ProcessTask& _proc_task;
4862 G1CollectedHeap* _g1h;
4863 RefToScanQueueSet *_task_queues;
4864 ParallelTaskTerminator* _terminator;
4865
4866 public:
4867 G1STWRefProcTaskProxy(ProcessTask& proc_task,
4868 G1CollectedHeap* g1h,
4869 RefToScanQueueSet *task_queues,
4870 ParallelTaskTerminator* terminator) :
4871 AbstractGangTask("Process reference objects in parallel"),
4872 _proc_task(proc_task),
4873 _g1h(g1h),
4874 _task_queues(task_queues),
4875 _terminator(terminator)
4876 {}
4877
4878 virtual void work(int i) {
4879 // The reference processing task executed by a single worker.
4880 ResourceMark rm;
4881 HandleMark hm;
4882
4883 G1STWIsAliveClosure is_alive(_g1h);
4884
4885 G1ParScanThreadState pss(_g1h, i);
4886
4887 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
4888 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
4889 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
4890
4891 pss.set_evac_closure(&scan_evac_cl);
4892 pss.set_evac_failure_closure(&evac_failure_cl);
4893 pss.set_partial_scan_closure(&partial_scan_cl);
4894
4895 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
4896 G1ParScanPermClosure only_copy_perm_cl(_g1h, &pss, NULL);
4897
4898 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
4899 G1ParScanAndMarkPermClosure copy_mark_perm_cl(_g1h, &pss, NULL);
4900
4901 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
4902 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl;
4903
4904 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4905 // We also need to mark copied objects.
4906       copy_non_heap_cl = &copy_mark_non_heap_cl;
4907       copy_perm_cl = &copy_mark_perm_cl;
4908 }
4909
4910 // Keep alive closure.
4911 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
4912
4913 // Complete GC closure
4914 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);
4915
4916 // Call the reference processing task's work routine.
4917 _proc_task.work(i, is_alive, keep_alive, drain_queue);
4918
4919 // Note we cannot assert that the refs array is empty here as not all
4920 // of the processing tasks (specifically phase2 - pp2_work) execute
4921 // the complete_gc closure (which ordinarily would drain the queue) so
4922 // the queue may not be empty.
4923 }
4924 };
4925
4926 // Driver routine for parallel reference processing.
4927 // Creates an instance of the ref processing gang
4928 // task and has the worker threads execute it.
4929 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
4930 assert(_workers != NULL, "Need parallel worker threads.");
4931
4932 ParallelTaskTerminator terminator(_active_workers, _queues);
4933 G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);
4934
4935 _g1h->set_par_threads(_active_workers);
4936 _workers->run_task(&proc_task_proxy);
4937 _g1h->set_par_threads(0);
4938 }
4939
4940 // Gang task for parallel reference enqueueing.
4941
4942 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
4943 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
4944 EnqueueTask& _enq_task;
4945
4946 public:
4947 G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
4948 AbstractGangTask("Enqueue reference objects in parallel"),
4949 _enq_task(enq_task)
4950 { }
4951
4952 virtual void work(int i) {
4953 _enq_task.work(i);
4954 }
4955 };
4956
4957 // Driver routine for parallel reference enqueueing.
4958 // Creates an instance of the ref enqueueing gang
4959 // task and has the worker threads execute it.
4960
4961 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
4962 assert(_workers != NULL, "Need parallel worker threads.");
4963
4964 G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
4965
4966 _g1h->set_par_threads(_active_workers);
4967 _workers->run_task(&enq_task_proxy);
4968 _g1h->set_par_threads(0);
4969 }
4970
4971 // End of weak reference support closures
4972
4973 // Abstract task used to preserve (i.e. copy) any referent objects
4974 // that are in the collection set and are pointed to by reference
4975 // objects discovered by the CM ref processor.
4976
4977 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
4978 protected:
4979 G1CollectedHeap* _g1h;
4980 RefToScanQueueSet *_queues;
4981 ParallelTaskTerminator _terminator;
4982 int _n_workers;
4983
4984 public:
4985   G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet *task_queues) :
4986 AbstractGangTask("ParPreserveCMReferents"),
4987 _g1h(g1h),
4988 _queues(task_queues),
4989 _terminator(workers, _queues),
4990 _n_workers(workers)
4991 { }
4992
4993 void work(int i) {
4994 ResourceMark rm;
4995 HandleMark hm;
4996
4997 G1ParScanThreadState pss(_g1h, i);
4998 G1ParScanHeapEvacClosure scan_evac_cl(_g1h, &pss, NULL);
4999 G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);
5000 G1ParScanPartialArrayClosure partial_scan_cl(_g1h, &pss, NULL);
5001
5002 pss.set_evac_closure(&scan_evac_cl);
5003 pss.set_evac_failure_closure(&evac_failure_cl);
5004 pss.set_partial_scan_closure(&partial_scan_cl);
5005
5006 assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5007
5008
5009 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);
5010 G1ParScanPermClosure only_copy_perm_cl(_g1h, &pss, NULL);
5011
5012 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);
5013 G1ParScanAndMarkPermClosure copy_mark_perm_cl(_g1h, &pss, NULL);
5014
5015 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5016 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl;
5017
5018 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5019 // We also need to mark copied objects.
5020       copy_non_heap_cl = &copy_mark_non_heap_cl;
5021       copy_perm_cl = &copy_mark_perm_cl;
5022 }
5023
5024 // Is alive closure
5025 G1AlwaysAliveClosure always_alive(_g1h);
5026
5027 // Copying keep alive closure. Applied to referent objects that need
5028 // to be copied.
5029 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, copy_perm_cl, &pss);
5030
5031 ReferenceProcessor* rp = _g1h->ref_processor_cm();
5032
5033 int limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5034 int stride = MIN2(MAX2(_n_workers, 1), limit);
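    // Illustrative numbers only: if number_of_subclasses_of_ref() were 4
    // and max_num_q() were 4, limit would be 16; with 4 workers stride
    // would be 4, so worker 1 would scan lists 1, 5, 9 and 13.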
5035
5036 // limit is set using max_num_q() - which was set using ParallelGCThreads.
5037 // So this must be true - but assert just in case someone decides to
5038 // change the worker ids.
5039 assert(0 <= i && i < limit, "sanity");
5040 assert(!rp->discovery_is_atomic(), "check this code");
5041
5042 // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5043 for (int idx = i; idx < limit; idx += stride) {
5044 DiscoveredList& ref_list = rp->discovered_soft_refs()[idx];
5045
5046 DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5047 while (iter.has_next()) {
5048 // Since discovery is not atomic for the CM ref processor, we
5049 // can see some null referent objects.
5050 iter.load_ptrs(DEBUG_ONLY(true));
5051 oop ref = iter.obj();
5052
5053 // This will filter nulls.
5054 if (iter.is_referent_alive()) {
5055 iter.make_referent_alive();
5056 }
5057 iter.move_to_next();
5058 }
5059 }
5060
5061 // Drain the queue - which may cause stealing
5062 G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
5063 drain_queue.do_void();
5064 // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5065 assert(pss.refs()->is_empty(), "should be");
5066 }
5067 };
5068
5069 // Weak Reference processing during an evacuation pause (part 1).
5070 void G1CollectedHeap::process_discovered_references() {
5071 double ref_proc_start = os::elapsedTime();
5072
5073 ReferenceProcessor* rp = _ref_processor_stw;
5074 assert(rp->discovery_enabled(), "should have been enabled");
5075
5076   // Any reference objects in the collection set that were 'discovered'
5077   // by the CM ref processor should have already been copied (either by
5078 // applying the external root copy closure to the discovered lists, or
5079 // by following an RSet entry).
5080 //
5081   // But some of the referents that these reference objects point to,
5082   // and that are in the collection set, may not have been copied: the
5083   // STW ref processor would have seen that the reference object had
5084   // already been 'discovered' and would have skipped discovering the
5085   // reference, but would not have treated the reference object as a
5086   // regular oop. As a result the copy closure would not have been
5087   // applied to the referent object.
5088 //
5089 // We need to explicitly copy these referent objects - the references
5090 // will be processed at the end of remarking.
5091 //
5092 // We also need to do this copying before we process the reference
5093 // objects discovered by the STW ref processor in case one of these
5094 // referents points to another object which is also referenced by an
5095 // object discovered by the STW ref processor.
5096
5097 int n_workers = (G1CollectedHeap::use_parallel_gc_threads() ?
5098 workers()->total_workers() : 1);
5099
5100 set_par_threads(n_workers);
5101 G1ParPreserveCMReferentsTask keep_cm_referents(this, n_workers, _task_queues);
5102
5103 if (G1CollectedHeap::use_parallel_gc_threads()) {
5104 workers()->run_task(&keep_cm_referents);
5105 } else {
5106 keep_cm_referents.work(0);
5107 }
5108
5109 set_par_threads(0);
5110
5111 // Closure to test whether a referent is alive.
5112 G1STWIsAliveClosure is_alive(this);
5113
5114 // Even when parallel reference processing is enabled, the processing
5115   // of JNI refs is serial and performed by the current thread
5116 // rather than by a worker. The following PSS will be used for processing
5117 // JNI refs.
5118
5119 // Use only a single queue for this PSS.
5120 G1ParScanThreadState pss(this, 0);
5121
5122 // We do not embed a reference processor in the copying/scanning
5123 // closures while we're actually processing the discovered
5124 // reference objects.
5125 G1ParScanHeapEvacClosure scan_evac_cl(this, &pss, NULL);
5126 G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);
5127 G1ParScanPartialArrayClosure partial_scan_cl(this, &pss, NULL);
5128
5129 pss.set_evac_closure(&scan_evac_cl);
5130 pss.set_evac_failure_closure(&evac_failure_cl);
5131 pss.set_partial_scan_closure(&partial_scan_cl);
5132
5133 assert(pss.refs()->is_empty(), "pre-condition");
5134
5135 G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);
5136 G1ParScanPermClosure only_copy_perm_cl(this, &pss, NULL);
5137
5138 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);
5139 G1ParScanAndMarkPermClosure copy_mark_perm_cl(this, &pss, NULL);
5140
5141 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5142 OopsInHeapRegionClosure* copy_perm_cl = &only_copy_perm_cl;
5143
5144   if (g1_policy()->during_initial_mark_pause()) {
5145 // We also need to mark copied objects.
5146     copy_non_heap_cl = &copy_mark_non_heap_cl;
5147     copy_perm_cl = &copy_mark_perm_cl;
5148 }
5149
5150 // Keep alive closure.
5151 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, copy_perm_cl, &pss);
5152
5153 // Serial Complete GC closure
5154 G1STWDrainQueueClosure drain_queue(this, &pss);
5155
5156   // Set up the soft refs policy...
5157 rp->setup_policy(false);
5158
5159 if (!rp->processing_is_mt()) {
5160 // Serial reference processing...
5161 rp->process_discovered_references(&is_alive,
5162 &keep_alive,
5163 &drain_queue,
5164 NULL);
5165 } else {
5166 // Parallel reference processing
5167 int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
5168 assert(rp->num_q() == active_workers, "sanity");
5169 assert(active_workers <= rp->max_num_q(), "sanity");
5170
5171 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
5172 rp->process_discovered_references(&is_alive, &keep_alive, &drain_queue, &par_task_executor);
5173 }
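  // Note: in the serial case above we pass a NULL executor, so the
  // reference processor does all the work in the current thread; in the
  // parallel case the per-list work is handed out through
  // G1STWRefProcTaskExecutor::execute().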
5174
5175 // We have completed copying any necessary live referent objects
5176 // (that were not copied during the actual pause) so we can
5177 // retire any active alloc buffers
5178 pss.retire_alloc_buffers();
5179 assert(pss.refs()->is_empty(), "both queue and overflow should be empty");
5180
5181 double ref_proc_time = os::elapsedTime() - ref_proc_start;
5182 g1_policy()->record_ref_proc_time(ref_proc_time * 1000.0);
5183 }
5184
5185 // Weak Reference processing during an evacuation pause (part 2).
5186 void G1CollectedHeap::enqueue_discovered_references() {
5187 double ref_enq_start = os::elapsedTime();
5188
5189 ReferenceProcessor* rp = _ref_processor_stw;
5190 assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5191
5192 // Now enqueue any remaining on the discovered lists on to
5193 // the pending list.
5194 if (!rp->processing_is_mt()) {
5195 // Serial reference processing...
5196 rp->enqueue_discovered_references();
5197 } else {
5198     // Parallel reference enqueueing
5199
5200 int active_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
5201 assert(rp->num_q() == active_workers, "sanity");
5202 assert(active_workers <= rp->max_num_q(), "sanity");
5203
5204 G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, active_workers);
5205 rp->enqueue_discovered_references(&par_task_executor);
5206 }
5207
5208 rp->verify_no_references_recorded();
5209 assert(!rp->discovery_enabled(), "should have been disabled");
5210
5211 // FIXME
5212 // CM's reference processing also cleans up the string and symbol tables.
5213 // Should we do that here also? We could, but it is a serial operation
5214   // and could significantly increase the pause time.
5215
5216 double ref_enq_time = os::elapsedTime() - ref_enq_start;
5217 g1_policy()->record_ref_enq_time(ref_enq_time * 1000.0);
5218 }
5219
5220 void G1CollectedHeap::evacuate_collection_set() {
5221 set_evacuation_failed(false);
5222
5223 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5224 concurrent_g1_refine()->set_use_cache(false);
5225 concurrent_g1_refine()->clear_hot_cache_claimed_index();
5226
5227 int n_workers = (ParallelGCThreads > 0 ? workers()->total_workers() : 1);
5228 set_par_threads(n_workers);
5229 G1ParTask g1_par_task(this, n_workers, _task_queues);
5230
5231 init_for_evac_failure(NULL);
5232
5233 rem_set()->prepare_for_younger_refs_iterate(true);
5234
5235 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5236 double start_par = os::elapsedTime();
5237
5238 if (G1CollectedHeap::use_parallel_gc_threads()) {
5239 // The individual threads will set their evac-failure closures.
5240 StrongRootsScope srs(this);
5241 if (ParallelGCVerbose) G1ParScanThreadState::print_termination_stats_hdr();
5242 workers()->run_task(&g1_par_task);
5243 } else {
5244 StrongRootsScope srs(this);
5245 g1_par_task.work(0);
5246 }
5247
5248 double par_time = (os::elapsedTime() - start_par) * 1000.0;
5249 g1_policy()->record_par_time(par_time);
5250 set_par_threads(0);
5251
5252 // Process any discovered reference objects - we have
5253 // to do this _before_ we retire the GC alloc regions
5254 // as we may have to copy some 'reachable' referent
5255 // objects (and their reachable sub-graphs) that were
5256 // not copied during the pause.
5257 process_discovered_references();
5258
5259 // Weak root processing.
5260 // Note: when JSR 292 is enabled and code blobs can contain
5261   // non-perm oops, we will need to process the code blobs
5262 // here too.
5263 {
5264 G1STWIsAliveClosure is_alive(this);
5265 G1KeepAliveClosure keep_alive(this);
5266 JNIHandles::weak_oops_do(&is_alive, &keep_alive);
5267 }
5268
5269 release_gc_alloc_regions();
5270 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5271
5272 concurrent_g1_refine()->clear_hot_cache();
5273 concurrent_g1_refine()->set_use_cache(true);
5274
5275 finalize_for_evac_failure();
5276
5277 // Must do this before removing self-forwarding pointers, which clears
5278 // the per-region evac-failure flags.
5279 concurrent_mark()->complete_marking_in_collection_set();
5280
5281 if (evacuation_failed()) {
5282 remove_self_forwarding_pointers();
5283 if (PrintGCDetails) {
5284 gclog_or_tty->print(" (to-space overflow)");
5285 } else if (PrintGC) {
5286 gclog_or_tty->print("--");
5287 }
5288 }
5289
5290   // Enqueue any references remaining on the STW
5291 // reference processor's discovered lists. We need to do
5292 // this after the card table is cleaned (and verified) as
5293 // the act of enqueuing entries on to the pending list
5294 // will log these updates (and dirty their associated
5295 // cards). We need these updates logged to update any
5296 // RSets.
5297 enqueue_discovered_references();
5298
5299 if (G1DeferredRSUpdate) {
5300 RedirtyLoggedCardTableEntryFastClosure redirty;
5301 dirty_card_queue_set().set_closure(&redirty);
5302 dirty_card_queue_set().apply_closure_to_all_completed_buffers();
5303
5304 DirtyCardQueueSet& dcq = JavaThread::dirty_card_queue_set();
5305 dcq.merge_bufferlists(&dirty_card_queue_set());
5306 assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");
5307 }
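  // Note (informal): the RSet updates generated during the pause were
  // logged into this heap's private dirty card queue set. Re-dirtying
  // the cards and merging the buffers into the shared JavaThread queue
  // set makes them available for refinement after the pause.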
5308 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5309 }
5310
5311 void G1CollectedHeap::free_region_if_empty(HeapRegion* hr,
5312 size_t* pre_used,
5313 FreeRegionList* free_list,
5314 HumongousRegionSet* humongous_proxy_set,
5315 HRRSCleanupTask* hrrs_cleanup_task,
5316 bool par) {
5317 if (hr->used() > 0 && hr->max_live_bytes() == 0 && !hr->is_young()) {
5318 if (hr->isHumongous()) {
5479 G1ParCleanupCTTask cleanup_task(ct_bs, this);
5480
5481 if (ParallelGCThreads > 0) {
5482 set_par_threads(workers()->total_workers());
5483 workers()->run_task(&cleanup_task);
5484 set_par_threads(0);
5485 } else {
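    // Serial case: walk the list of regions with dirty cards. The list
    // appears to be terminated by a self-link (the last region points
    // to itself), hence the "_dirty_cards_region_list == r" check below.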
5486 while (_dirty_cards_region_list) {
5487 HeapRegion* r = _dirty_cards_region_list;
5488 cleanup_task.clear_cards(r);
5489 _dirty_cards_region_list = r->get_next_dirty_cards_region();
5490 if (_dirty_cards_region_list == r) {
5491 // The last region.
5492 _dirty_cards_region_list = NULL;
5493 }
5494 r->set_next_dirty_cards_region(NULL);
5495 }
5496 }
5497
5498 double elapsed = os::elapsedTime() - start;
5499 g1_policy()->record_clear_ct_time(elapsed * 1000.0);
5500 #ifndef PRODUCT
5501 if (G1VerifyCTCleanup || VerifyAfterGC) {
5502 G1VerifyCardTableCleanup cleanup_verifier(this, ct_bs);
5503 heap_region_iterate(&cleanup_verifier);
5504 }
5505 #endif
5506 }
5507
5508 void G1CollectedHeap::free_collection_set(HeapRegion* cs_head) {
5509 size_t pre_used = 0;
5510 FreeRegionList local_free_list("Local List for CSet Freeing");
5511
5512 double young_time_ms = 0.0;
5513 double non_young_time_ms = 0.0;
5514
5515   // Since the collection set is a superset of the young list,
5516   // all we need to do to clear the young list is clear its
5517   // head and length, and unlink any young regions in the code below.
5518 _young_list->clear();
5519
|