src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 7182 : imported patch 8058298
rev 7183 : imported patch rev1
rev 7184 : imported patch rev2
rev 7185 : [mq]: rev3

*** 88,99 ****
  // apply to TLAB allocation, which is not part of this interface: it
  // is done by clients of this interface.)
  
  // Notes on implementation of parallelism in different tasks.
  //
! // G1ParVerifyTask uses heap_region_par_iterate_chunked() for parallelism.
! // The number of GC workers is passed to heap_region_par_iterate_chunked().
  // It does use run_task() which sets _n_workers in the task.
  // G1ParTask executes g1_process_roots() ->
  //   SharedHeap::process_roots() which calls eventually to
  //   CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  //   SequentialSubTasksDone. SharedHeap::process_roots() also
--- 88,99 ----
  // apply to TLAB allocation, which is not part of this interface: it
  // is done by clients of this interface.)
  
  // Notes on implementation of parallelism in different tasks.
  //
! // G1ParVerifyTask uses heap_region_par_iterate() for parallelism.
! // The number of GC workers is passed to heap_region_par_iterate().
  // It does use run_task() which sets _n_workers in the task.
  // G1ParTask executes g1_process_roots() ->
  //   SharedHeap::process_roots() which calls eventually to
  //   CardTableModRefBS::par_non_clean_card_iterate_work() which uses
  //   SequentialSubTasksDone. SharedHeap::process_roots() also
*** 1213,1233 ****
    }
  };
  
  class ParRebuildRSTask: public AbstractGangTask {
    G1CollectedHeap* _g1;
  public:
!   ParRebuildRSTask(G1CollectedHeap* g1)
!     : AbstractGangTask("ParRebuildRSTask"),
!       _g1(g1)
!   { }
  
    void work(uint worker_id) {
      RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
!     _g1->heap_region_par_iterate_chunked(&rebuild_rs, worker_id,
!                                          _g1->workers()->active_workers(),
!                                          HeapRegion::RebuildRSClaimValue);
    }
  };
  
  class PostCompactionPrinterClosure: public HeapRegionClosure {
  private:
--- 1213,1231 ----
    }
  };
  
  class ParRebuildRSTask: public AbstractGangTask {
    G1CollectedHeap* _g1;
+   HeapRegionClaimer _hrclaimer;
+ 
  public:
!   ParRebuildRSTask(G1CollectedHeap* g1) :
!       AbstractGangTask("ParRebuildRSTask"), _g1(g1), _hrclaimer(g1->workers()->active_workers()) {}
  
    void work(uint worker_id) {
      RebuildRSOutOfRegionClosure rebuild_rs(_g1, worker_id);
!     _g1->heap_region_par_iterate(&rebuild_rs, worker_id, &_hrclaimer);
    }
  };
  
  class PostCompactionPrinterClosure: public HeapRegionClosure {
  private:
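The HeapRegionClaimer introduced above replaces the per-region claim values (InitialClaimValue, RebuildRSClaimValue, and so on) that each parallel phase previously had to set, verify, and reset. Its implementation lives elsewhere in this patch, not in this file; what follows is only a minimal sketch of the underlying idea, assuming one claim flag per region that is handed out at most once via compare-and-swap. All names in the sketch (RegionClaimerSketch, claim_region, start_region_for_worker) are hypothetical, not the HotSpot API.

  #include <atomic>
  #include <cstdint>
  #include <memory>

  // Sketch only: one flag per region, 0 = unclaimed, 1 = claimed.
  class RegionClaimerSketch {
    unsigned _n_workers;                              // participating workers
    unsigned _n_regions;                              // regions to hand out
    std::unique_ptr<std::atomic<unsigned>[]> _claims; // per-region claim flags

  public:
    RegionClaimerSketch(unsigned n_workers, unsigned n_regions)
      : _n_workers(n_workers), _n_regions(n_regions),
        _claims(new std::atomic<unsigned>[n_regions]) {
      for (unsigned i = 0; i < n_regions; ++i) {
        _claims[i].store(0, std::memory_order_relaxed); // all unclaimed
      }
    }

    // Spread workers evenly over the region array to reduce CAS contention.
    unsigned start_region_for_worker(unsigned worker_id) const {
      return (unsigned)((uint64_t)_n_regions * worker_id / _n_workers);
    }

    // Exactly one caller per region index observes true.
    bool claim_region(unsigned index) {
      unsigned expected = 0;
      return _claims[index].compare_exchange_strong(expected, 1);
    }
  };

Because the claimer is constructed per task from active_workers(), the claim state is simply discarded with the task instead of being reset region by region.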
*** 1453,1476 ****
      // not get carried forward to a serial phase where there
      // may be code that is "possibly_parallel".
      set_par_threads(n_workers);
  
      ParRebuildRSTask rebuild_rs_task(this);
-     assert(check_heap_region_claim_values(
-            HeapRegion::InitialClaimValue), "sanity check");
  
      assert(UseDynamicNumberOfGCThreads ||
             workers()->active_workers() == workers()->total_workers(),
             "Unless dynamic should use total workers");
      // Use the most recent number of active workers
      assert(workers()->active_workers() > 0,
             "Active workers not properly set");
      set_par_threads(workers()->active_workers());
      workers()->run_task(&rebuild_rs_task);
      set_par_threads(0);
  
-     assert(check_heap_region_claim_values(
-            HeapRegion::RebuildRSClaimValue), "sanity check");
-     reset_heap_region_claim_values();
    } else {
      RebuildRSOutOfRegionClosure rebuild_rs(this);
      heap_region_iterate(&rebuild_rs);
    }
--- 1451,1469 ----
*** 2631,2745 ****
  void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
    _hrm.iterate(cl);
  }
  
  void
! G1CollectedHeap::heap_region_par_iterate_chunked(HeapRegionClosure* cl,
!                                                  uint worker_id,
!                                                  uint num_workers,
!                                                  jint claim_value) const {
!   _hrm.par_iterate(cl, worker_id, num_workers, claim_value);
! }
! 
! class ResetClaimValuesClosure: public HeapRegionClosure {
! public:
!   bool doHeapRegion(HeapRegion* r) {
!     r->set_claim_value(HeapRegion::InitialClaimValue);
!     return false;
!   }
! };
! 
! void G1CollectedHeap::reset_heap_region_claim_values() {
!   ResetClaimValuesClosure blk;
!   heap_region_iterate(&blk);
! }
! 
! void G1CollectedHeap::reset_cset_heap_region_claim_values() {
!   ResetClaimValuesClosure blk;
!   collection_set_iterate(&blk);
! }
! 
! #ifdef ASSERT
! // This checks whether all regions in the heap have the correct claim
! // value. I also piggy-backed on this a check to ensure that the
! // humongous_start_region() information on "continues humongous"
! // regions is correct.
! 
! class CheckClaimValuesClosure : public HeapRegionClosure {
! private:
!   jint _claim_value;
!   uint _failures;
!   HeapRegion* _sh_region;
! 
! public:
!   CheckClaimValuesClosure(jint claim_value) :
!     _claim_value(claim_value), _failures(0), _sh_region(NULL) { }
!   bool doHeapRegion(HeapRegion* r) {
!     if (r->claim_value() != _claim_value) {
!       gclog_or_tty->print_cr("Region " HR_FORMAT ", "
!                              "claim value = %d, should be %d",
!                              HR_FORMAT_PARAMS(r),
!                              r->claim_value(), _claim_value);
!       ++_failures;
!     }
!     if (!r->is_humongous()) {
!       _sh_region = NULL;
!     } else if (r->is_starts_humongous()) {
!       _sh_region = r;
!     } else if (r->is_continues_humongous()) {
!       if (r->humongous_start_region() != _sh_region) {
!         gclog_or_tty->print_cr("Region " HR_FORMAT ", "
!                                "HS = "PTR_FORMAT", should be "PTR_FORMAT,
!                                HR_FORMAT_PARAMS(r),
!                                r->humongous_start_region(),
!                                _sh_region);
!         ++_failures;
!       }
!     }
!     return false;
!   }
!   uint failures() { return _failures; }
! };
! 
! bool G1CollectedHeap::check_heap_region_claim_values(jint claim_value) {
!   CheckClaimValuesClosure cl(claim_value);
!   heap_region_iterate(&cl);
!   return cl.failures() == 0;
  }
  
- class CheckClaimValuesInCSetHRClosure: public HeapRegionClosure {
- private:
-   jint _claim_value;
-   uint _failures;
- 
- public:
-   CheckClaimValuesInCSetHRClosure(jint claim_value) :
-     _claim_value(claim_value), _failures(0) { }
- 
-   uint failures() { return _failures; }
- 
-   bool doHeapRegion(HeapRegion* hr) {
-     assert(hr->in_collection_set(), "how?");
-     assert(!hr->is_humongous(), "H-region in CSet");
-     if (hr->claim_value() != _claim_value) {
-       gclog_or_tty->print_cr("CSet Region " HR_FORMAT ", "
-                              "claim value = %d, should be %d",
-                              HR_FORMAT_PARAMS(hr),
-                              hr->claim_value(), _claim_value);
-       _failures += 1;
-     }
-     return false;
-   }
- };
- 
- bool G1CollectedHeap::check_cset_heap_region_claim_values(jint claim_value) {
-   CheckClaimValuesInCSetHRClosure cl(claim_value);
-   collection_set_iterate(&cl);
-   return cl.failures() == 0;
- }
- 
- #endif // ASSERT
- 
  // Clear the cached CSet starting regions and (more importantly)
  // the time stamps. Called when we reset the GC time stamp.
  void G1CollectedHeap::clear_cset_start_regions() {
    assert(_worker_cset_start_region != NULL, "sanity");
    assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
--- 2624,2639 ----
  void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
    _hrm.iterate(cl);
  }
  
  void
! G1CollectedHeap::heap_region_par_iterate(HeapRegionClosure* cl,
!                                          uint worker_id,
!                                          HeapRegionClaimer *hrclaimer) const {
!   _hrm.par_iterate(cl, worker_id, hrclaimer);
  }
  
  // Clear the cached CSet starting regions and (more importantly)
  // the time stamps. Called when we reset the GC time stamp.
  void G1CollectedHeap::clear_cset_start_regions() {
    assert(_worker_cset_start_region != NULL, "sanity");
    assert(_worker_cset_start_region_time_stamp != NULL, "sanity");
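How _hrm.par_iterate() hands out regions is likewise not visible in this file. A plausible sketch, reusing the hypothetical RegionClaimerSketch above: each worker starts at its own offset into the region array and wraps around, applying the closure only to the regions it wins.

  // Illustrative only, not the actual HeapRegionManager::par_iterate().
  void par_iterate_sketch(HeapRegion** regions, unsigned n_regions,
                          HeapRegionClosure* cl, unsigned worker_id,
                          RegionClaimerSketch* claimer) {
    const unsigned start = claimer->start_region_for_worker(worker_id);
    for (unsigned count = 0; count < n_regions; ++count) {
      const unsigned index = (start + count) % n_regions;
      if (!claimer->claim_region(index)) {
        continue; // lost the race: another worker owns this region
      }
      // doHeapRegion() returns true to abort the iteration, matching the
      // closures in this file, which return false to keep going.
      if (cl->doHeapRegion(regions[index])) {
        break;
      }
    }
  }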
*** 3252,3282 ****
  class G1ParVerifyTask: public AbstractGangTask {
  private:
    G1CollectedHeap*  _g1h;
    VerifyOption      _vo;
    bool              _failures;
  
  public:
    // _vo == UsePrevMarking -> use "prev" marking information,
    // _vo == UseNextMarking -> use "next" marking information,
    // _vo == UseMarkWord    -> use mark word from object header.
    G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
        AbstractGangTask("Parallel verify task"),
        _g1h(g1h),
        _vo(vo),
!       _failures(false) { }
  
    bool failures() {
      return _failures;
    }
  
    void work(uint worker_id) {
      HandleMark hm;
      VerifyRegionClosure blk(true, _vo);
!     _g1h->heap_region_par_iterate_chunked(&blk, worker_id,
!                                           _g1h->workers()->active_workers(),
!                                           HeapRegion::ParVerifyClaimValue);
      if (blk.failures()) {
        _failures = true;
      }
    }
  };
--- 3146,3176 ----
  class G1ParVerifyTask: public AbstractGangTask {
  private:
    G1CollectedHeap*  _g1h;
    VerifyOption      _vo;
    bool              _failures;
+   HeapRegionClaimer _hrclaimer;
  
  public:
    // _vo == UsePrevMarking -> use "prev" marking information,
    // _vo == UseNextMarking -> use "next" marking information,
    // _vo == UseMarkWord    -> use mark word from object header.
    G1ParVerifyTask(G1CollectedHeap* g1h, VerifyOption vo) :
        AbstractGangTask("Parallel verify task"),
        _g1h(g1h),
        _vo(vo),
!       _failures(false),
!       _hrclaimer(g1h->workers()->active_workers()) {}
  
    bool failures() {
      return _failures;
    }
  
    void work(uint worker_id) {
      HandleMark hm;
      VerifyRegionClosure blk(true, _vo);
!     _g1h->heap_region_par_iterate(&blk, worker_id, &_hrclaimer);
      if (blk.failures()) {
        _failures = true;
      }
    }
  };
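Both ParRebuildRSTask above and G1ParVerifyTask here follow the same pattern: the claimer is a task member sized from active_workers() at construction, so its lifetime matches the task's. That is what lets the hunks below drop the bracketing check_heap_region_claim_values() asserts and the reset_heap_region_claim_values() calls: the next parallel phase simply constructs a fresh claimer instead of reusing mutable per-region claim state.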
*** 3314,3325 ****
      verify_region_sets();
    }
  
    if (!silent) { gclog_or_tty->print("HeapRegions "); }
    if (GCParallelVerificationEnabled && ParallelGCThreads > 1) {
-     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
-            "sanity check");
  
      G1ParVerifyTask task(this, vo);
      assert(UseDynamicNumberOfGCThreads ||
             workers()->active_workers() == workers()->total_workers(),
             "If not dynamic should be using all the workers");
--- 3208,3217 ----
*** 3329,3347 ****
      set_par_threads(0);
      if (task.failures()) {
        failures = true;
      }
  
-     // Checks that the expected amount of parallel work was done.
-     // The implication is that n_workers is > 0.
-     assert(check_heap_region_claim_values(HeapRegion::ParVerifyClaimValue),
-            "sanity check");
- 
-     reset_heap_region_claim_values();
- 
-     assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
-            "sanity check");
    } else {
      VerifyRegionClosure blk(false, vo);
      heap_region_iterate(&blk);
      if (blk.failures()) {
        failures = true;
--- 3221,3230 ----
*** 3924,3935 ****
    if (!G1StressConcRegionFreeing) {
      append_secondary_free_list_if_not_empty_with_lock();
    }
  
    assert(check_young_list_well_formed(), "young list should be well formed");
-   assert(check_heap_region_claim_values(HeapRegion::InitialClaimValue),
-          "sanity check");
  
    // Don't dynamically change the number of GC threads this early. A value of
    // 0 is used to indicate serial work. When parallel work is done,
    // it will be set.
--- 3807,3816 ----
*** 4286,4315 ****
    delete _evac_failure_scan_stack;
    _evac_failure_scan_stack = NULL;
  }
  
  void G1CollectedHeap::remove_self_forwarding_pointers() {
-   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
- 
    double remove_self_forwards_start = os::elapsedTime();
  
-   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
- 
-   if (G1CollectedHeap::use_parallel_gc_threads()) {
      set_par_threads();
      workers()->run_task(&rsfp_task);
      set_par_threads(0);
-   } else {
-     rsfp_task.work(0);
-   }
- 
-   assert(check_cset_heap_region_claim_values(HeapRegion::ParEvacFailureClaimValue), "sanity");
- 
-   // Reset the claim values in the regions in the collection set.
-   reset_cset_heap_region_claim_values();
- 
-   assert(check_cset_heap_region_claim_values(HeapRegion::InitialClaimValue), "sanity");
  
    // Now restore saved marks, if any.
    assert(_objs_with_preserved_marks.size() ==
           _preserved_marks_of_objs.size(), "Both or none.");
    while (!_objs_with_preserved_marks.is_empty()) {
--- 4167,4182 ----
    delete _evac_failure_scan_stack;
    _evac_failure_scan_stack = NULL;
  }
  
  void G1CollectedHeap::remove_self_forwarding_pointers() {
    double remove_self_forwards_start = os::elapsedTime();
  
    set_par_threads();
+   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
    workers()->run_task(&rsfp_task);
    set_par_threads(0);
  
    // Now restore saved marks, if any.
    assert(_objs_with_preserved_marks.size() ==
           _preserved_marks_of_objs.size(), "Both or none.");
    while (!_objs_with_preserved_marks.is_empty()) {
*** 5946,5960 ****
    hot_card_cache->reset_hot_cache();
    hot_card_cache->set_use_cache(true);
  
    purge_code_root_memory();
  
-   if (g1_policy()->during_initial_mark_pause()) {
-     // Reset the claim values set during marking the strong code roots
-     reset_heap_region_claim_values();
-   }
- 
    finalize_for_evac_failure();
  
    if (evacuation_failed()) {
      remove_self_forwarding_pointers();
--- 5813,5822 ----