  assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");

  if (!is_humongous(word_size)) {
    return _allocator->attempt_allocation_locked(word_size, context);
  } else {
    HeapWord* result = humongous_obj_allocate(word_size, context);
    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
      collector_state()->set_initiate_conc_mark_if_possible(true);
    }
    return result;
  }

  ShouldNotReachHere();
}

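// Closure used after a full-GC compaction to tell the region printer
// about each surviving (necessarily non-young) region.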
class PostCompactionPrinterClosure: public HeapRegionClosure {
private:
  G1HRPrinter* _hr_printer;
public:
  bool do_heap_region(HeapRegion* hr) {
    assert(!hr->is_young(), "not expecting to find young regions");
    _hr_printer->post_compaction(hr);
    return false;
  }

  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
    : _hr_printer(hr_printer) { }
};

void G1CollectedHeap::print_hrm_post_compaction() {
  if (_hr_printer.is_active()) {
    PostCompactionPrinterClosure cl(hr_printer());
    heap_region_iterate(&cl);
  }
}

void G1CollectedHeap::abort_concurrent_cycle() {
  // Note: When we have a more flexible GC logging framework that
  // allows us to add optional attributes to a GC log record we
  // could consider timing and reporting how long we wait in the
  // ...

size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
  return _hrm.total_free_bytes();
}

void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
  hr->reset_gc_time_stamp();
}

#ifndef PRODUCT

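// Debug-only closure that verifies that every region's GC time stamp
// matches the expected heap-wide value, logging any mismatch.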
class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
private:
  unsigned _gc_time_stamp;
  bool _failures;

public:
  CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
    _gc_time_stamp(gc_time_stamp), _failures(false) { }

  virtual bool do_heap_region(HeapRegion* hr) {
    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
    if (_gc_time_stamp != region_gc_time_stamp) {
      log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %u, expected %u", HR_FORMAT_PARAMS(hr),
                            region_gc_time_stamp, _gc_time_stamp);
      _failures = true;
    }
    return false;
  }

  bool failures() { return _failures; }
};

void G1CollectedHeap::check_gc_time_stamps() {
  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  heap_region_iterate(&cl);
  guarantee(!cl.failures(), "all GC time stamps should have been reset");
}
#endif // PRODUCT

void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
  // ...
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}

// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
  size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
  if (_archive_allocator != NULL) {
    result += _archive_allocator->used();
  }
  return result;
}

size_t G1CollectedHeap::used_unlocked() const {
  return _summary_bytes_used;
}

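// Sums the used bytes of every region, letting the heap recompute its
// used size from scratch, e.g. after an evacuation failure.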
class SumUsedClosure: public HeapRegionClosure {
  size_t _used;
public:
  SumUsedClosure() : _used(0) {}
  bool do_heap_region(HeapRegion* r) {
    _used += r->used();
    return false;
  }
  size_t result() { return _used; }
};

size_t G1CollectedHeap::recalculate_used() const {
  double recalculate_used_start = os::elapsedTime();

  SumUsedClosure blk;
  heap_region_iterate(&blk);

  g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
  return blk.result();
}

2051
2052 bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
2053 switch (cause) {
2054 case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
2055 case GCCause::_dcmd_gc_run: return ExplicitGCInvokesConcurrent;
#ifdef ASSERT
bool G1CollectedHeap::is_in_exact(const void* p) const {
  bool contains = reserved_region().contains(p);
  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
  return contains && available;
}
#endif

// Iteration functions.

// Iterates an ObjectClosure over all objects within a HeapRegion.

class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  ObjectClosure* _cl;
public:
  IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool do_heap_region(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->object_iterate(_cl);
    }
    return false;
  }
};

void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
  IterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}

void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  _hrm.iterate(cl);
}

void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
                                                                 HeapRegionClaimer* hrclaimer,
                                                                 uint worker_id) const {
  _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
}

// ...

void G1CollectedHeap::verify(VerifyOption vo) {
  _verifier->verify(vo);
}

bool G1CollectedHeap::supports_concurrent_phase_control() const {
  return true;
}

const char* const* G1CollectedHeap::concurrent_phases() const {
  return _cmThread->concurrent_phases();
}

bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
  return _cmThread->request_concurrent_phase(phase);
}

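// Prints a line for each region to the given output stream.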
class PrintRegionClosure: public HeapRegionClosure {
  outputStream* _st;
public:
  PrintRegionClosure(outputStream* st) : _st(st) {}
  bool do_heap_region(HeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const HeapRegion* hr,
                                       const VerifyOption vo) const {
  switch (vo) {
    case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
    case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
    case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
    default:                            ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}

bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const VerifyOption vo) const {
  switch (vo) {
    // ...
  tc->do_thread(_young_gen_sampling_thread);
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::threads_do(tc);
  }
}

void G1CollectedHeap::print_tracing_info() const {
  g1_rem_set()->print_summary_info();
  concurrent_mark()->print_summary_info();
}

#ifndef PRODUCT
// Helpful for debugging RSet issues.

class PrintRSetsClosure : public HeapRegionClosure {
private:
  const char* _msg;
  size_t _occupied_sum;

public:
  bool do_heap_region(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    size_t occupied = hrrs->occupied();
    _occupied_sum += occupied;

    tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
    if (occupied == 0) {
      tty->print_cr("  RSet is empty");
    } else {
      hrrs->print();
    }
    tty->print_cr("----------");
    return false;
  }

  PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
    tty->cr();
    tty->print_cr("========================================");
    tty->print_cr("%s", msg);
    tty->cr();
  }
  // ...
    // We also treat is_typeArray() objects specially, allowing them
    // to be reclaimed even if allocated before the start of
    // concurrent mark. For this we rely on mark stack insertion to
    // exclude is_typeArray() objects, preventing reclamation of an
    // object that is still on the mark stack. We also rely on the
    // metadata for such objects being built-in, and therefore
    // guaranteed to be kept live.
    // Frequent allocation and dropping of large binary blobs is an
    // important use case for eager reclaim, and this special handling
    // may reduce the needed headroom.

    return obj->is_typeArray() && is_remset_small(region);
  }

public:
  RegisterHumongousWithInCSetFastTestClosure()
    : _total_humongous(0),
      _candidate_humongous(0),
      _dcq(&JavaThread::dirty_card_queue_set()) {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    if (!r->is_starts_humongous()) {
      return false;
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    bool is_candidate = humongous_region_is_candidate(g1h, r);
    uint rindex = r->hrm_index();
    g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
    if (is_candidate) {
      _candidate_humongous++;
      g1h->register_humongous_region_with_cset(rindex);
      // is_candidate already filters out humongous objects with large remembered sets.
      // If we have a humongous object with only a few remembered set entries, we simply
      // flush those entries into the DCQS. That will result in their automatic
      // re-evaluation during the following evacuation phase.
      if (!r->rem_set()->is_empty()) {
        guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                  "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
        G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
        // ...
    return;
  }
  double time = os::elapsed_counter();

  // Collect reclaim candidate information and register candidates with cset.
  RegisterHumongousWithInCSetFastTestClosure cl;
  heap_region_iterate(&cl);

  time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
                                                                  cl.total_humongous(),
                                                                  cl.candidate_humongous());
  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;

  // Finally flush all remembered set entries to re-check into the global DCQS.
  cl.flush_rem_set_entries();
}

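// Verifies the remembered set of each region that is expected to have
// one; archive and continues-humongous regions are skipped.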
class VerifyRegionRemSetClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* hr) {
    if (!hr->is_archive() && !hr->is_continues_humongous()) {
      hr->verify_rem_set();
    }
    return false;
  }
};

uint G1CollectedHeap::num_task_queues() const {
  return _task_queues->size();
}

#if TASKQUEUE_STATS
void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void G1CollectedHeap::print_taskqueue_stats() const {
  if (!log_is_enabled(Trace, gc, task, stats)) {
    // ...
  double scan_wait_start = os::elapsedTime();
  // We have to wait until the CM threads finish scanning the
  // root regions as it's the only way to ensure that all the
  // objects on them have been correctly scanned before we start
  // moving them during the GC.
  bool waited = _cm->root_regions()->wait_until_scan_finished();
  double wait_time_ms = 0.0;
  if (waited) {
    double scan_wait_end = os::elapsedTime();
    wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
  }
  g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
}

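// Tells the region printer about every region in the collection set.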
class G1PrintCollectionSetClosure : public HeapRegionClosure {
private:
  G1HRPrinter* _hr_printer;
public:
  G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _hr_printer->cset(r);
    return false;
  }
};

void G1CollectedHeap::start_new_collection_set() {
  collection_set()->start_incremental_building();

  clear_cset_fast_test();

  guarantee(_eden.length() == 0, "eden should have been cleared");
  g1_policy()->transfer_survivors_to_cset(survivor());
}

bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
    // ...

  size_t _bytes_allocated_in_old_since_last_gc;

  size_t _failure_used_words;
  size_t _failure_waste_words;

  FreeRegionList _local_free_list;
public:
  G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
    HeapRegionClosure(),
    _evacuation_info(evacuation_info),
    _surviving_young_words(surviving_young_words),
    _before_used_bytes(0),
    _after_used_bytes(0),
    _bytes_allocated_in_old_since_last_gc(0),
    _failure_used_words(0),
    _failure_waste_words(0),
    _local_free_list("Local Region List for CSet Freeing") {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
    g1h->clear_in_cset(r);

    if (r->is_young()) {
      assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
             "Young index %d is wrong for region %u of type %s with %u young regions",
             r->young_index_in_cset(),
             r->hrm_index(),
             r->get_type_str(),
             g1h->collection_set()->young_region_length());
      size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
      r->record_surv_words_in_group(words_survived);
    }

    if (!r->evacuation_failed()) {
      assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
      _before_used_bytes += r->used();
      g1h->free_region(r,
                       // ...
    assert(!g1h->is_on_master_free_list(r), "sanity");

    Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);

    if (!is_young) {
      g1h->_hot_card_cache->reset_card_counts(r);
    }

    if (!evacuation_failed) {
      r->rem_set()->clear_locked();
    }
  }

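// Records each collection set region as a work item, so the regions can
// later be freed by the worker threads of G1FreeCollectionSetTask.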
class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
private:
  size_t _cur_idx;
  WorkItem* _work_items;
public:
  G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _work_items[_cur_idx++] = WorkItem(r);
    return false;
  }
};

void prepare_work() {
  G1PrepareFreeCollectionSetClosure cl(_work_items);
  _collection_set->iterate(&cl);
}

void complete_work() {
  _cl.complete_work();

  G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
  policy->record_max_rs_lengths(_rs_lengths);
  policy->cset_regions_freed();
}
public:
  G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
    AbstractGangTask("G1 Free Collection Set"),
    // ...
    workers()->run_task(&cl, num_workers);
  }
  g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);

  collection_set->clear();
}

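// Frees the regions of humongous objects found to be dead ("eager
// reclaim"), collecting them on the given free list.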
class G1FreeHumongousRegionClosure : public HeapRegionClosure {
private:
  FreeRegionList* _free_region_list;
  HeapRegionSet* _proxy_set;
  uint _humongous_objects_reclaimed;
  uint _humongous_regions_reclaimed;
  size_t _freed_bytes;
public:

  G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
    _free_region_list(free_region_list), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    if (!r->is_starts_humongous()) {
      return false;
    }

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    oop obj = (oop)r->bottom();
    G1CMBitMap* next_bitmap = g1h->concurrent_mark()->next_mark_bitmap();

    // The following checks of whether the humongous object is live are
    // sufficient. The main additional check (in addition to having a
    // reference from the roots or the young gen) is whether the humongous
    // object has a remembered set entry.
    //
    // A humongous object cannot be live if there is no remembered set for it
    // because:
    // - there can be no references from within humongous starts regions
    //   referencing the object, because we never allocate other objects into
    //   them. (I.e. there are no intra-region references that may be missed
    //   by the remembered set.)
    // - as soon as there is a remembered set entry to the humongous starts
    //   region ...
  remove_from_old_sets(0, cl.humongous_regions_reclaimed());

  G1HRPrinter* hrp = hr_printer();
  if (hrp->is_active()) {
    FreeRegionListIterator iter(&local_cleanup_list);
    while (iter.more_available()) {
      HeapRegion* hr = iter.get_next();
      hrp->cleanup(hr);
    }
  }

  prepend_to_freelist(&local_cleanup_list);
  decrement_summary_bytes(cl.bytes_freed());

  g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
                                                                    cl.humongous_objects_reclaimed());
}

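// Clears collection-set state from each region when the collection set
// is abandoned without being evacuated.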
class G1AbandonCollectionSetClosure : public HeapRegionClosure {
public:
  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
    G1CollectedHeap::heap()->clear_in_cset(r);
    r->set_young_index_in_cset(-1);
    return false;
  }
};

void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
  G1AbandonCollectionSetClosure cl;
  collection_set->iterate(&cl);

  collection_set->clear();
  collection_set->stop_incremental_building();
}

void G1CollectedHeap::set_free_regions_coming() {
  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");

  assert(!free_regions_coming(), "pre-condition");
  _free_regions_coming = true;
  // ...

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
}

bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
  return _allocator->is_retained_old_region(hr);
}

void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  _eden.add(hr);
  _g1_policy->set_region_eden(hr);
}

#ifdef ASSERT

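// Debug-only closure that checks that no region is still tagged as young.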
class NoYoungRegionsClosure: public HeapRegionClosure {
private:
  bool _success;
public:
  NoYoungRegionsClosure() : _success(true) { }
  bool do_heap_region(HeapRegion* r) {
    if (r->is_young()) {
      log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
                            p2i(r->bottom()), p2i(r->end()));
      _success = false;
    }
    return false;
  }
  bool success() { return _success; }
};

bool G1CollectedHeap::check_young_list_empty() {
  bool ret = (young_regions_count() == 0);

  NoYoungRegionsClosure closure;
  heap_region_iterate(&closure);
  ret = ret && closure.success();

  return ret;
}

#endif // ASSERT

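// Empties the old set and detaches young regions from their survivor
// rate group so the region sets can be rebuilt; used by
// tear_down_region_sets() below.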
class TearDownRegionSetsClosure : public HeapRegionClosure {
private:
  HeapRegionSet* _old_set;

public:
  TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }

  bool do_heap_region(HeapRegion* r) {
    if (r->is_old()) {
      _old_set->remove(r);
    } else if (r->is_young()) {
      r->uninstall_surv_rate_group();
    } else {
      // We ignore free regions; we'll empty the free list afterwards.
      // We ignore humongous regions; we're not tearing down the
      // humongous regions set.
      assert(r->is_free() || r->is_humongous(),
             "it cannot be another type");
    }
    return false;
  }

  ~TearDownRegionSetsClosure() {
    assert(_old_set->is_empty(), "post-condition");
  }
};

void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
  // ...
}

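// Rebuilds the region sets after the heap has been rearranged: empty
// regions go back on the free list and, unless only the free list is
// being rebuilt, every remaining non-humongous region is moved to the
// old set.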
class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
  bool _free_list_only;
  HeapRegionSet* _old_set;
  HeapRegionManager* _hrm;
  size_t _total_used;

public:
  RebuildRegionSetsClosure(bool free_list_only,
                           HeapRegionSet* old_set, HeapRegionManager* hrm) :
    _free_list_only(free_list_only),
    _old_set(old_set), _hrm(hrm), _total_used(0) {
    assert(_hrm->num_free_regions() == 0, "pre-condition");
    if (!free_list_only) {
      assert(_old_set->is_empty(), "pre-condition");
    }
  }

  bool do_heap_region(HeapRegion* r) {
    if (r->is_empty()) {
      // Add free regions to the free list
      r->set_free();
      r->set_allocation_context(AllocationContext::system());
      _hrm->insert_into_free_list(r);
    } else if (!_free_list_only) {

      if (r->is_humongous()) {
        // We ignore humongous regions. We leave the humongous set unchanged.
      } else {
        assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
        // We now move all (non-humongous, non-old) regions to old gen, and register them as such.
        r->move_to_old();
        _old_set->add(r);
      }
      _total_used += r->used();
    }

    return false;
  }