// ...
  assert(!_allocator->has_mutator_alloc_region(context) || !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");

  if (!is_humongous(word_size)) {
    return _allocator->attempt_allocation_locked(word_size, context);
  } else {
    HeapWord* result = humongous_obj_allocate(word_size, context);
    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
      collector_state()->set_initiate_conc_mark_if_possible(true);
    }
    return result;
  }

  ShouldNotReachHere();
}

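// Prints each heap region's state via the G1HRPrinter after a full-GC
// compaction; by this point no young regions should remain.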
class PostCompactionPrinterClosure: public HeapRegionClosure {
private:
  G1HRPrinter* _hr_printer;
public:
  bool do_heap_region(HeapRegion* hr) {
    assert(!hr->is_young(), "not expecting to find young regions");
    _hr_printer->post_compaction(hr);
    return false;
  }

  PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
    : _hr_printer(hr_printer) { }
};

void G1CollectedHeap::print_hrm_post_compaction() {
  if (_hr_printer.is_active()) {
    PostCompactionPrinterClosure cl(hr_printer());
    heap_region_iterate(&cl);
  }
}

void G1CollectedHeap::abort_concurrent_cycle() {
  // Note: When we have a more flexible GC logging framework that
  // allows us to add optional attributes to a GC log record we
  // could consider timing and reporting how long we wait in the
// ...

size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
  return _hrm.total_free_bytes();
}

void G1CollectedHeap::reset_gc_time_stamps(HeapRegion* hr) {
  hr->reset_gc_time_stamp();
}

#ifndef PRODUCT

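// Debug-only (non-PRODUCT) check that every region carries the expected
// GC time stamp, i.e. that all stamps were reset correctly.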
class CheckGCTimeStampsHRClosure : public HeapRegionClosure {
private:
  unsigned _gc_time_stamp;
  bool _failures;

public:
  CheckGCTimeStampsHRClosure(unsigned gc_time_stamp) :
    _gc_time_stamp(gc_time_stamp), _failures(false) { }

  virtual bool do_heap_region(HeapRegion* hr) {
    unsigned region_gc_time_stamp = hr->get_gc_time_stamp();
    if (_gc_time_stamp != region_gc_time_stamp) {
      log_error(gc, verify)("Region " HR_FORMAT " has GC time stamp = %u, expected %u", HR_FORMAT_PARAMS(hr),
                            region_gc_time_stamp, _gc_time_stamp);
      _failures = true;
    }
    return false;
  }

  bool failures() { return _failures; }
};

void G1CollectedHeap::check_gc_time_stamps() {
  CheckGCTimeStampsHRClosure cl(_gc_time_stamp);
  heap_region_iterate(&cl);
  guarantee(!cl.failures(), "all GC time stamps should have been reset");
}
#endif // PRODUCT

void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
  // ...
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}

// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
  size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
  if (_archive_allocator != NULL) {
    result += _archive_allocator->used();
  }
  return result;
}

size_t G1CollectedHeap::used_unlocked() const {
  return _summary_bytes_used;
}

class SumUsedClosure: public HeapRegionClosure {
  size_t _used;
public:
  SumUsedClosure() : _used(0) {}
  bool do_heap_region(HeapRegion* r) {
    _used += r->used();
    return false;
  }
  size_t result() { return _used; }
};

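// Recomputes the used-bytes figure by summing r->used() over every region.
// Judging by the phase timing recorded below, this slow path is used when
// recovering from an evacuation failure.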
size_t G1CollectedHeap::recalculate_used() const {
  double recalculate_used_start = os::elapsedTime();

  SumUsedClosure blk;
  heap_region_iterate(&blk);

  g1_policy()->phase_times()->record_evac_fail_recalc_used_time((os::elapsedTime() - recalculate_used_start) * 1000.0);
  return blk.result();
}

bool G1CollectedHeap::is_user_requested_concurrent_full_gc(GCCause::Cause cause) {
  switch (cause) {
    case GCCause::_java_lang_system_gc: return ExplicitGCInvokesConcurrent;
    case GCCause::_dcmd_gc_run:         return ExplicitGCInvokesConcurrent;
// ...
#ifdef ASSERT
bool G1CollectedHeap::is_in_exact(const void* p) const {
  bool contains = reserved_region().contains(p);
  bool available = _hrm.is_available(addr_to_region((HeapWord*)p));
  return contains && available;
}
#endif

// Iteration functions.

// Iterates an ObjectClosure over all objects within a HeapRegion.

class IterateObjectClosureRegionClosure: public HeapRegionClosure {
  ObjectClosure* _cl;
public:
  IterateObjectClosureRegionClosure(ObjectClosure* cl) : _cl(cl) {}
  bool do_heap_region(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      r->object_iterate(_cl);
    }
    return false;
  }
};

void G1CollectedHeap::object_iterate(ObjectClosure* cl) {
  IterateObjectClosureRegionClosure blk(cl);
  heap_region_iterate(&blk);
}

void G1CollectedHeap::heap_region_iterate(HeapRegionClosure* cl) const {
  _hrm.iterate(cl);
}

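// Parallel region iteration: each worker starts at its own offset into the
// region array and uses the claimer so that every region is processed by
// exactly one worker.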
void G1CollectedHeap::heap_region_par_iterate_from_worker_offset(HeapRegionClosure* cl,
                                                                 HeapRegionClaimer* hrclaimer,
                                                                 uint worker_id) const {
  _hrm.par_iterate(cl, hrclaimer, hrclaimer->offset_for_worker(worker_id));
// ...
void G1CollectedHeap::verify(VerifyOption vo) {
  _verifier->verify(vo);
}

bool G1CollectedHeap::supports_concurrent_phase_control() const {
  return true;
}

const char* const* G1CollectedHeap::concurrent_phases() const {
  return _cmThread->concurrent_phases();
}

bool G1CollectedHeap::request_concurrent_phase(const char* phase) {
  return _cmThread->request_concurrent_phase(phase);
}

class PrintRegionClosure: public HeapRegionClosure {
  outputStream* _st;
public:
  PrintRegionClosure(outputStream* st) : _st(st) {}
  bool do_heap_region(HeapRegion* r) {
    r->print_on(_st);
    return false;
  }
};

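// Returns whether obj in hr should be considered dead under the given
// verification option (previous marking, next marking, or full marking).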
bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const HeapRegion* hr,
                                       const VerifyOption vo) const {
  switch (vo) {
  case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
  case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
  case VerifyOption_G1UseFullMarking: return is_obj_dead_full(obj, hr);
  default:                            ShouldNotReachHere();
  }
  return false; // keep some compilers happy
}

bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
                                       const VerifyOption vo) const {
  switch (vo) {
  // ...
  tc->do_thread(_young_gen_sampling_thread);
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::threads_do(tc);
  }
}

void G1CollectedHeap::print_tracing_info() const {
  g1_rem_set()->print_summary_info();
  concurrent_mark()->print_summary_info();
}

#ifndef PRODUCT
// Helpful for debugging RSet issues.

class PrintRSetsClosure : public HeapRegionClosure {
private:
  const char* _msg;
  size_t _occupied_sum;

public:
  bool do_heap_region(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
    size_t occupied = hrrs->occupied();
    _occupied_sum += occupied;

    tty->print_cr("Printing RSet for region " HR_FORMAT, HR_FORMAT_PARAMS(r));
    if (occupied == 0) {
      tty->print_cr("  RSet is empty");
    } else {
      hrrs->print();
    }
    tty->print_cr("----------");
    return false;
  }

  PrintRSetsClosure(const char* msg) : _msg(msg), _occupied_sum(0) {
    tty->cr();
    tty->print_cr("========================================");
    tty->print_cr("%s", msg);
    tty->cr();
  }
    // ...
    // We also treat is_typeArray() objects specially, allowing them
    // to be reclaimed even if allocated before the start of
    // concurrent mark. For this we rely on mark stack insertion to
    // exclude is_typeArray() objects, preventing us from reclaiming
    // an object that is in the mark stack. We also rely on the
    // metadata for such objects being built in, and so guaranteed
    // to be kept live.
    // Frequent allocation and drop of large binary blobs is an
    // important use case for eager reclaim, and this special handling
    // may reduce needed headroom.

    return obj->is_typeArray() && is_remset_small(region);
  }

public:
  RegisterHumongousWithInCSetFastTestClosure()
    : _total_humongous(0),
      _candidate_humongous(0),
      _dcq(&JavaThread::dirty_card_queue_set()) {
  }

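  // For each humongous-starts region, record whether it is an eager-reclaim
  // candidate and, if so, register it with the collection set.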
  virtual bool do_heap_region(HeapRegion* r) {
    if (!r->is_starts_humongous()) {
      return false;
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    bool is_candidate = humongous_region_is_candidate(g1h, r);
    uint rindex = r->hrm_index();
    g1h->set_humongous_reclaim_candidate(rindex, is_candidate);
    if (is_candidate) {
      _candidate_humongous++;
      g1h->register_humongous_region_with_cset(rindex);
      // is_candidate already filters out humongous objects with large
      // remembered sets. If we have a humongous object with a few
      // remembered set entries, we simply flush them into the DCQS.
      // That will result in automatic re-evaluation of their remembered
      // set entries during the following evacuation phase.
      if (!r->rem_set()->is_empty()) {
        guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
                  "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
        G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
    // ...
    return;
  }
  double time = os::elapsed_counter();

  // Collect reclaim candidate information and register candidates with cset.
  RegisterHumongousWithInCSetFastTestClosure cl;
  heap_region_iterate(&cl);

  time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
                                                                  cl.total_humongous(),
                                                                  cl.candidate_humongous());
  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;

  // Finally flush all remembered set entries to re-check into the global DCQS.
  cl.flush_rem_set_entries();
}

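// Verifies the remembered set of each region; archive and
// continues-humongous regions are skipped.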
class VerifyRegionRemSetClosure : public HeapRegionClosure {
public:
  bool do_heap_region(HeapRegion* hr) {
    if (!hr->is_archive() && !hr->is_continues_humongous()) {
      hr->verify_rem_set();
    }
    return false;
  }
};

uint G1CollectedHeap::num_task_queues() const {
  return _task_queues->size();
}

#if TASKQUEUE_STATS
void G1CollectedHeap::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void G1CollectedHeap::print_taskqueue_stats() const {
  if (!log_is_enabled(Trace, gc, task, stats)) {
  // ...
  double scan_wait_start = os::elapsedTime();
  // We have to wait until the CM threads finish scanning the
  // root regions as it's the only way to ensure that all the
  // objects on them have been correctly scanned before we start
  // moving them during the GC.
  bool waited = _cm->root_regions()->wait_until_scan_finished();
  double wait_time_ms = 0.0;
  if (waited) {
    double scan_wait_end = os::elapsedTime();
    wait_time_ms = (scan_wait_end - scan_wait_start) * 1000.0;
  }
  g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
}

class G1PrintCollectionSetClosure : public HeapRegionClosure {
private:
  G1HRPrinter* _hr_printer;
public:
  G1PrintCollectionSetClosure(G1HRPrinter* hr_printer) : HeapRegionClosure(), _hr_printer(hr_printer) { }

  virtual bool do_heap_region(HeapRegion* r) {
    _hr_printer->cset(r);
    return false;
  }
};

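// Starts building the next incremental collection set: clears the cset
// fast-test table and seeds the young generation with this pause's
// survivor regions.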
void G1CollectedHeap::start_new_collection_set() {
  collection_set()->start_incremental_building();

  clear_cset_fast_test();

  guarantee(_eden.length() == 0, "eden should have been cleared");
  g1_policy()->transfer_survivors_to_cset(survivor());
}

bool
G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  guarantee(!is_gc_active(), "collection is not reentrant");

  if (GCLocker::check_active_before_gc()) {
// ...

  size_t _bytes_allocated_in_old_since_last_gc;

  size_t _failure_used_words;
  size_t _failure_waste_words;

  FreeRegionList _local_free_list;
public:
  G1SerialFreeCollectionSetClosure(EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
    HeapRegionClosure(),
    _evacuation_info(evacuation_info),
    _surviving_young_words(surviving_young_words),
    _before_used_bytes(0),
    _after_used_bytes(0),
    _bytes_allocated_in_old_since_last_gc(0),
    _failure_used_words(0),
    _failure_waste_words(0),
    _local_free_list("Local Region List for CSet Freeing") {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    assert(r->in_collection_set(), "Region %u should be in collection set.", r->hrm_index());
    g1h->clear_in_cset(r);

    if (r->is_young()) {
      assert(r->young_index_in_cset() != -1 && (uint)r->young_index_in_cset() < g1h->collection_set()->young_region_length(),
             "Young index %d is wrong for region %u of type %s with %u young regions",
             r->young_index_in_cset(),
             r->hrm_index(),
             r->get_type_str(),
             g1h->collection_set()->young_region_length());
      size_t words_survived = _surviving_young_words[r->young_index_in_cset()];
      r->record_surv_words_in_group(words_survived);
    }

    if (!r->evacuation_failed()) {
      assert(r->not_empty(), "Region %u is an empty region in the collection set.", r->hrm_index());
      _before_used_bytes += r->used();
      g1h->free_region(r,
    // ...
    assert(!g1h->is_on_master_free_list(r), "sanity");

    Atomic::add(r->rem_set()->occupied_locked(), &_rs_lengths);

    if (!is_young) {
      g1h->_hot_card_cache->reset_card_counts(r);
    }

    if (!evacuation_failed) {
      r->rem_set()->clear_locked();
    }
  }

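  // Records each collection set region as a work item, presumably so that
  // the actual freeing can be distributed among the task's workers.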
  class G1PrepareFreeCollectionSetClosure : public HeapRegionClosure {
  private:
    size_t _cur_idx;
    WorkItem* _work_items;
  public:
    G1PrepareFreeCollectionSetClosure(WorkItem* work_items) : HeapRegionClosure(), _cur_idx(0), _work_items(work_items) { }

    virtual bool do_heap_region(HeapRegion* r) {
      _work_items[_cur_idx++] = WorkItem(r);
      return false;
    }
  };

  void prepare_work() {
    G1PrepareFreeCollectionSetClosure cl(_work_items);
    _collection_set->iterate(&cl);
  }

  void complete_work() {
    _cl.complete_work();

    G1Policy* policy = G1CollectedHeap::heap()->g1_policy();
    policy->record_max_rs_lengths(_rs_lengths);
    policy->cset_regions_freed();
  }
public:
  G1FreeCollectionSetTask(G1CollectionSet* collection_set, EvacuationInfo* evacuation_info, const size_t* surviving_young_words) :
    AbstractGangTask("G1 Free Collection Set"),
    // ...
    workers()->run_task(&cl, num_workers);
  }
  g1_policy()->phase_times()->record_total_free_cset_time_ms((os::elapsedTime() - free_cset_start_time) * 1000.0);

  collection_set->clear();
}

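// Eagerly reclaims humongous objects that can be proven dead, returning
// their regions to the given free list; the liveness argument is spelled
// out in the comment inside do_heap_region() below.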
class G1FreeHumongousRegionClosure : public HeapRegionClosure {
private:
  FreeRegionList* _free_region_list;
  HeapRegionSet* _proxy_set;
  uint _humongous_objects_reclaimed;
  uint _humongous_regions_reclaimed;
  size_t _freed_bytes;
public:

  G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
    _free_region_list(free_region_list), _humongous_objects_reclaimed(0), _humongous_regions_reclaimed(0), _freed_bytes(0) {
  }

  virtual bool do_heap_region(HeapRegion* r) {
    if (!r->is_starts_humongous()) {
      return false;
    }

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    oop obj = (oop)r->bottom();
    G1CMBitMap* next_bitmap = g1h->concurrent_mark()->next_mark_bitmap();

    // The following checks for whether the humongous object is live are sufficient.
    // The main additional check (in addition to having a reference from the roots
    // or the young gen) is whether the humongous object has a remembered set entry.
    //
    // A humongous object cannot be live if there is no remembered set for it
    // because:
    // - there can be no references from within humongous starts regions referencing
    //   the object because we never allocate other objects into them.
    //   (I.e. there are no intra-region references that may be missed by the
    //   remembered set)
    // - as soon as there is a remembered set entry to the humongous starts region
  // ...
  remove_from_old_sets(0, cl.humongous_regions_reclaimed());

  G1HRPrinter* hrp = hr_printer();
  if (hrp->is_active()) {
    FreeRegionListIterator iter(&local_cleanup_list);
    while (iter.more_available()) {
      HeapRegion* hr = iter.get_next();
      hrp->cleanup(hr);
    }
  }

  prepend_to_freelist(&local_cleanup_list);
  decrement_summary_bytes(cl.bytes_freed());

  g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
                                                                    cl.humongous_objects_reclaimed());
}

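// Clears the in-cset state and the young index of every region still in
// the collection set, without freeing any regions.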
class G1AbandonCollectionSetClosure : public HeapRegionClosure {
public:
  virtual bool do_heap_region(HeapRegion* r) {
    assert(r->in_collection_set(), "Region %u must have been in collection set", r->hrm_index());
    G1CollectedHeap::heap()->clear_in_cset(r);
    r->set_young_index_in_cset(-1);
    return false;
  }
};

void G1CollectedHeap::abandon_collection_set(G1CollectionSet* collection_set) {
  G1AbandonCollectionSetClosure cl;
  collection_set->iterate(&cl);

  collection_set->clear();
  collection_set->stop_incremental_building();
}

void G1CollectedHeap::set_free_regions_coming() {
  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [cm thread] : setting free regions coming");

  assert(!free_regions_coming(), "pre-condition");
  _free_regions_coming = true;
  // ...

  log_develop_trace(gc, freelist)("G1ConcRegionFreeing [other] : done waiting for free regions");
}

bool G1CollectedHeap::is_old_gc_alloc_region(HeapRegion* hr) {
  return _allocator->is_retained_old_region(hr);
}

void G1CollectedHeap::set_region_short_lived_locked(HeapRegion* hr) {
  _eden.add(hr);
  _g1_policy->set_region_eden(hr);
}

#ifdef ASSERT

class NoYoungRegionsClosure: public HeapRegionClosure {
private:
  bool _success;
public:
  NoYoungRegionsClosure() : _success(true) { }
  bool do_heap_region(HeapRegion* r) {
    if (r->is_young()) {
      log_error(gc, verify)("Region [" PTR_FORMAT ", " PTR_FORMAT ") tagged as young",
                            p2i(r->bottom()), p2i(r->end()));
      _success = false;
    }
    return false;
  }
  bool success() { return _success; }
};

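// Returns true iff no young regions remain, cross-checking the young
// region count against a per-region scan of the young tag.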
bool G1CollectedHeap::check_young_list_empty() {
  bool ret = (young_regions_count() == 0);

  NoYoungRegionsClosure closure;
  heap_region_iterate(&closure);
  ret = ret && closure.success();

  return ret;
}

#endif // ASSERT

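// Detaches regions from their region sets so that the sets can be rebuilt
// afterwards (see RebuildRegionSetsClosure below).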
class TearDownRegionSetsClosure : public HeapRegionClosure {
private:
  HeapRegionSet* _old_set;

public:
  TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }

  bool do_heap_region(HeapRegion* r) {
    if (r->is_old()) {
      _old_set->remove(r);
    } else if (r->is_young()) {
      r->uninstall_surv_rate_group();
    } else {
      // We ignore free regions, we'll empty the free list afterwards.
      // We ignore humongous regions, we're not tearing down the
      // humongous regions set.
      assert(r->is_free() || r->is_humongous(),
             "it cannot be another type");
    }
    return false;
  }

  ~TearDownRegionSetsClosure() {
    assert(_old_set->is_empty(), "post-condition");
  }
};

void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
  // ...
}

class RebuildRegionSetsClosure : public HeapRegionClosure {
private:
  bool _free_list_only;
  HeapRegionSet* _old_set;
  HeapRegionManager* _hrm;
  size_t _total_used;

public:
  RebuildRegionSetsClosure(bool free_list_only,
                           HeapRegionSet* old_set, HeapRegionManager* hrm) :
    _free_list_only(free_list_only),
    _old_set(old_set), _hrm(hrm), _total_used(0) {
    assert(_hrm->num_free_regions() == 0, "pre-condition");
    if (!free_list_only) {
      assert(_old_set->is_empty(), "pre-condition");
    }
  }

  bool do_heap_region(HeapRegion* r) {
    if (r->is_empty()) {
      // Add free regions to the free list
      r->set_free();
      r->set_allocation_context(AllocationContext::system());
      _hrm->insert_into_free_list(r);
    } else if (!_free_list_only) {
      if (r->is_humongous()) {
        // We ignore humongous regions. We leave the humongous set unchanged.
      } else {
        assert(r->is_young() || r->is_free() || r->is_old(), "invariant");
        // We now move all (non-humongous, non-old) regions to old gen, and register them as such.
        r->move_to_old();
        _old_set->add(r);
      }
      _total_used += r->used();
    }

    return false;
  }