387 if (head == NULL) {
388 return NULL;
389 }
390 HeapRegion* new_head = head->get_next_dirty_cards_region();
391 if (head == new_head) {
392 // The last region.
393 new_head = NULL;
394 }
395 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
396 head);
397 } while (hr != head);
398 assert(hr != NULL, "invariant");
399 hr->set_next_dirty_cards_region(NULL);
400 return hr;
401 }
402
403 // Returns true if the reference points to an object that
404 // can move in an incremental collection.
405 bool G1CollectedHeap::is_scavengable(const void* p) {
406 HeapRegion* hr = heap_region_containing(p);
407 return !hr->is_humongous();
408 }
409
410 // Private methods.
411
412 HeapRegion*
413 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
414 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
415 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
416 if (!_secondary_free_list.is_empty()) {
417 if (G1ConcRegionFreeingVerbose) {
418 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
419 "secondary_free_list has %u entries",
420 _secondary_free_list.length());
421 }
422 // It looks as if there are free regions available on the
423 // secondary_free_list. Let's move them to the free_list and try
424 // again to allocate from it.
425 append_secondary_free_list();
426
427 assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
890 // follow-on attempt will be at the start of the next loop
891 // iteration (after taking the Heap_lock).
892 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
893 false /* bot_updates */);
894 if (result != NULL) {
895 return result;
896 }
897
898 // Give a warning if we seem to be looping forever.
899 if ((QueuedAllocationWarningCount > 0) &&
900 (try_count % QueuedAllocationWarningCount == 0)) {
901 warning("G1CollectedHeap::attempt_allocation_slow() "
902 "retries %d times", try_count);
903 }
904 }
905
906 ShouldNotReachHere();
907 return NULL;
908 }
909
910 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
911 uint* gc_count_before_ret,
912 uint* gclocker_retry_count_ret) {
913 // The structure of this method has a lot of similarities to
914 // attempt_allocation_slow(). The reason these two were not merged
915 // into a single one is that such a method would require several "if
916 // allocation is not humongous do this, otherwise do that"
917 // conditional paths which would obscure its flow. In fact, an early
918 // version of this code did use a unified method which was harder to
919 // follow and, as a result, it had subtle bugs that were hard to
920 // track down. So keeping these two methods separate allows each to
921 // be more readable. It will be good to keep these two in sync as
922 // much as possible.
923
924 assert_heap_not_locked_and_not_at_safepoint();
925 assert(is_humongous(word_size), "attempt_allocation_humongous() "
926 "should only be called for humongous allocations");
927
928 // Humongous objects can exhaust the heap quickly, so we should check if we
929 // need to start a marking cycle at each humongous object allocation. We do
1114 }
1115 };
1116
1117 class PostCompactionPrinterClosure: public HeapRegionClosure {
1118 private:
1119 G1HRPrinter* _hr_printer;
1120 public:
1121 bool doHeapRegion(HeapRegion* hr) {
1122 assert(!hr->is_young(), "not expecting to find young regions");
1123 if (hr->is_free()) {
1124 // We only generate output for non-empty regions.
1125 } else if (hr->is_starts_humongous()) {
1126 if (hr->region_num() == 1) {
1127 // single humongous region
1128 _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1129 } else {
1130 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1131 }
1132 } else if (hr->is_continues_humongous()) {
1133 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1134 } else if (hr->is_old()) {
1135 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1136 } else {
1137 ShouldNotReachHere();
1138 }
1139 return false;
1140 }
1141
1142 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1143 : _hr_printer(hr_printer) { }
1144 };
1145
1146 void G1CollectedHeap::print_hrm_post_compaction() {
1147 PostCompactionPrinterClosure cl(hr_printer());
1148 heap_region_iterate(&cl);
1149 }
1150
1151 bool G1CollectedHeap::do_collection(bool explicit_gc,
1152 bool clear_all_soft_refs,
1153 size_t word_size) {
1707 CollectedHeap(),
1708 _g1_policy(policy_),
1709 _dirty_card_queue_set(false),
1710 _into_cset_dirty_card_queue_set(false),
1711 _is_alive_closure_cm(this),
1712 _is_alive_closure_stw(this),
1713 _ref_processor_cm(NULL),
1714 _ref_processor_stw(NULL),
1715 _bot_shared(NULL),
1716 _evac_failure_scan_stack(NULL),
1717 _mark_in_progress(false),
1718 _cg1r(NULL),
1719 _g1mm(NULL),
1720 _refine_cte_cl(NULL),
1721 _full_collection(false),
1722 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1723 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1724 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1725 _humongous_reclaim_candidates(),
1726 _has_humongous_reclaim_candidates(false),
1727 _free_regions_coming(false),
1728 _young_list(new YoungList(this)),
1729 _gc_time_stamp(0),
1730 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1731 _old_plab_stats(OldPLABSize, PLABWeight),
1732 _expand_heap_after_alloc_failure(true),
1733 _surviving_young_words(NULL),
1734 _old_marking_cycles_started(0),
1735 _old_marking_cycles_completed(0),
1736 _concurrent_cycle_started(false),
1737 _heap_summary_sent(false),
1738 _in_cset_fast_test(),
1739 _dirty_cards_region_list(NULL),
1740 _worker_cset_start_region(NULL),
1741 _worker_cset_start_region_time_stamp(NULL),
1742 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1743 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1744 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1745 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1746
1747 _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
1748 /* are_GC_task_threads */true,
1749 /* are_ConcurrentGC_threads */false);
1750 _workers->initialize_workers();
1751
1752 _allocator = G1Allocator::create_allocator(this);
1753 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1754
1755 int n_queues = (int)ParallelGCThreads;
1756 _task_queues = new RefToScanQueueSet(n_queues);
1757
1758 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1759 assert(n_rem_sets > 0, "Invariant.");
1760
1761 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1762 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1763 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1764
1765 for (int i = 0; i < n_queues; i++) {
1766 RefToScanQueue* q = new RefToScanQueue();
1767 q->initialize();
1768 _task_queues->register_queue(i, q);
1769 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1770 }
1771 clear_cset_start_regions();
1772
1773 // Initialize the G1EvacuationFailureALot counters and flags.
2148 DirtyCardQueue* into_cset_dcq,
2149 bool concurrent,
2150 uint worker_i) {
2151 // Clean cards in the hot card cache
2152 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2153 hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2154
2155 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2156 size_t n_completed_buffers = 0;
2157 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2158 n_completed_buffers++;
2159 }
2160 g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2161 dcqs.clear_n_completed_buffers();
2162 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2163 }
2164
2165
2166 // Computes the sum of the storage used by the various regions.
2167 size_t G1CollectedHeap::used() const {
2168 return _allocator->used();
2169 }
2170
2171 size_t G1CollectedHeap::used_unlocked() const {
2172 return _allocator->used_unlocked();
2173 }
2174
2175 class SumUsedClosure: public HeapRegionClosure {
2176 size_t _used;
2177 public:
2178 SumUsedClosure() : _used(0) {}
2179 bool doHeapRegion(HeapRegion* r) {
2180 if (!r->is_continues_humongous()) {
2181 _used += r->used();
2182 }
2183 return false;
2184 }
2185 size_t result() { return _used; }
2186 };
2187
2188 size_t G1CollectedHeap::recalculate_used() const {
2576 HeapRegion* next = cur->next_in_collection_set();
2577 if (cl->doHeapRegion(cur) && false) {
2578 cl->incomplete();
2579 return;
2580 }
2581 cur = next;
2582 }
2583 cur = g1_policy()->collection_set();
2584 while (cur != r) {
2585 HeapRegion* next = cur->next_in_collection_set();
2586 if (cl->doHeapRegion(cur) && false) {
2587 cl->incomplete();
2588 return;
2589 }
2590 cur = next;
2591 }
2592 }
2593
2594 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2595 HeapRegion* result = _hrm.next_region_in_heap(from);
2596 while (result != NULL && result->is_humongous()) {
2597 result = _hrm.next_region_in_heap(result);
2598 }
2599 return result;
2600 }
2601
2602 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2603 HeapRegion* hr = heap_region_containing(addr);
2604 return hr->block_start(addr);
2605 }
2606
2607 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2608 HeapRegion* hr = heap_region_containing(addr);
2609 return hr->block_size(addr);
2610 }
2611
2612 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2613 HeapRegion* hr = heap_region_containing(addr);
2614 return hr->block_is_obj(addr);
2615 }
2616
2884 // then verify that the marking information agrees.
2885 // Note we can't verify the contra-positive of the
2886 // above: if the object is dead (according to the mark
2887 // word), it may not be marked, or may have been marked
2888 // but has since become dead, or may have been allocated
2889 // since the last marking.
2890 if (_vo == VerifyOption_G1UseMarkWord) {
2891 guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
2892 }
2893
2894 o->oop_iterate_no_header(&isLive);
2895 if (!_hr->obj_allocated_since_prev_marking(o)) {
2896 size_t obj_size = o->size(); // Make sure we don't overflow
2897 _live_bytes += (obj_size * HeapWordSize);
2898 }
2899 }
2900 }
2901 size_t live_bytes() { return _live_bytes; }
2902 };
2903
2904 class VerifyRegionClosure: public HeapRegionClosure {
2905 private:
2906 bool _par;
2907 VerifyOption _vo;
2908 bool _failures;
2909 public:
2910 // _vo == UsePrevMarking -> use "prev" marking information,
2911 // _vo == UseNextMarking -> use "next" marking information,
2912 // _vo == UseMarkWord -> use mark word from object header.
2913 VerifyRegionClosure(bool par, VerifyOption vo)
2914 : _par(par),
2915 _vo(vo),
2916 _failures(false) {}
2917
2918 bool failures() {
2919 return _failures;
2920 }
2921
2922 bool doHeapRegion(HeapRegion* r) {
2923 if (!r->is_continues_humongous()) {
2924 bool failures = false;
2925 r->verify(_vo, &failures);
2926 if (failures) {
2927 _failures = true;
2928 } else {
2929 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
2930 r->object_iterate(&not_dead_yet_cl);
2931 if (_vo != VerifyOption_G1UseNextMarking) {
2932 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
2933 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
2934 "max_live_bytes "SIZE_FORMAT" "
2935 "< calculated "SIZE_FORMAT,
2936 p2i(r->bottom()), p2i(r->end()),
2937 r->max_live_bytes(),
2938 not_dead_yet_cl.live_bytes());
2939 _failures = true;
2940 }
2941 } else {
2942 // When vo == UseNextMarking we cannot currently do a sanity
3087 double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3088 g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3089 }
3090
3091 class PrintRegionClosure: public HeapRegionClosure {
3092 outputStream* _st;
3093 public:
3094 PrintRegionClosure(outputStream* st) : _st(st) {}
3095 bool doHeapRegion(HeapRegion* r) {
3096 r->print_on(_st);
3097 return false;
3098 }
3099 };
3100
3101 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3102 const HeapRegion* hr,
3103 const VerifyOption vo) const {
3104 switch (vo) {
3105 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3106 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3107 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
3108 default: ShouldNotReachHere();
3109 }
3110 return false; // keep some compilers happy
3111 }
3112
3113 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3114 const VerifyOption vo) const {
3115 switch (vo) {
3116 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
3117 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
3118 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked();
3119 default: ShouldNotReachHere();
3120 }
3121 return false; // keep some compilers happy
3122 }
3123
3124 void G1CollectedHeap::print_on(outputStream* st) const {
3125 st->print(" %-20s", "garbage-first heap");
3126 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3127 capacity()/K, used_unlocked()/K);
3128 st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
3129 p2i(_hrm.reserved().start()),
3130 p2i(_hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords),
3131 p2i(_hrm.reserved().end()));
3132 st->cr();
3133 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3134 uint young_regions = _young_list->length();
3135 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3136 (size_t) young_regions * HeapRegion::GrainBytes / K);
3137 uint survivor_regions = g1_policy()->recorded_survivor_regions();
3138 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3139 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3140 st->cr();
3141 MetaspaceAux::print_on(st);
3142 }
3143
3144 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3145 print_on(st);
3146
3147 // Print the per-region information.
3148 st->cr();
3149 st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3150 "HS=humongous(starts), HC=humongous(continues), "
3151 "CS=collection set, F=free, TS=gc time stamp, "
3152 "PTAMS=previous top-at-mark-start, "
3153 "NTAMS=next top-at-mark-start)");
3154 PrintRegionClosure blk(st);
3155 heap_region_iterate(&blk);
3156 }
3157
3158 void G1CollectedHeap::print_on_error(outputStream* st) const {
3159 this->CollectedHeap::print_on_error(st);
3160
3161 if (_cm != NULL) {
3162 st->cr();
3163 _cm->print_on_error(st);
3164 }
3165 }
3166
3167 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3168 workers()->print_worker_threads_on(st);
3169 _cmThread->print_on(st);
3170 st->cr();
3171 _cm->print_worker_threads_on(st);
3830 // Don't check the whole heap at this point as the
3831 // GC alloc regions from this pause have been tagged
3832 // as survivors and moved on to the survivor list.
3833 // Survivor regions will fail the !is_young() check.
3834 assert(check_young_list_empty(false /* check_heap */),
3835 "young list should be empty");
3836
3837 #if YOUNG_LIST_VERBOSE
3838 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3839 _young_list->print();
3840 #endif // YOUNG_LIST_VERBOSE
3841
3842 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3843 _young_list->first_survivor_region(),
3844 _young_list->last_survivor_region());
3845
3846 _young_list->reset_auxilary_lists();
3847
3848 if (evacuation_failed()) {
3849 _allocator->set_used(recalculate_used());
3850 for (uint i = 0; i < ParallelGCThreads; i++) {
3851 if (_evacuation_failed_info_array[i].has_failed()) {
3852 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3853 }
3854 }
3855 } else {
3856 // The "used" of the the collection set have already been subtracted
3857 // when they were freed. Add in the bytes evacuated.
3858 _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
3859 }
3860
3861 if (g1_policy()->during_initial_mark_pause()) {
3862 // We have to do this before we notify the CM threads that
3863 // they can start working to make sure that all the
3864 // appropriate initialization is done on the CM object.
3865 concurrent_mark()->checkpointRootsInitialPost();
3866 set_marking_started();
3867 // Note that we don't actually trigger the CM thread at
3868 // this point. We do that later when we're sure that
3869 // the current thread has completed its logging output.
6093 }
6094
6095 return ret;
6096 }
6097
6098 class TearDownRegionSetsClosure : public HeapRegionClosure {
6099 private:
6100 HeapRegionSet *_old_set;
6101
6102 public:
6103 TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
6104
6105 bool doHeapRegion(HeapRegion* r) {
6106 if (r->is_old()) {
6107 _old_set->remove(r);
6108 } else {
6109 // We ignore free regions, we'll empty the free list afterwards.
6110 // We ignore young regions, we'll empty the young list afterwards.
6111 // We ignore humongous regions, we're not tearing down the
6112 // humongous regions set.
6113 assert(r->is_free() || r->is_young() || r->is_humongous(),
6114 "it cannot be another type");
6115 }
6116 return false;
6117 }
6118
6119 ~TearDownRegionSetsClosure() {
6120 assert(_old_set->is_empty(), "post-condition");
6121 }
6122 };
6123
6124 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6125 assert_at_safepoint(true /* should_be_vm_thread */);
6126
6127 if (!free_list_only) {
6128 TearDownRegionSetsClosure cl(&_old_set);
6129 heap_region_iterate(&cl);
6130
6131 // Note that emptying the _young_list is postponed and instead done as
6132 // the first step when rebuilding the region sets again. The reason for
6133 // this is that during a full GC string deduplication needs to know if
6151 assert(_hrm->num_free_regions() == 0, "pre-condition");
6152 if (!free_list_only) {
6153 assert(_old_set->is_empty(), "pre-condition");
6154 }
6155 }
6156
6157 bool doHeapRegion(HeapRegion* r) {
6158 if (r->is_continues_humongous()) {
6159 return false;
6160 }
6161
6162 if (r->is_empty()) {
6163 // Add free regions to the free list
6164 r->set_free();
6165 r->set_allocation_context(AllocationContext::system());
6166 _hrm->insert_into_free_list(r);
6167 } else if (!_free_list_only) {
6168 assert(!r->is_young(), "we should not come across young regions");
6169
6170 if (r->is_humongous()) {
6171 // We ignore humongous regions; we left the humongous set unchanged.
6172 } else {
6173 // Objects that were compacted would have ended up on regions
6174 // that were previously old or free.
6175 assert(r->is_free() || r->is_old(), "invariant");
6176 // We now consider them old, so register as such.
6177 r->set_old();
6178 _old_set->add(r);
6179 }
6180 _total_used += r->used();
6181 }
6182
6183 return false;
6184 }
6185
6186 size_t total_used() {
6187 return _total_used;
6188 }
6189 };
6190
6191 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6192 assert_at_safepoint(true /* should_be_vm_thread */);
6193
6194 if (!free_list_only) {
6195 _young_list->empty_list();
6196 }
6197
6198 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6199 heap_region_iterate(&cl);
6200
6201 if (!free_list_only) {
6202 _allocator->set_used(cl.total_used());
6203 }
6204 assert(_allocator->used_unlocked() == recalculate_used(),
6205 err_msg("inconsistent _allocator->used_unlocked(), "
6206 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6207 _allocator->used_unlocked(), recalculate_used()));
6208 }
6209
6210 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6211 _refine_cte_cl->set_concurrent(concurrent);
6212 }
6213
6214 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6215 HeapRegion* hr = heap_region_containing(p);
6216 return hr->is_in(p);
6217 }
6218
6219 // Methods for the mutator alloc region
6220
6221 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6222 bool force) {
6283 return new_alloc_region;
6284 }
6285 }
6286 return NULL;
6287 }
6288
6289 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6290 size_t allocated_bytes,
6291 InCSetState dest) {
6292 bool during_im = g1_policy()->during_initial_mark_pause();
6293 alloc_region->note_end_of_copying(during_im);
6294 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6295 if (dest.is_young()) {
6296 young_list()->add_survivor_region(alloc_region);
6297 } else {
6298 _old_set.add(alloc_region);
6299 }
6300 _hr_printer.retire(alloc_region);
6301 }
6302
6303 // Heap region set verification
6304
6305 class VerifyRegionListsClosure : public HeapRegionClosure {
6306 private:
6307 HeapRegionSet* _old_set;
6308 HeapRegionSet* _humongous_set;
6309 HeapRegionManager* _hrm;
6310
6311 public:
6312 HeapRegionSetCount _old_count;
6313 HeapRegionSetCount _humongous_count;
6314 HeapRegionSetCount _free_count;
6315
6316 VerifyRegionListsClosure(HeapRegionSet* old_set,
6317 HeapRegionSet* humongous_set,
6318 HeapRegionManager* hrm) :
6319 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6320 _old_count(), _humongous_count(), _free_count(){ }
6321
6322 bool doHeapRegion(HeapRegion* hr) {
6323 if (hr->is_continues_humongous()) {
6324 return false;
6325 }
6326
6327 if (hr->is_young()) {
6328 // TODO
6329 } else if (hr->is_starts_humongous()) {
6330 assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
6331 _humongous_count.increment(1u, hr->capacity());
6332 } else if (hr->is_empty()) {
6333 assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
6334 _free_count.increment(1u, hr->capacity());
6335 } else if (hr->is_old()) {
6336 assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
6337 _old_count.increment(1u, hr->capacity());
6338 } else {
6339 ShouldNotReachHere();
6340 }
6341 return false;
6342 }
6343
6344 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6345 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
6346 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6347 old_set->total_capacity_bytes(), _old_count.capacity()));
6348
6349 guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
6350 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6351 humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
6352
6353 guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
6354 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6355 free_list->total_capacity_bytes(), _free_count.capacity()));
6356 }
6357 };
6358
|
387 if (head == NULL) {
388 return NULL;
389 }
390 HeapRegion* new_head = head->get_next_dirty_cards_region();
391 if (head == new_head) {
392 // The last region.
393 new_head = NULL;
394 }
395 hr = (HeapRegion*)Atomic::cmpxchg_ptr(new_head, &_dirty_cards_region_list,
396 head);
397 } while (hr != head);
398 assert(hr != NULL, "invariant");
399 hr->set_next_dirty_cards_region(NULL);
400 return hr;
401 }
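
// A minimal standalone sketch of the lock-free pop above, with std::atomic
// standing in for Atomic::cmpxchg_ptr; all names here are illustrative and
// not part of this file. As in the list above, the last node links to
// itself, so head == head->next marks the end of the list. (A sketch only;
// a production version would also have to consider ABA.)

#include <atomic>

struct Node {
  Node* next;
};

static std::atomic<Node*> list_head{nullptr};

Node* pop_node() {
  Node* head;
  Node* new_head;
  do {
    head = list_head.load();
    if (head == nullptr) {
      return nullptr;            // list is empty
    }
    new_head = head->next;
    if (new_head == head) {
      new_head = nullptr;        // popping the last, self-linked node
    }
    // The compare-exchange fails (and we retry) if another thread popped first.
  } while (!list_head.compare_exchange_weak(head, new_head));
  head->next = nullptr;
  return head;
}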
402
403 // Returns true if the reference points to an object that
404 // can move in an incremental collection.
405 bool G1CollectedHeap::is_scavengable(const void* p) {
406 HeapRegion* hr = heap_region_containing(p);
407 return !hr->is_pinned();
408 }
409
410 // Private methods.
411
412 HeapRegion*
413 G1CollectedHeap::new_region_try_secondary_free_list(bool is_old) {
414 MutexLockerEx x(SecondaryFreeList_lock, Mutex::_no_safepoint_check_flag);
415 while (!_secondary_free_list.is_empty() || free_regions_coming()) {
416 if (!_secondary_free_list.is_empty()) {
417 if (G1ConcRegionFreeingVerbose) {
418 gclog_or_tty->print_cr("G1ConcRegionFreeing [region alloc] : "
419 "secondary_free_list has %u entries",
420 _secondary_free_list.length());
421 }
422 // It looks as if there are free regions available on the
423 // secondary_free_list. Let's move them to the free_list and try
424 // again to allocate from it.
425 append_secondary_free_list();
426
427 assert(_hrm.num_free_regions() > 0, "if the secondary_free_list was not "
890 // follow-on attempt will be at the start of the next loop
891 // iteration (after taking the Heap_lock).
892 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
893 false /* bot_updates */);
894 if (result != NULL) {
895 return result;
896 }
897
898 // Give a warning if we seem to be looping forever.
899 if ((QueuedAllocationWarningCount > 0) &&
900 (try_count % QueuedAllocationWarningCount == 0)) {
901 warning("G1CollectedHeap::attempt_allocation_slow() "
902 "retries %d times", try_count);
903 }
904 }
905
906 ShouldNotReachHere();
907 return NULL;
908 }
909
910 void G1CollectedHeap::begin_record_alloc_range() {
911 assert_at_safepoint(true /* should_be_vm_thread */);
912 if (_recording_allocator == NULL) {
913 _recording_allocator = G1RecordingAllocator::create_allocator(this);
914 }
915 }
916
917 bool G1CollectedHeap::is_record_alloc_too_large(size_t word_size) {
918 // Check whether the size would be considered humongous for a minimum-sized region.
919 return word_size >= humongous_threshold_for(HeapRegion::min_region_size_in_words());
920 }
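
// A small worked example of the half-region humongous rule assumed by the
// check above: an allocation is humongous once it is at least half a
// region (mirroring the HeapRegion::GrainWords / 2 rule this file uses
// elsewhere). The sizes below are illustrative, not taken from this file.

#include <cassert>
#include <cstddef>

static size_t humongous_threshold_for_words(size_t region_words) {
  return region_words / 2;
}

void humongous_threshold_example() {
  const size_t region_words = 131072;  // e.g. a 1 MB region of 8-byte words
  const size_t threshold    = humongous_threshold_for_words(region_words);
  assert(threshold == 65536);
  assert(65536 >= threshold);          // treated as humongous
  assert(65535 <  threshold);          // fits a regular region
}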
921
922 HeapWord* G1CollectedHeap::record_mem_allocate(size_t word_size) {
923 assert_at_safepoint(true /* should_be_vm_thread */);
924 assert(_recording_allocator != NULL, "_recording_allocator not initialized");
925
926 // Return NULL if the size would be considered humongous for a minimum-sized region.
927 // Otherwise, attempt to perform the allocation in the recorded space.
928 if (is_record_alloc_too_large(word_size)) {
929 return NULL;
930 }
931 return _recording_allocator->record_mem_allocate(word_size);
932 }
933
934 void G1CollectedHeap::end_record_alloc_range(GrowableArray<MemRegion>* ranges,
935 uint end_alignment) {
936 assert_at_safepoint(true /* should_be_vm_thread */);
937 assert(_recording_allocator != NULL, "recording allocator uninitialized");
938
939 // Call complete_recording to do the real work, filling in the MemRegion
940 // array with the recorded regions.
941 _recording_allocator->complete_recording(ranges, end_alignment);
942 }
943
944 void G1CollectedHeap::fill_with_non_humongous_objects(HeapWord* base_address, size_t word_size) {
945 // Create filler objects for the specified range, being careful not to
946 // create any humongous objects.
947 if (!is_humongous(word_size)) {
948 CollectedHeap::fill_with_object(base_address, word_size);
949 } else {
950 size_t remainder = word_size;
951 size_t increment = humongous_threshold_for(HeapRegion::GrainWords) / 2;
952 HeapWord* fill_top = base_address;
953 // Don't let remainder get smaller than the minimum filler object size.
954 while ((remainder > increment) && (remainder - increment >= min_fill_size())) {
955 CollectedHeap::fill_with_object(fill_top, increment);
956 fill_top += increment;
957 remainder -= increment;
958 }
959 if (remainder != 0) {
960 CollectedHeap::fill_with_object(fill_top, remainder);
961 }
962 }
963 }
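
// A standalone sketch of the splitting loop above: carve a word range into
// chunks small enough to stay non-humongous, while never leaving a tail
// shorter than the minimum filler-object size. Names are illustrative; note
// the tail chunk can exceed 'increment' by up to min_fill_words - 1 words.

#include <cstddef>
#include <vector>

std::vector<size_t> split_filler_range(size_t word_size,
                                       size_t increment,
                                       size_t min_fill_words) {
  std::vector<size_t> chunks;
  size_t remainder = word_size;
  // Stop early rather than leave a tail smaller than a filler object.
  while (remainder > increment && remainder - increment >= min_fill_words) {
    chunks.push_back(increment);
    remainder -= increment;
  }
  if (remainder != 0) {
    chunks.push_back(remainder);
  }
  return chunks;
}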
964
965 bool
966 G1CollectedHeap::check_archive_addresses(MemRegion* ranges, uint count) {
967 MemRegion mr = _hrm.reserved();
968 for (uint i = 0; i < count; i++) {
969 if (!mr.contains(ranges[i].start()) || !mr.contains(ranges[i].last())) {
970 return false;
971 }
972 }
973 return true;
974 }
975
976 bool
977 G1CollectedHeap::alloc_archive_regions(MemRegion* ranges, uint count) {
978 MutexLockerEx x(Heap_lock);
979
980 MemRegion mr = _hrm.reserved();
981 HeapWord* prev_end_addr = NULL;
982 uint prev_end_index = 0;
983
984 // Temporarily disable pretouching of heap pages. This interface is used
985 // when mmap'ing archived heap data in, so pre-touching is wasted.
986 FlagSetting fs(AlwaysPreTouch, false);
987
988 // Enable archive object checking in G1MarkSweep. We have to let it know
989 // about each archive range, so that objects in those ranges aren't marked.
990 G1MarkSweep::enable_archive_object_check();
991
992 // For each specified MemRegion range, allocate the corresponding G1
993 // regions and mark them as archive regions.
994 // We expect the ranges in ascending order, without overlap.
995 for (uint i = 0; i < count; i++) {
996 HeapWord* base_address = ranges[i].start();
997 size_t word_size = ranges[i].word_size();
998 HeapWord* end_address = ranges[i].last();
999
1000 assert((base_address > prev_end_addr) && (base_address < end_address),
1001 "invalid range specification");
1002
1003 prev_end_addr = end_address;
1004 uint start_index = _hrm.addr_to_index(base_address);
1005 uint end_index = _hrm.addr_to_index(end_address);
1006
1007 // Check for ranges that begin/end in the same G1 region
1008 // as the previous range.
1009 if (start_index == prev_end_index) {
1010 if (end_index == prev_end_index) {
1011 break;
1012 }
1013 start_index++;
1014 }
1015 prev_end_index = end_index;
1016
1017 // Ensure that each contained G1 region is available and free,
1018 // returning false if not.
1019 for (uint curr_index = start_index; curr_index <= end_index; curr_index++) {
1020 HeapRegion* curr_region;
1021 if ((curr_region = _hrm.at_or_null(curr_index)) == NULL) {
1022 ergo_verbose1(ErgoHeapSizing,
1023 "attempt heap expansion",
1024 ergo_format_reason("pinning region")
1025 ergo_format_byte("region size"),
1026 HeapRegion::GrainWords * HeapWordSize);
1027 _hrm.expand_at(curr_index, 1);
1028 } else {
1029 if (!curr_region->is_free()) {
1030 return false;
1031 }
1032 }
1033 }
1034
1035 _hrm.allocate_free_regions_starting_at(start_index, (end_index - start_index) + 1);
1036 _allocator->increase_used(word_size * HeapWordSize);
1037
1038 // Mark each G1 region touched by the range as archive, add it to the old set, and set
1039 // the allocation context and top.
1040 for (uint i = start_index; i <= end_index; i++) {
1041 HeapRegion* curr = region_at(i);
1042 assert(curr->is_empty() && !curr->is_pinned(), "Invalid MemRegion");
1043 _hr_printer.alloc(curr, G1HRPrinter::Archive);
1044 curr->set_allocation_context(AllocationContext::system());
1045 if (i != end_index) {
1046 curr->set_top(curr->end());
1047 } else {
1048 curr->set_top(end_address + 1);
1049 }
1050 curr->set_archive();
1051 _old_set.add(curr);
1052 }
1053
1054 // Notify mark-sweep of the archive range.
1055 G1MarkSweep::mark_range_archive(base_address, end_address);
1056 }
1057 return true;
1058 }
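
// A simplified model of the index bookkeeping above: map an address range
// to G1 region indices and skip a start region that the previous range
// already claimed. All names and parameters here are illustrative.

#include <cstddef>
#include <cstdint>

struct RangeIndices {
  uint32_t start;
  uint32_t end;
};

RangeIndices range_to_indices(uintptr_t heap_base,
                              size_t    region_bytes,
                              uintptr_t range_start,   // first byte of range
                              uintptr_t range_last,    // last byte, inclusive
                              uint32_t  prev_end_index) {
  uint32_t start_index = (uint32_t)((range_start - heap_base) / region_bytes);
  uint32_t end_index   = (uint32_t)((range_last  - heap_base) / region_bytes);
  if (start_index == prev_end_index) {
    start_index++;  // the previous range already allocated this region
  }
  return RangeIndices{start_index, end_index};
}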
1059
1060 void
1061 G1CollectedHeap::fill_archive_regions(MemRegion* ranges, uint count) {
1062
1063 MemRegion mr = _hrm.reserved();
1064 HeapWord *prev_end_addr = NULL;
1065 uint prev_end_index = 0;
1066
1067 // For each MemRegion, create filler objects, if needed, in the G1 regions
1068 // that contain the address range. The address range actually within the
1069 // MemRegion will not be modified. That is assumed to have been initialized
1070 // elsewhere, probably via an mmap of archived heap data.
1071 MutexLockerEx x(Heap_lock);
1072 for (uint i = 0; i < count; i++) {
1073 HeapWord* base_address = ranges[i].start();
1074 size_t word_size = ranges[i].word_size();
1075 HeapWord* end_address = ranges[i].last();
1076
1077 assert(mr.contains(base_address) && mr.contains(end_address),
1078 "MemRegion outside of heap");
1079
1080 uint start_index = _hrm.addr_to_index(base_address);
1081 uint end_index = _hrm.addr_to_index(end_address);
1082 HeapRegion* start_region = _hrm.addr_to_region(base_address);
1083 HeapRegion* end_region = _hrm.addr_to_region(end_address);
1084 HeapWord* bottom_address = start_region->bottom();
1085
1086 // Check for a range beginning in the same region in which the
1087 // previous one ended.
1088 if (start_index == prev_end_index) {
1089 bottom_address = prev_end_addr;
1090 start_index++;
1091 }
1092
1093 #ifdef ASSERT
1094 // Verify the regions were all marked as archive regions by
1095 // alloc_archive_regions.
1096 for (uint i = start_index; i <= end_index; i++) {
1097 HeapRegion* curr = region_at(i);
1098 assert(curr->is_archive(), "Invalid range in fill_archive_regions");
1099 }
1100 #endif
1101
1102 prev_end_addr = base_address + word_size;
1103 prev_end_index = end_index;
1104
1105 // Fill the low part of the first allocated region with dummy object(s),
1106 // if the region base does not match the range address, or if the previous
1107 // range ended within the same G1 region, and there is a gap.
1108 if (base_address != bottom_address) {
1109 size_t fill_size = base_address - bottom_address;
1110 G1CollectedHeap::fill_with_non_humongous_objects(bottom_address, fill_size);
1111 _allocator->increase_used(fill_size * HeapWordSize);
1112 }
1113 }
1114 }
1115
1116
1117 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
1118 uint* gc_count_before_ret,
1119 uint* gclocker_retry_count_ret) {
1120 // The structure of this method has a lot of similarities to
1121 // attempt_allocation_slow(). The reason these two were not merged
1122 // into a single one is that such a method would require several "if
1123 // allocation is not humongous do this, otherwise do that"
1124 // conditional paths which would obscure its flow. In fact, an early
1125 // version of this code did use a unified method which was harder to
1126 // follow and, as a result, it had subtle bugs that were hard to
1127 // track down. So keeping these two methods separate allows each to
1128 // be more readable. It will be good to keep these two in sync as
1129 // much as possible.
1130
1131 assert_heap_not_locked_and_not_at_safepoint();
1132 assert(is_humongous(word_size), "attempt_allocation_humongous() "
1133 "should only be called for humongous allocations");
1134
1135 // Humongous objects can exhaust the heap quickly, so we should check if we
1136 // need to start a marking cycle at each humongous object allocation. We do
1321 }
1322 };
1323
1324 class PostCompactionPrinterClosure: public HeapRegionClosure {
1325 private:
1326 G1HRPrinter* _hr_printer;
1327 public:
1328 bool doHeapRegion(HeapRegion* hr) {
1329 assert(!hr->is_young(), "not expecting to find young regions");
1330 if (hr->is_free()) {
1331 // We only generate output for non-empty regions.
1332 } else if (hr->is_starts_humongous()) {
1333 if (hr->region_num() == 1) {
1334 // single humongous region
1335 _hr_printer->post_compaction(hr, G1HRPrinter::SingleHumongous);
1336 } else {
1337 _hr_printer->post_compaction(hr, G1HRPrinter::StartsHumongous);
1338 }
1339 } else if (hr->is_continues_humongous()) {
1340 _hr_printer->post_compaction(hr, G1HRPrinter::ContinuesHumongous);
1341 } else if (hr->is_archive()) {
1342 _hr_printer->post_compaction(hr, G1HRPrinter::Archive);
1343 } else if (hr->is_old()) {
1344 _hr_printer->post_compaction(hr, G1HRPrinter::Old);
1345 } else {
1346 ShouldNotReachHere();
1347 }
1348 return false;
1349 }
1350
1351 PostCompactionPrinterClosure(G1HRPrinter* hr_printer)
1352 : _hr_printer(hr_printer) { }
1353 };
1354
1355 void G1CollectedHeap::print_hrm_post_compaction() {
1356 PostCompactionPrinterClosure cl(hr_printer());
1357 heap_region_iterate(&cl);
1358 }
1359
1360 bool G1CollectedHeap::do_collection(bool explicit_gc,
1361 bool clear_all_soft_refs,
1362 size_t word_size) {
1916 CollectedHeap(),
1917 _g1_policy(policy_),
1918 _dirty_card_queue_set(false),
1919 _into_cset_dirty_card_queue_set(false),
1920 _is_alive_closure_cm(this),
1921 _is_alive_closure_stw(this),
1922 _ref_processor_cm(NULL),
1923 _ref_processor_stw(NULL),
1924 _bot_shared(NULL),
1925 _evac_failure_scan_stack(NULL),
1926 _mark_in_progress(false),
1927 _cg1r(NULL),
1928 _g1mm(NULL),
1929 _refine_cte_cl(NULL),
1930 _full_collection(false),
1931 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1932 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1933 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1934 _humongous_reclaim_candidates(),
1935 _has_humongous_reclaim_candidates(false),
1936 _recording_allocator(NULL),
1937 _free_regions_coming(false),
1938 _young_list(new YoungList(this)),
1939 _gc_time_stamp(0),
1940 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1941 _old_plab_stats(OldPLABSize, PLABWeight),
1942 _expand_heap_after_alloc_failure(true),
1943 _surviving_young_words(NULL),
1944 _old_marking_cycles_started(0),
1945 _old_marking_cycles_completed(0),
1946 _concurrent_cycle_started(false),
1947 _heap_summary_sent(false),
1948 _in_cset_fast_test(),
1949 _dirty_cards_region_list(NULL),
1950 _worker_cset_start_region(NULL),
1951 _worker_cset_start_region_time_stamp(NULL),
1952 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1953 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1954 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1955 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1956
1957 _workers = new FlexibleWorkGang("GC Thread", ParallelGCThreads,
1958 /* are_GC_task_threads */true,
1959 /* are_ConcurrentGC_threads */false);
1960 _workers->initialize_workers();
1961
1962 _allocator = G1Allocator::create_allocator(this);
1963 _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1964
1965 int n_queues = (int)ParallelGCThreads;
1966 _task_queues = new RefToScanQueueSet(n_queues);
1967
1968 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1969 assert(n_rem_sets > 0, "Invariant.");
1970
1971 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1972 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1973 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1974
1975 for (int i = 0; i < n_queues; i++) {
1976 RefToScanQueue* q = new RefToScanQueue();
1977 q->initialize();
1978 _task_queues->register_queue(i, q);
1979 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1980 }
1981 clear_cset_start_regions();
1982
1983 // Initialize the G1EvacuationFailureALot counters and flags.
2358 DirtyCardQueue* into_cset_dcq,
2359 bool concurrent,
2360 uint worker_i) {
2361 // Clean cards in the hot card cache
2362 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2363 hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2364
2365 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2366 size_t n_completed_buffers = 0;
2367 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2368 n_completed_buffers++;
2369 }
2370 g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers);
2371 dcqs.clear_n_completed_buffers();
2372 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2373 }
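
// A minimal sketch of the drain-and-count pattern above, with a plain
// queue standing in for DirtyCardQueueSet; names are illustrative.

#include <cstddef>
#include <deque>

struct Buffer { /* a batch of dirty card addresses */ };

static std::deque<Buffer> completed_buffers;

// Mirrors apply_closure_to_completed_buffer(): process one buffer if any,
// and report whether there was one to process.
bool process_one_completed_buffer() {
  if (completed_buffers.empty()) {
    return false;
  }
  Buffer b = completed_buffers.front();
  completed_buffers.pop_front();
  // ... apply the card closure to every card recorded in 'b' ...
  (void)b;
  return true;
}

size_t drain_completed_buffers() {
  size_t n = 0;
  while (process_one_completed_buffer()) {
    n++;  // this count is what gets recorded as UpdateRS work above
  }
  return n;
}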
2374
2375
2376 // Computes the sum of the storage used by the various regions.
2377 size_t G1CollectedHeap::used() const {
2378 size_t result = _allocator->used();
2379 if (_recording_allocator != NULL) {
2380 result += _recording_allocator->used();
2381 }
2382 return result;
2383 }
2384
2385 size_t G1CollectedHeap::used_unlocked() const {
2386 return _allocator->used_unlocked();
2387 }
2388
2389 class SumUsedClosure: public HeapRegionClosure {
2390 size_t _used;
2391 public:
2392 SumUsedClosure() : _used(0) {}
2393 bool doHeapRegion(HeapRegion* r) {
2394 if (!r->is_continues_humongous()) {
2395 _used += r->used();
2396 }
2397 return false;
2398 }
2399 size_t result() { return _used; }
2400 };
2401
2402 size_t G1CollectedHeap::recalculate_used() const {
2790 HeapRegion* next = cur->next_in_collection_set();
2791 if (cl->doHeapRegion(cur) && false) {
2792 cl->incomplete();
2793 return;
2794 }
2795 cur = next;
2796 }
2797 cur = g1_policy()->collection_set();
2798 while (cur != r) {
2799 HeapRegion* next = cur->next_in_collection_set();
2800 if (cl->doHeapRegion(cur) && false) {
2801 cl->incomplete();
2802 return;
2803 }
2804 cur = next;
2805 }
2806 }
2807
2808 HeapRegion* G1CollectedHeap::next_compaction_region(const HeapRegion* from) const {
2809 HeapRegion* result = _hrm.next_region_in_heap(from);
2810 while (result != NULL && result->is_pinned()) {
2811 result = _hrm.next_region_in_heap(result);
2812 }
2813 return result;
2814 }
2815
2816 HeapWord* G1CollectedHeap::block_start(const void* addr) const {
2817 HeapRegion* hr = heap_region_containing(addr);
2818 return hr->block_start(addr);
2819 }
2820
2821 size_t G1CollectedHeap::block_size(const HeapWord* addr) const {
2822 HeapRegion* hr = heap_region_containing(addr);
2823 return hr->block_size(addr);
2824 }
2825
2826 bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
2827 HeapRegion* hr = heap_region_containing(addr);
2828 return hr->block_is_obj(addr);
2829 }
2830
3098 // then verify that the marking information agrees.
3099 // Note we can't verify the contra-positive of the
3100 // above: if the object is dead (according to the mark
3101 // word), it may not be marked, or may have been marked
3102 // but has since become dead, or may have been allocated
3103 // since the last marking.
3104 if (_vo == VerifyOption_G1UseMarkWord) {
3105 guarantee(!_g1h->is_obj_dead(o), "mark word and concurrent mark mismatch");
3106 }
3107
3108 o->oop_iterate_no_header(&isLive);
3109 if (!_hr->obj_allocated_since_prev_marking(o)) {
3110 size_t obj_size = o->size(); // Make sure we don't overflow
3111 _live_bytes += (obj_size * HeapWordSize);
3112 }
3113 }
3114 }
3115 size_t live_bytes() { return _live_bytes; }
3116 };
3117
3118
3119 class VerifyArchiveOopClosure: public OopClosure {
3120 public:
3121 VerifyArchiveOopClosure(HeapRegion *hr) { }
3122 void do_oop(narrowOop *p) { do_oop_work(p); }
3123 void do_oop( oop *p) { do_oop_work(p); }
3124
3125 template <class T> void do_oop_work(T *p) {
3126 oop obj = oopDesc::load_decode_heap_oop(p);
3127 guarantee(obj == NULL || G1MarkSweep::in_archive_range(obj),
3128 "Archive object references a non-pinned object");
3129 }
3130 };
3131
3132 class VerifyArchiveRegionClosure: public ObjectClosure {
3133 public:
3134 VerifyArchiveRegionClosure(HeapRegion *hr) { }
3135 // Verify that all object pointers are to pinned regions.
3136 void do_object(oop o) {
3137 VerifyArchiveOopClosure checkOop(NULL);
3138 assert(o != NULL, "Should not be here for NULL oops");
3139 o->oop_iterate_no_header(&checkOop);
3140 }
3141 };
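
// A simplified model of the two closures above, folded into one loop:
// walk every object in an archive region and check that each outgoing
// reference lands back in an archive range. Plain pointers stand in for
// oops and narrowOops; all names are illustrative.

#include <cassert>
#include <vector>

struct Obj {
  std::vector<Obj*> refs;  // outgoing references held by this object
  bool in_archive;
};

void verify_archive_region(const std::vector<Obj*>& region_objects) {
  for (const Obj* o : region_objects) {
    for (const Obj* target : o->refs) {
      // NULL references are fine; anything else must stay in the archive.
      assert(target == nullptr || target->in_archive);
    }
  }
}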
3142
3143
3144 class VerifyRegionClosure: public HeapRegionClosure {
3145 private:
3146 bool _par;
3147 VerifyOption _vo;
3148 bool _failures;
3149 public:
3150 // _vo == UsePrevMarking -> use "prev" marking information,
3151 // _vo == UseNextMarking -> use "next" marking information,
3152 // _vo == UseMarkWord -> use mark word from object header.
3153 VerifyRegionClosure(bool par, VerifyOption vo)
3154 : _par(par),
3155 _vo(vo),
3156 _failures(false) {}
3157
3158 bool failures() {
3159 return _failures;
3160 }
3161
3162 bool doHeapRegion(HeapRegion* r) {
3163 // For archive regions, verify there are no heap pointers to
3164 // non-pinned regions. For all others, verify liveness info.
3165 if (r->is_archive()) {
3166 VerifyArchiveRegionClosure verify_oop_pointers(r);
3167 r->object_iterate(&verify_oop_pointers);
3168 return true;
3169 }
3170 if (!r->is_continues_humongous()) {
3171 bool failures = false;
3172 r->verify(_vo, &failures);
3173 if (failures) {
3174 _failures = true;
3175 } else {
3176 VerifyObjsInRegionClosure not_dead_yet_cl(r, _vo);
3177 r->object_iterate(&not_dead_yet_cl);
3178 if (_vo != VerifyOption_G1UseNextMarking) {
3179 if (r->max_live_bytes() < not_dead_yet_cl.live_bytes()) {
3180 gclog_or_tty->print_cr("["PTR_FORMAT","PTR_FORMAT"] "
3181 "max_live_bytes "SIZE_FORMAT" "
3182 "< calculated "SIZE_FORMAT,
3183 p2i(r->bottom()), p2i(r->end()),
3184 r->max_live_bytes(),
3185 not_dead_yet_cl.live_bytes());
3186 _failures = true;
3187 }
3188 } else {
3189 // When vo == UseNextMarking we cannot currently do a sanity
3334 double verify_time_ms = verify(VerifyAfterGC, " VerifyAfterGC:");
3335 g1_policy()->phase_times()->record_verify_after_time_ms(verify_time_ms);
3336 }
3337
3338 class PrintRegionClosure: public HeapRegionClosure {
3339 outputStream* _st;
3340 public:
3341 PrintRegionClosure(outputStream* st) : _st(st) {}
3342 bool doHeapRegion(HeapRegion* r) {
3343 r->print_on(_st);
3344 return false;
3345 }
3346 };
3347
3348 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3349 const HeapRegion* hr,
3350 const VerifyOption vo) const {
3351 switch (vo) {
3352 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj, hr);
3353 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj, hr);
3354 case VerifyOption_G1UseMarkWord: return !obj->is_gc_marked() && !hr->is_archive();
3355 default: ShouldNotReachHere();
3356 }
3357 return false; // keep some compilers happy
3358 }
3359
3360 bool G1CollectedHeap::is_obj_dead_cond(const oop obj,
3361 const VerifyOption vo) const {
3362 switch (vo) {
3363 case VerifyOption_G1UsePrevMarking: return is_obj_dead(obj);
3364 case VerifyOption_G1UseNextMarking: return is_obj_ill(obj);
3365 case VerifyOption_G1UseMarkWord: {
3366 HeapRegion* hr = _hrm.addr_to_region((HeapWord*)obj);
3367 return !obj->is_gc_marked() && !hr->is_archive();
3368 }
3369 default: ShouldNotReachHere();
3370 }
3371 return false; // keep some compilers happy
3372 }
3373
3374 void G1CollectedHeap::print_on(outputStream* st) const {
3375 st->print(" %-20s", "garbage-first heap");
3376 st->print(" total " SIZE_FORMAT "K, used " SIZE_FORMAT "K",
3377 capacity()/K, used_unlocked()/K);
3378 st->print(" [" PTR_FORMAT ", " PTR_FORMAT ", " PTR_FORMAT ")",
3379 p2i(_hrm.reserved().start()),
3380 p2i(_hrm.reserved().start() + _hrm.length() + HeapRegion::GrainWords),
3381 p2i(_hrm.reserved().end()));
3382 st->cr();
3383 st->print(" region size " SIZE_FORMAT "K, ", HeapRegion::GrainBytes / K);
3384 uint young_regions = _young_list->length();
3385 st->print("%u young (" SIZE_FORMAT "K), ", young_regions,
3386 (size_t) young_regions * HeapRegion::GrainBytes / K);
3387 uint survivor_regions = g1_policy()->recorded_survivor_regions();
3388 st->print("%u survivors (" SIZE_FORMAT "K)", survivor_regions,
3389 (size_t) survivor_regions * HeapRegion::GrainBytes / K);
3390 st->cr();
3391 MetaspaceAux::print_on(st);
3392 }
3393
3394 void G1CollectedHeap::print_extended_on(outputStream* st) const {
3395 print_on(st);
3396
3397 // Print the per-region information.
3398 st->cr();
3399 st->print_cr("Heap Regions: (Y=young(eden), SU=young(survivor), "
3400 "HS=humongous(starts), HC=humongous(continues), "
3401 "CS=collection set, F=free, A=archive, TS=gc time stamp, "
3402 "PTAMS=previous top-at-mark-start, "
3403 "NTAMS=next top-at-mark-start)");
3404 PrintRegionClosure blk(st);
3405 heap_region_iterate(&blk);
3406 }
3407
3408 void G1CollectedHeap::print_on_error(outputStream* st) const {
3409 this->CollectedHeap::print_on_error(st);
3410
3411 if (_cm != NULL) {
3412 st->cr();
3413 _cm->print_on_error(st);
3414 }
3415 }
3416
3417 void G1CollectedHeap::print_gc_threads_on(outputStream* st) const {
3418 workers()->print_worker_threads_on(st);
3419 _cmThread->print_on(st);
3420 st->cr();
3421 _cm->print_worker_threads_on(st);
4080 // Don't check the whole heap at this point as the
4081 // GC alloc regions from this pause have been tagged
4082 // as survivors and moved on to the survivor list.
4083 // Survivor regions will fail the !is_young() check.
4084 assert(check_young_list_empty(false /* check_heap */),
4085 "young list should be empty");
4086
4087 #if YOUNG_LIST_VERBOSE
4088 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
4089 _young_list->print();
4090 #endif // YOUNG_LIST_VERBOSE
4091
4092 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
4093 _young_list->first_survivor_region(),
4094 _young_list->last_survivor_region());
4095
4096 _young_list->reset_auxilary_lists();
4097
4098 if (evacuation_failed()) {
4099 _allocator->set_used(recalculate_used());
4100 if (_recording_allocator != NULL) {
4101 _recording_allocator->clear_used();
4102 }
4103 for (uint i = 0; i < ParallelGCThreads; i++) {
4104 if (_evacuation_failed_info_array[i].has_failed()) {
4105 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
4106 }
4107 }
4108 } else {
4109 // The "used" of the the collection set have already been subtracted
4110 // when they were freed. Add in the bytes evacuated.
4111 _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
4112 }
4113
4114 if (g1_policy()->during_initial_mark_pause()) {
4115 // We have to do this before we notify the CM threads that
4116 // they can start working to make sure that all the
4117 // appropriate initialization is done on the CM object.
4118 concurrent_mark()->checkpointRootsInitialPost();
4119 set_marking_started();
4120 // Note that we don't actually trigger the CM thread at
4121 // this point. We do that later when we're sure that
4122 // the current thread has completed its logging output.
6346 }
6347
6348 return ret;
6349 }
6350
6351 class TearDownRegionSetsClosure : public HeapRegionClosure {
6352 private:
6353 HeapRegionSet *_old_set;
6354
6355 public:
6356 TearDownRegionSetsClosure(HeapRegionSet* old_set) : _old_set(old_set) { }
6357
6358 bool doHeapRegion(HeapRegion* r) {
6359 if (r->is_old()) {
6360 _old_set->remove(r);
6361 } else {
6362 // We ignore free regions, we'll empty the free list afterwards.
6363 // We ignore young regions, we'll empty the young list afterwards.
6364 // We ignore humongous regions, we're not tearing down the
6365 // humongous regions set.
6366 // We ignore archive regions.
6367 assert(r->is_free() || r->is_young() || r->is_humongous() || r->is_archive(),
6368 "it cannot be another type");
6369 }
6370 return false;
6371 }
6372
6373 ~TearDownRegionSetsClosure() {
6374 assert(_old_set->is_empty(), "post-condition");
6375 }
6376 };
6377
6378 void G1CollectedHeap::tear_down_region_sets(bool free_list_only) {
6379 assert_at_safepoint(true /* should_be_vm_thread */);
6380
6381 if (!free_list_only) {
6382 TearDownRegionSetsClosure cl(&_old_set);
6383 heap_region_iterate(&cl);
6384
6385 // Note that emptying the _young_list is postponed and instead done as
6386 // the first step when rebuilding the region sets again. The reason for
6387 // this is that during a full GC string deduplication needs to know if
6405 assert(_hrm->num_free_regions() == 0, "pre-condition");
6406 if (!free_list_only) {
6407 assert(_old_set->is_empty(), "pre-condition");
6408 }
6409 }
6410
6411 bool doHeapRegion(HeapRegion* r) {
6412 if (r->is_continues_humongous()) {
6413 return false;
6414 }
6415
6416 if (r->is_empty()) {
6417 // Add free regions to the free list
6418 r->set_free();
6419 r->set_allocation_context(AllocationContext::system());
6420 _hrm->insert_into_free_list(r);
6421 } else if (!_free_list_only) {
6422 assert(!r->is_young(), "we should not come across young regions");
6423
6424 if (r->is_humongous()) {
6425 // We ignore humongous regions.
6426 // We left the humongous set unchanged.
6427 } else {
6428 // Objects that were compacted would have ended up on regions
6429 // that were previously old or free. Archive regions (which are
6430 // old) will not have been touched.
6431 assert(r->is_free() || r->is_old(), "invariant");
6432 // We now consider them old, so register as such. Archive
6433 // regions keep their existing type, but are still added
6434 // to the old set.
6435 if (!r->is_archive()) {
6436 r->set_old();
6437 }
6438 _old_set->add(r);
6439 }
6440 _total_used += r->used();
6441 }
6442
6443 return false;
6444 }
6445
6446 size_t total_used() {
6447 return _total_used;
6448 }
6449 };
6450
6451 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6452 assert_at_safepoint(true /* should_be_vm_thread */);
6453
6454 if (!free_list_only) {
6455 _young_list->empty_list();
6456 }
6457
6458 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6459 heap_region_iterate(&cl);
6460
6461 if (!free_list_only) {
6462 _allocator->set_used(cl.total_used());
6463 if (_recording_allocator != NULL) {
6464 _recording_allocator->clear_used();
6465 }
6466 }
6467 assert(_allocator->used_unlocked() == recalculate_used(),
6468 err_msg("inconsistent _allocator->used_unlocked(), "
6469 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6470 _allocator->used_unlocked(), recalculate_used()));
6471 }
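
// A compact model of the rebuild pass above: visit each region, put empty
// ones on the free list, leave humongous ones alone, treat everything else
// as old (keeping archive regions' type), and total the bytes in use.
// Types and names below are illustrative only.

#include <cstddef>
#include <vector>

enum class Kind { Free, Humongous, Old, Archive };

struct Region {
  Kind   kind;
  size_t used_bytes;
  bool is_empty() const { return used_bytes == 0; }
};

size_t rebuild_sets(std::vector<Region>& regions,
                    std::vector<Region*>& free_list,
                    std::vector<Region*>& old_set) {
  size_t total_used = 0;
  for (Region& r : regions) {
    if (r.is_empty()) {
      r.kind = Kind::Free;
      free_list.push_back(&r);
    } else if (r.kind == Kind::Humongous) {
      // The humongous set is left unchanged, as in the closure above.
    } else {
      if (r.kind != Kind::Archive) {
        r.kind = Kind::Old;      // archive regions keep their type
      }
      old_set.push_back(&r);
      total_used += r.used_bytes;
    }
  }
  return total_used;
}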
6472
6473 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6474 _refine_cte_cl->set_concurrent(concurrent);
6475 }
6476
6477 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6478 HeapRegion* hr = heap_region_containing(p);
6479 return hr->is_in(p);
6480 }
6481
6482 // Methods for the mutator alloc region
6483
6484 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6485 bool force) {
6546 return new_alloc_region;
6547 }
6548 }
6549 return NULL;
6550 }
6551
6552 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6553 size_t allocated_bytes,
6554 InCSetState dest) {
6555 bool during_im = g1_policy()->during_initial_mark_pause();
6556 alloc_region->note_end_of_copying(during_im);
6557 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6558 if (dest.is_young()) {
6559 young_list()->add_survivor_region(alloc_region);
6560 } else {
6561 _old_set.add(alloc_region);
6562 }
6563 _hr_printer.retire(alloc_region);
6564 }
6565
6566 HeapRegion* G1CollectedHeap::alloc_highest_available_region() {
6567 bool expanded = false;
6568 uint index = _hrm.find_highest_available(&expanded);
6569
6570 if (index != G1_NO_HRM_INDEX) {
6571 if (expanded) {
6572 ergo_verbose1(ErgoHeapSizing,
6573 "attempt heap expansion",
6574 ergo_format_reason("requested address range outside heap bounds")
6575 ergo_format_byte("region size"),
6576 HeapRegion::GrainWords * HeapWordSize);
6577 }
6578 _hrm.allocate_free_regions_starting_at(index, 1);
6579 return region_at(index);
6580 }
6581 return NULL;
6582 }
6583
6584
6585 // Heap region set verification
6586
6587 class VerifyRegionListsClosure : public HeapRegionClosure {
6588 private:
6589 HeapRegionSet* _old_set;
6590 HeapRegionSet* _humongous_set;
6591 HeapRegionManager* _hrm;
6592
6593 public:
6594 HeapRegionSetCount _old_count;
6595 HeapRegionSetCount _humongous_count;
6596 HeapRegionSetCount _free_count;
6597
6598 VerifyRegionListsClosure(HeapRegionSet* old_set,
6599 HeapRegionSet* humongous_set,
6600 HeapRegionManager* hrm) :
6601 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6602 _old_count(), _humongous_count(), _free_count(){ }
6603
6604 bool doHeapRegion(HeapRegion* hr) {
6605 if (hr->is_continues_humongous()) {
6606 return false;
6607 }
6608
6609 if (hr->is_young()) {
6610 // TODO
6611 } else if (hr->is_starts_humongous()) {
6612 assert(hr->containing_set() == _humongous_set, err_msg("Heap region %u is starts humongous but not in humongous set.", hr->hrm_index()));
6613 _humongous_count.increment(1u, hr->capacity());
6614 } else if (hr->is_empty()) {
6615 assert(_hrm->is_free(hr), err_msg("Heap region %u is empty but not on the free list.", hr->hrm_index()));
6616 _free_count.increment(1u, hr->capacity());
6617 } else if (hr->is_old()) {
6618 assert(hr->containing_set() == _old_set, err_msg("Heap region %u is old but not in the old set.", hr->hrm_index()));
6619 _old_count.increment(1u, hr->capacity());
6620 } else {
6621 assert(!hr->is_pinned(), err_msg("Heap region %u is pinned but not old (archive) or humongous.", hr->hrm_index()));
6622 ShouldNotReachHere();
6623 }
6624 return false;
6625 }
6626
6627 void verify_counts(HeapRegionSet* old_set, HeapRegionSet* humongous_set, HeapRegionManager* free_list) {
6628 guarantee(old_set->length() == _old_count.length(), err_msg("Old set count mismatch. Expected %u, actual %u.", old_set->length(), _old_count.length()));
6629 guarantee(old_set->total_capacity_bytes() == _old_count.capacity(), err_msg("Old set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6630 old_set->total_capacity_bytes(), _old_count.capacity()));
6631
6632 guarantee(humongous_set->length() == _humongous_count.length(), err_msg("Hum set count mismatch. Expected %u, actual %u.", humongous_set->length(), _humongous_count.length()));
6633 guarantee(humongous_set->total_capacity_bytes() == _humongous_count.capacity(), err_msg("Hum set capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6634 humongous_set->total_capacity_bytes(), _humongous_count.capacity()));
6635
6636 guarantee(free_list->num_free_regions() == _free_count.length(), err_msg("Free list count mismatch. Expected %u, actual %u.", free_list->num_free_regions(), _free_count.length()));
6637 guarantee(free_list->total_capacity_bytes() == _free_count.capacity(), err_msg("Free list capacity mismatch. Expected " SIZE_FORMAT ", actual " SIZE_FORMAT,
6638 free_list->total_capacity_bytes(), _free_count.capacity()));
6639 }
6640 };
6641
|