
void G1RegionMappingChangedListener::on_commit(uint start_idx, size_t num_regions, bool zero_filled) {
  // The from card cache is not the memory that is actually committed. So we cannot
  // take advantage of the zero_filled parameter.
  reset_from_card_cache(start_idx, num_regions);
}

Tickspan G1CollectedHeap::run_task(AbstractGangTask* task) {
  Ticks start = Ticks::now();
  workers()->run_task(task, workers()->active_workers());
  return Ticks::now() - start;
}
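
// Usage sketch (illustrative only; "G1SampleTask" is a hypothetical
// AbstractGangTask subclass, not something defined in this file):
//
//   G1SampleTask task;
//   Tickspan elapsed = run_task(&task);  // runs on all active workers
//   // elapsed can then be fed into the pause timing bookkeeping.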

HeapRegion* G1CollectedHeap::new_heap_region(uint hrs_index,
                                             MemRegion mr) {
  return new HeapRegion(hrs_index, bot(), mr);
}

// Private methods.

HeapRegion* G1CollectedHeap::new_region(size_t word_size,
                                        HeapRegionType type,
                                        bool do_expand,
                                        uint node_index) {
  assert(!is_humongous(word_size) || word_size <= HeapRegion::GrainWords,
         "the only time we use this to allocate a humongous region is "
         "when we are allocating a single humongous region");

  HeapRegion* res = _hrm->allocate_free_region(type, node_index);

  if (res == NULL && do_expand && _expand_heap_after_alloc_failure) {
    // Currently, only attempts to allocate GC alloc regions set
    // do_expand to true. So, we should only reach here during a
    // safepoint. If this assumption changes we might have to
    // reconsider the use of _expand_heap_after_alloc_failure.
    assert(SafepointSynchronize::is_at_safepoint(), "invariant");

    log_debug(gc, ergo, heap)("Attempt heap expansion (region allocation request failed). Allocation request: " SIZE_FORMAT "B",
                              word_size * HeapWordSize);

    assert(word_size * HeapWordSize < HeapRegion::GrainBytes,
           "This kind of expansion should never be more than one region. Size: " SIZE_FORMAT,
           word_size * HeapWordSize);
    if (expand_single_region(node_index)) {
      // Given that expand_single_region() succeeded in expanding the heap
      // by exactly one region, the free list should in theory not be
      // empty. Even if it is, allocate_free_region() below will simply
      // return NULL, which we handle.
      res = _hrm->allocate_free_region(type, node_index);
    } else {
      _expand_heap_after_alloc_failure = false;
    }
  }
  return res;
}
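
// Usage sketch (illustrative, with assumed argument values; not a call
// site copied from this file): a GC allocation path might request an old
// region and allow heap expansion on failure:
//
//   HeapRegion* r = new_region(word_size,
//                              HeapRegionType::Old,
//                              true /* do_expand */,
//                              node_index);
//   if (r == NULL) {
//     // No free region and expansion failed: report allocation failure.
//   }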

HeapWord*
G1CollectedHeap::humongous_obj_allocate_initialize_regions(uint first,
                                                           uint num_regions,
                                                           size_t word_size) {
  assert(first != G1_NO_HRM_INDEX, "pre-condition");
  assert(is_humongous(word_size), "word_size should be humongous");
  assert(num_regions * HeapRegion::GrainWords >= word_size, "pre-condition");

  // Index of last region in the series.
  uint last = first + num_regions - 1;
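
  // Worked example (illustrative, assuming 1M regions on a 64-bit VM, so
  // HeapRegion::GrainWords == 131072): a humongous request of
  // word_size == 300000 needs num_regions == 3, so with first == 10 the
  // series is regions 10, 11 and 12, i.e. last == 12.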

  // We need to initialize the region(s) we just discovered. This is
  // a bit tricky given that it can happen concurrently with
  // scanning the root regions we might trip them over as we'll
  // be moving objects / updating references. So let's wait until
  // they are done. By telling them to abort, they should complete
  // early.
  _cm->root_regions()->abort();
  _cm->root_regions()->wait_until_scan_finished();

  // Disable discovery and empty the discovered lists
  // for the CM ref processor.
  _ref_processor_cm->disable_discovery();
  _ref_processor_cm->abandon_partial_discovery();
  _ref_processor_cm->verify_no_references_recorded();

  // Abandon current iterations of concurrent marking and concurrent
  // refinement, if any are in progress.
  concurrent_mark()->concurrent_cycle_abort();
}

void G1CollectedHeap::prepare_heap_for_full_collection() {
  // Make sure we'll choose a new allocation region afterwards.
  _allocator->release_mutator_alloc_regions();
  _allocator->abandon_gc_alloc_regions();

  // We may have added regions to the current incremental collection
  // set between the last GC or pause and now. We need to clear the
  // incremental collection set and then start rebuilding it afresh
  // after this full GC.
  abandon_collection_set(collection_set());

  tear_down_region_sets(false /* free_list_only */);

  hrm()->prepare_for_full_collection_start();
}

void G1CollectedHeap::verify_before_full_collection(bool explicit_gc) {
  assert(!GCCause::is_user_requested_gc(gc_cause()) || explicit_gc, "invariant");
  assert_used_and_recalculate_used_equal(this);
  _verifier->verify_region_sets_optional();
  _verifier->verify_before_gc(G1HeapVerifier::G1VerifyFull);
  _verifier->check_bitmaps("Full GC Start");
}

  // Delete metaspaces for unloaded class loaders and clean up loader_data graph
  ClassLoaderDataGraph::purge();
  MetaspaceUtils::verify_metrics();

  // Prepare heap for normal collections.
  assert(num_free_regions() == 0, "we should not have added any free regions");
  rebuild_region_sets(false /* free_list_only */);
  abort_refinement();
  resize_heap_if_necessary();

  // Rebuild the strong code root lists for each region
  rebuild_strong_code_roots();

  // Purge code root memory
  purge_code_root_memory();

  // Start a new incremental collection set for the next pause
  start_new_collection_set();

  _allocator->init_mutator_alloc_regions();

  // Post collection state updates.
  MetaspaceGC::compute_new_size();
}

void G1CollectedHeap::abort_refinement() {
  if (_hot_card_cache->use_cache()) {
    _hot_card_cache->reset_hot_cache();
  }

  // Discard all remembered set updates.
  G1BarrierSet::dirty_card_queue_set().abandon_logs();
  assert(G1BarrierSet::dirty_card_queue_set().num_cards() == 0,
         "DCQS should be empty");
}

void G1CollectedHeap::verify_after_full_collection() {
  _hrm->verify_optional();
  _verifier->verify_region_sets_optional();
  _verifier->verify_after_gc(G1HeapVerifier::G1VerifyFull);
  }

  if (expanded_by > 0) {
    size_t actual_expand_bytes = expanded_by * HeapRegion::GrainBytes;
    assert(actual_expand_bytes <= aligned_expand_bytes, "post-condition");
    policy()->record_new_heap_size(num_regions());
  } else {
    log_debug(gc, ergo, heap)("Did not expand the heap (heap expansion operation failed)");

    // The expansion of the virtual storage space was unsuccessful.
    // Let's see if it was because we ran out of swap.
    if (G1ExitOnExpansionFailure &&
        _hrm->available() >= regions_to_expand) {
      // We had head room...
      vm_exit_out_of_memory(aligned_expand_bytes, OOM_MMAP_ERROR, "G1 heap expansion");
    }
  }
  return regions_to_expand > 0;
}

bool G1CollectedHeap::expand_single_region(uint node_index) {
  uint expanded_by = _hrm->expand_on_preferred_node(node_index);

  if (expanded_by == 0) {
    assert(is_maximal_no_gc(), "Should be no regions left, available: %u", _hrm->available());
    log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
    return false;
  }

  policy()->record_new_heap_size(num_regions());
  return true;
}
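
// Illustrative note: unlike expand(), which takes a byte size, this
// expands by exactly one region, preferring the given NUMA node (the
// exact fallback behavior of expand_on_preferred_node() is assumed, not
// shown here). A hedged sketch of the only kind of caller above:
//
//   if (expand_single_region(node_index)) {
//     res = _hrm->allocate_free_region(type, node_index);
//   }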

void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
  size_t aligned_shrink_bytes =
    ReservedSpace::page_align_size_down(shrink_bytes);
  aligned_shrink_bytes = align_down(aligned_shrink_bytes,
                                    HeapRegion::GrainBytes);
  uint num_regions_to_remove = (uint)(aligned_shrink_bytes / HeapRegion::GrainBytes);

  uint num_regions_removed = _hrm->shrink_by(num_regions_to_remove);
  size_t shrunk_bytes = num_regions_removed * HeapRegion::GrainBytes;

  log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: " SIZE_FORMAT "B aligned shrinking amount: " SIZE_FORMAT "B attempted shrinking amount: " SIZE_FORMAT "B",
                            shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
  if (num_regions_removed > 0) {
    policy()->record_new_heap_size(num_regions());
  } else {
    log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
  }
}
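
// Worked example (illustrative, assuming 4K pages and
// HeapRegion::GrainBytes == 1M): a request of shrink_bytes == 2600K is
// already page aligned, align_down(2600K, 1M) == 2M, so
// num_regions_to_remove == 2; if shrink_by() only finds one free region
// at the top of the heap, shrunk_bytes ends up as 1M.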

void G1CollectedHeap::shrink(size_t shrink_bytes) {
  _verifier->verify_region_sets_optional();

  // We should only reach here at the end of a Full GC or during Remark which
  // means we should not be holding on to any GC alloc regions. The method
  // below will make sure of that and do any remaining clean up.
  _allocator->abandon_gc_alloc_regions();

  // Instead of tearing down / rebuilding the free lists here, we
  // could instead use the remove_all_pending() method on free_list to
  // remove only the ones that we need to remove.
    } else {
      guarantee(Heap_lock->owned_by_self(),
                "master humongous set MT safety protocol outside a safepoint");
    }
  }
  bool is_correct_type(HeapRegion* hr) { return hr->is_humongous(); }
  const char* get_description() { return "Humongous Regions"; }
};

G1CollectedHeap::G1CollectedHeap() :
  CollectedHeap(),
  _young_gen_sampling_thread(NULL),
  _workers(NULL),
  _card_table(NULL),
  _soft_ref_policy(),
  _old_set("Old Region Set", new OldRegionSetChecker()),
  _archive_set("Archive Region Set", new ArchiveRegionSetChecker()),
  _humongous_set("Humongous Region Set", new HumongousRegionSetChecker()),
  _bot(NULL),
  _listener(),
  _numa(G1NUMA::create()),
  _hrm(NULL),
  _allocator(NULL),
  _verifier(NULL),
  _summary_bytes_used(0),
  _archive_allocator(NULL),
  _survivor_evac_stats("Young", YoungPLABSize, PLABWeight),
  _old_evac_stats("Old", OldPLABSize, PLABWeight),
  _expand_heap_after_alloc_failure(true),
  _g1mm(NULL),
  _humongous_reclaim_candidates(),
  _has_humongous_reclaim_candidates(false),
  _hr_printer(),
  _collector_state(),
  _old_marking_cycles_started(0),
  _old_marking_cycles_completed(0),
  _eden(),
  _survivor(),
  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
  _policy(G1Policy::create_policy(_gc_timer_stw)),

  _bot = new G1BlockOffsetTable(reserved_region(), bot_storage);

  {
    HeapWord* start = _hrm->reserved().start();
    HeapWord* end = _hrm->reserved().end();
    size_t granularity = HeapRegion::GrainBytes;

    _region_attr.initialize(start, end, granularity);
    _humongous_reclaim_candidates.initialize(start, end, granularity);
  }
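
  // Illustrative note (an assumption about the mapping, not spelled out
  // here): initializing these tables with HeapRegion::GrainBytes
  // granularity gives one slot per region, so an address maps to its slot
  // roughly as
  //
  //   index = (addr - start) / HeapRegion::GrainBytes;
  //
  // e.g. with 1M regions, an address 5M past `start` lands in slot 5.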

  _workers = new WorkGang("GC Thread", ParallelGCThreads,
                          true /* are_GC_task_threads */,
                          false /* are_ConcurrentGC_threads */);
  if (_workers == NULL) {
    return JNI_ENOMEM;
  }
  _workers->initialize_workers();

  _numa->set_region_info(HeapRegion::GrainBytes, page_size);

  // Create the G1ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)
  _cm = new G1ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
  if (_cm == NULL || !_cm->completed_initialization()) {
    vm_shutdown_during_initialization("Could not create/initialize G1ConcurrentMark");
    return JNI_ENOMEM;
  }
  _cm_thread = _cm->cm_thread();

  // Now expand into the initial heap size.
  if (!expand(init_byte_size, _workers)) {
    vm_shutdown_during_initialization("Failed to allocate initial heap.");
    return JNI_ENOMEM;
  }

  // Perform any initialization actions delegated to the policy.
  policy()->init(this, &_collection_set);

  jint ecode = initialize_concurrent_refinement();
  if (ecode != JNI_OK) {

  {
    G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
    dcqs.set_process_cards_threshold(concurrent_refine()->yellow_zone());
    dcqs.set_max_cards(concurrent_refine()->red_zone());
  }
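
  // Illustrative note: the zones come from concurrent refinement sizing
  // (cf. the G1ConcRefinementYellowZone / G1ConcRefinementRedZone flags).
  // As assumed here, pending cards beyond the yellow zone make buffers
  // eligible for processing, while the red zone caps how many cards may
  // sit buffered before mutator threads must refine cards themselves.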

  // Here we allocate the dummy HeapRegion that is required by the
  // G1AllocRegion class.
  HeapRegion* dummy_region = _hrm->get_dummy_region();

  // We'll reuse the same region whether or not the alloc region will
  // require BOT updates; if it doesn't, a non-young region would complain
  // that it cannot support allocations without BOT updates. So we tag the
  // dummy region as eden to avoid that.
  dummy_region->set_eden();
  // Make sure it's full.
  dummy_region->set_top(dummy_region->end());
  G1AllocRegion::setup(this, dummy_region);
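
  // Illustrative note: the dummy region gives G1AllocRegion a non-NULL
  // placeholder, and since top == end every inline allocation against it
  // fails immediately, pushing callers into the slow path that installs a
  // real region. A hedged sketch of the resulting invariant:
  //
  //   assert(dummy_region->free() == 0, "dummy region should look full");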

  _allocator->init_mutator_alloc_regions();

  // Create the monitoring and management support now so that values
  // in the heap have been properly initialized.
  _g1mm = new G1MonitoringSupport(this);

  G1StringDedup::initialize();

  _preserved_marks_set.init(ParallelGCThreads);

  _collection_set.initialize(max_regions());

  return JNI_OK;
}

void G1CollectedHeap::stop() {
  // Stop all concurrent threads. We do this to make sure these threads
  // do not continue to execute and access resources (e.g. logging)
  // that are destroyed during shutdown.
  _cr->stop();
  _young_gen_sampling_thread->stop();
  {
    // Take the start time sample below only now, so that the measured
    // elapsed time deliberately excludes the possible verification above.
    double sample_start_time_sec = os::elapsedTime();

    // Please see comment in g1CollectedHeap.hpp and
    // G1CollectedHeap::ref_processing_init() to see how
    // reference processing currently works in G1.
    _ref_processor_stw->enable_discovery();

    // We want to temporarily turn off discovery by the
    // CM ref processor, if necessary, and turn it back
    // on again later if we do. Using a scoped
    // NoRefDiscovery object will do this.
    NoRefDiscovery no_cm_discovery(_ref_processor_cm);

    policy()->record_collection_pause_start(sample_start_time_sec);

    // Forget the current allocation region (we might even choose it to be part
    // of the collection set!).
    _allocator->release_mutator_alloc_regions();

    calculate_collection_set(evacuation_info, target_pause_time_ms);

    G1RedirtyCardsQueueSet rdcqs(G1BarrierSet::dirty_card_queue_set().allocator());
    G1ParScanThreadStateSet per_thread_states(this,
                                              &rdcqs,
                                              workers()->active_workers(),
                                              collection_set()->young_region_length(),
                                              collection_set()->optional_region_length());
    pre_evacuate_collection_set(evacuation_info, &per_thread_states);

    // Actually do the work...
    evacuate_initial_collection_set(&per_thread_states);

    if (_collection_set.optional_region_length() != 0) {
      evacuate_optional_collection_set(&per_thread_states);
    }
    post_evacuate_collection_set(evacuation_info, &rdcqs, &per_thread_states);

    start_new_collection_set();

    _survivor_evac_stats.adjust_desired_plab_sz();
    _old_evac_stats.adjust_desired_plab_sz();

    if (should_start_conc_mark) {
      // We have to do this before we notify the CM threads that
      // they can start working to make sure that all the
      // appropriate initialization is done on the CM object.
      concurrent_mark()->post_initial_mark();
      // Note that we don't actually trigger the CM thread at
      // this point. We do that later when we're sure that
      // the current thread has completed its logging output.
    }

    allocate_dummy_regions();

    _allocator->init_mutator_alloc_regions();

    expand_heap_after_young_collection();

    double sample_end_time_sec = os::elapsedTime();
    double pause_time_ms = (sample_end_time_sec - sample_start_time_sec) * MILLIUNITS;
    policy()->record_collection_pause_end(pause_time_ms, heap_used_bytes_before_gc);
  }

  verify_after_young_collection(verify_type);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  gc_epilogue(false);
  }

  // Print the remainder of the GC log output.
  if (evacuation_failed()) {
    log_info(gc)("To-space exhausted");

  if (!free_list_only) {
    _eden.clear();
    _survivor.clear();
  }

  RebuildRegionSetsClosure cl(free_list_only, &_old_set, _hrm);
  heap_region_iterate(&cl);

  if (!free_list_only) {
    set_used(cl.total_used());
    if (_archive_allocator != NULL) {
      _archive_allocator->clear_used();
    }
  }
  assert_used_and_recalculate_used_equal(this);
}
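
// Illustrative note on the free_list_only flag, based on the callers in
// this file: the post-full-GC path passes false /* free_list_only */ to
// rebuild all region sets and recompute used() (which the
// assert_used_and_recalculate_used_equal() check above relies on), while
// a caller such as shrink(), which only tears down the free list, would
// pass true.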

// Methods for the mutator alloc region

HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
                                                      bool force,
                                                      uint node_index) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  bool should_allocate = policy()->should_allocate_mutator_region();
  if (force || should_allocate) {
    HeapRegion* new_alloc_region = new_region(word_size,
                                              HeapRegionType::Eden,
                                              false /* do_expand */,
                                              node_index);
    if (new_alloc_region != NULL) {
      set_region_short_lived_locked(new_alloc_region);
      _hr_printer.alloc(new_alloc_region, !should_allocate);
      _verifier->check_bitmaps("Mutator Region Allocation", new_alloc_region);
      _policy->remset_tracker()->update_at_allocate(new_alloc_region);
      return new_alloc_region;
    }
  }
  return NULL;
}
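
// Usage sketch (illustrative): `force` bypasses the
// should_allocate_mutator_region() policy check, so a caller that must
// have a region regardless of policy could do:
//
//   HeapRegion* r = new_mutator_alloc_region(word_size,
//                                            true /* force */,
//                                            node_index);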

void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
                                                  size_t allocated_bytes) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");

  collection_set()->add_eden_region(alloc_region);
  increase_used(allocated_bytes);
  _eden.add_used_bytes(allocated_bytes);
  _hr_printer.retire(alloc_region);