45 #include "gc/g1/g1HeapVerifier.hpp"
46 #include "gc/g1/g1HotCardCache.hpp"
47 #include "gc/g1/g1MemoryPool.hpp"
48 #include "gc/g1/g1OopClosures.inline.hpp"
49 #include "gc/g1/g1ParScanThreadState.inline.hpp"
50 #include "gc/g1/g1Policy.hpp"
51 #include "gc/g1/g1RegionToSpaceMapper.hpp"
52 #include "gc/g1/g1RemSet.hpp"
53 #include "gc/g1/g1RootClosures.hpp"
54 #include "gc/g1/g1RootProcessor.hpp"
55 #include "gc/g1/g1SATBMarkQueueSet.hpp"
56 #include "gc/g1/g1StringDedup.hpp"
57 #include "gc/g1/g1ThreadLocalData.hpp"
58 #include "gc/g1/g1YCTypes.hpp"
59 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
60 #include "gc/g1/heapRegion.inline.hpp"
61 #include "gc/g1/heapRegionRemSet.hpp"
62 #include "gc/g1/heapRegionSet.inline.hpp"
63 #include "gc/g1/vm_operations_g1.hpp"
64 #include "gc/shared/adaptiveSizePolicy.hpp"
65 #include "gc/shared/gcHeapSummary.hpp"
66 #include "gc/shared/gcId.hpp"
67 #include "gc/shared/gcLocker.hpp"
68 #include "gc/shared/gcTimer.hpp"
69 #include "gc/shared/gcTrace.hpp"
70 #include "gc/shared/gcTraceTime.inline.hpp"
71 #include "gc/shared/generationSpec.hpp"
72 #include "gc/shared/isGCActiveMark.hpp"
73 #include "gc/shared/oopStorageParState.hpp"
74 #include "gc/shared/parallelCleaning.hpp"
75 #include "gc/shared/preservedMarks.inline.hpp"
76 #include "gc/shared/suspendibleThreadSet.hpp"
77 #include "gc/shared/referenceProcessor.inline.hpp"
78 #include "gc/shared/taskqueue.inline.hpp"
79 #include "gc/shared/weakProcessor.inline.hpp"
80 #include "logging/log.hpp"
81 #include "memory/allocation.hpp"
82 #include "memory/iterator.hpp"
83 #include "memory/resourceArea.hpp"
84 #include "oops/access.inline.hpp"
211
212 // The total word size of all the regions we will allocate.
213 size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
214 assert(word_size <= word_size_sum, "sanity");
215
216 // This will be the "starts humongous" region.
217 HeapRegion* first_hr = region_at(first);
218 // The header of the new object will be placed at the bottom of
219 // the first region.
220 HeapWord* new_obj = first_hr->bottom();
221 // This will be the new top of the new object.
222 HeapWord* obj_top = new_obj + word_size;
223
224 // First, we need to zero the header of the space that we will be
225 // allocating. When we update top further down, some refinement
226 // threads might try to scan the region. By zeroing the header we
227 // ensure that any thread that tries to scan the region will
228 // come across the zero klass word and bail out.
229 //
230 // NOTE: It would not have been correct to use
231 // CollectedHeap::fill_with_object() to make the space look like
232 // an int array. The thread that is doing the allocation would
233 // later update the object header to a potentially different array
234 // type and, for a very short period of time, the klass and length
235 // fields would be inconsistent. This could cause a refinement
236 // thread to calculate the object size incorrectly.
237 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
238
239 // Next, pad out the unused tail of the last region with filler
240 // objects, for improved usage accounting.
241 // How many words we use for filler objects.
242 size_t word_fill_size = word_size_sum - word_size;
243
244 // How many words of memory we "waste" because they cannot hold a filler object.
245 size_t words_not_fillable = 0;
246
247 if (word_fill_size >= min_fill_size()) {
248 fill_with_objects(obj_top, word_fill_size);
249 } else if (word_fill_size > 0) {
250 // We have space to fill, but we cannot fit an object there.
251 words_not_fillable = word_fill_size;
252 word_fill_size = 0;
253 }
254
255 // We will set up the first region as "starts humongous". This
256 // will also update the BOT covering all the regions to reflect
257 // that there is a single object that starts at the bottom of the
258 // first region.
259 first_hr->set_starts_humongous(obj_top, word_fill_size);
260 _g1_policy->remset_tracker()->update_at_allocate(first_hr);
261 // Then, if there are any, we will set up the "continues
262 // humongous" regions.
263 HeapRegion* hr = NULL;
264 for (uint i = first + 1; i <= last; ++i) {
265 hr = region_at(i);
266 hr->set_continues_humongous(first_hr);
267 _g1_policy->remset_tracker()->update_at_allocate(hr);
268 }
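// [Editor's note: illustrative sketch, not part of the source. Assuming a
// humongous object that needs three regions, the layout set up above is:
//
//   first region            middle region           last region
//   +----------------------+----------------------+----------------------+
//   | object ...           | ... object ...       | ...object| fill |gap |
//   +----------------------+----------------------+----------------------+
//   ^ new_obj (= bottom of first_hr)               obj_top ^
//
// word_fill_size = word_size_sum - word_size is the tail of the last
// region. It is filled with filler objects when it is at least
// min_fill_size() words; a smaller tail (a word or two) cannot hold any
// object and is accounted as words_not_fillable instead.]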
704 // alloc_archive_regions.
705 HeapRegion* curr_region = start_region;
706 while (curr_region != NULL) {
707 guarantee(curr_region->is_archive(),
708 "Expected archive region at index %u", curr_region->hrm_index());
709 if (curr_region != last_region) {
710 curr_region = _hrm.next_region_in_heap(curr_region);
711 } else {
712 curr_region = NULL;
713 }
714 }
715
716 prev_last_addr = last_address;
717 prev_last_region = last_region;
718
719 // Fill the memory below the allocated range with dummy object(s)
720 // if the region bottom does not match the range start, or if the previous
721 // range ended within the same G1 region and there is a gap.
722 if (start_address != bottom_address) {
723 size_t fill_size = pointer_delta(start_address, bottom_address);
724 G1CollectedHeap::fill_with_objects(bottom_address, fill_size);
725 increase_used(fill_size * HeapWordSize);
726 }
727 }
728 }
729
730 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
731 size_t desired_word_size,
732 size_t* actual_word_size) {
733 assert_heap_not_locked_and_not_at_safepoint();
734 assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
735 "be called for humongous allocation requests");
736
737 HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
738
739 if (result == NULL) {
740 *actual_word_size = desired_word_size;
741 result = attempt_allocation_slow(desired_word_size);
742 }
743
744 assert_heap_not_locked();
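// [Editor's note: hypothetical usage sketch, not part of the source. The
// fast path may hand back anywhere between min_word_size and
// desired_word_size words, reported through *actual_word_size; the slow
// path always allocates exactly desired_word_size. Names below are
// illustrative only:
//
//   size_t actual_words = 0;
//   HeapWord* buf = g1h->attempt_allocation(min_words, desired_words,
//                                           &actual_words);
//   if (buf != NULL) {
//     // safe to use [buf, buf + actual_words)
//   }
// ]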
1524 _ref_processor_stw(NULL),
1525 _is_alive_closure_stw(this),
1526 _is_subject_to_discovery_stw(this),
1527 _ref_processor_cm(NULL),
1528 _is_alive_closure_cm(this),
1529 _is_subject_to_discovery_cm(this),
1530 _in_cset_fast_test() {
1531
1532 _workers = new WorkGang("GC Thread", ParallelGCThreads,
1533 true /* are_GC_task_threads */,
1534 false /* are_ConcurrentGC_threads */);
1535 _workers->initialize_workers();
1536 _verifier = new G1HeapVerifier(this);
1537
1538 _allocator = new G1Allocator(this);
1539
1540 _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1541
1542 _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1543
1544 // Override the default _filler_array_max_size so that no humongous filler
1545 // objects are created.
1546 _filler_array_max_size = _humongous_object_threshold_in_words;
1547
1548 uint n_queues = ParallelGCThreads;
1549 _task_queues = new RefToScanQueueSet(n_queues);
1550
1551 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1552
1553 for (uint i = 0; i < n_queues; i++) {
1554 RefToScanQueue* q = new RefToScanQueue();
1555 q->initialize();
1556 _task_queues->register_queue(i, q);
1557 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1558 }
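// [Editor's note: descriptive comment, not part of the source.
// NEW_C_HEAP_ARRAY performs a raw C-heap allocation and runs no
// constructors, which is why each EvacuationFailedInfo element is
// constructed in place with placement new in the loop above.]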
1559
1560 // Initialize the G1EvacuationFailureALot counters and flags.
1561 NOT_PRODUCT(reset_evacuation_should_fail();)
1562
1563 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1564 }
1565
1566 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1966 case GCCause::_wb_conc_mark: return true;
1967 default : return false;
1968 }
1969 }
1970
1971 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
1972 switch (cause) {
1973 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
1974 case GCCause::_g1_humongous_allocation: return true;
1975 default: return is_user_requested_concurrent_full_gc(cause);
1976 }
1977 }
1978
1979 #ifndef PRODUCT
1980 void G1CollectedHeap::allocate_dummy_regions() {
1981 // Let's fill up most of the region
1982 size_t word_size = HeapRegion::GrainWords - 1024;
1983 // And as a result the region we'll allocate will be humongous.
1984 guarantee(is_humongous(word_size), "sanity");
1985
1986 // _filler_array_max_size is set to the humongous object threshold, but
1987 // we temporarily change it so that CollectedHeap::fill_with_object() can be used.
1988 SizeTFlagSetting fs(_filler_array_max_size, word_size);
1989
1990 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
1991 // Let's use the existing mechanism for the allocation
1992 HeapWord* dummy_obj = humongous_obj_allocate(word_size);
1993 if (dummy_obj != NULL) {
1994 MemRegion mr(dummy_obj, word_size);
1995 CollectedHeap::fill_with_object(mr);
1996 } else {
1997 // If we can't allocate once, we probably cannot allocate
1998 // again. Let's get out of the loop.
1999 break;
2000 }
2001 }
2002 }
2003 #endif // !PRODUCT
2004
2005 void G1CollectedHeap::increment_old_marking_cycles_started() {
2006 assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2007 _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2008 "Wrong marking cycle count (started: %d, completed: %d)",
2009 _old_marking_cycles_started, _old_marking_cycles_completed);
2010
2011 _old_marking_cycles_started++;
2012 }
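// [Editor's note: illustrative timeline, assuming the usual G1 pairing of
// these counters; not part of the source.
//
//   concurrent cycle starts:   increment_old_marking_cycles_started()
//                              -> started == completed + 1
//   cycle finishes (or a Full GC completes the marking work):
//                              increment_old_marking_cycles_completed()
//                              -> started == completed
//
// The assert above admits exactly these two states.]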
2013
2014 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2015 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2016
2017 // We assume that if concurrent == true, then the caller is a
2018 // concurrent thread that has joined the Suspendible Thread
2019 // Set. If there's ever a cheap way to check this, we should add an
2020 // assert here.
2021
|
45 #include "gc/g1/g1HeapVerifier.hpp"
46 #include "gc/g1/g1HotCardCache.hpp"
47 #include "gc/g1/g1MemoryPool.hpp"
48 #include "gc/g1/g1OopClosures.inline.hpp"
49 #include "gc/g1/g1ParScanThreadState.inline.hpp"
50 #include "gc/g1/g1Policy.hpp"
51 #include "gc/g1/g1RegionToSpaceMapper.hpp"
52 #include "gc/g1/g1RemSet.hpp"
53 #include "gc/g1/g1RootClosures.hpp"
54 #include "gc/g1/g1RootProcessor.hpp"
55 #include "gc/g1/g1SATBMarkQueueSet.hpp"
56 #include "gc/g1/g1StringDedup.hpp"
57 #include "gc/g1/g1ThreadLocalData.hpp"
58 #include "gc/g1/g1YCTypes.hpp"
59 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
60 #include "gc/g1/heapRegion.inline.hpp"
61 #include "gc/g1/heapRegionRemSet.hpp"
62 #include "gc/g1/heapRegionSet.inline.hpp"
63 #include "gc/g1/vm_operations_g1.hpp"
64 #include "gc/shared/adaptiveSizePolicy.hpp"
65 #include "gc/shared/fill.hpp"
66 #include "gc/shared/gcHeapSummary.hpp"
67 #include "gc/shared/gcId.hpp"
68 #include "gc/shared/gcLocker.hpp"
69 #include "gc/shared/gcTimer.hpp"
70 #include "gc/shared/gcTrace.hpp"
71 #include "gc/shared/gcTraceTime.inline.hpp"
72 #include "gc/shared/generationSpec.hpp"
73 #include "gc/shared/isGCActiveMark.hpp"
74 #include "gc/shared/oopStorageParState.hpp"
75 #include "gc/shared/parallelCleaning.hpp"
76 #include "gc/shared/preservedMarks.inline.hpp"
77 #include "gc/shared/suspendibleThreadSet.hpp"
78 #include "gc/shared/referenceProcessor.inline.hpp"
79 #include "gc/shared/taskqueue.inline.hpp"
80 #include "gc/shared/weakProcessor.inline.hpp"
81 #include "logging/log.hpp"
82 #include "memory/allocation.hpp"
83 #include "memory/iterator.hpp"
84 #include "memory/resourceArea.hpp"
85 #include "oops/access.inline.hpp"
212
213 // The total word size of all the regions we will allocate.
214 size_t word_size_sum = (size_t) num_regions * HeapRegion::GrainWords;
215 assert(word_size <= word_size_sum, "sanity");
216
217 // This will be the "starts humongous" region.
218 HeapRegion* first_hr = region_at(first);
219 // The header of the new object will be placed at the bottom of
220 // the first region.
221 HeapWord* new_obj = first_hr->bottom();
222 // This will be the new top of the new object.
223 HeapWord* obj_top = new_obj + word_size;
224
225 // First, we need to zero the header of the space that we will be
226 // allocating. When we update top further down, some refinement
227 // threads might try to scan the region. By zeroing the header we
228 // ensure that any thread that tries to scan the region will
229 // come across the zero klass word and bail out.
230 //
231 // NOTE: It would not have been correct to use Fill::range()
232 // to make the space look like an int array. The thread that is
233 // doing the allocation would later update the object header to
234 // a potentially different array type and, for a very short
235 // period of time, the klass and length fields would be
236 // inconsistent. This could cause a refinement thread to
237 // calculate the object size incorrectly.
238 Copy::fill_to_words(new_obj, oopDesc::header_size(), 0);
239
240 // Next, pad out the unused tail of the last region with filler
241 // objects, for improved usage accounting.
242 // How many words we use for filler objects.
243 size_t word_fill_size = word_size_sum - word_size;
244
245 // How many words of memory we "waste" because they cannot hold a filler object.
246 size_t words_not_fillable = 0;
247
248 if (word_fill_size >= Fill::min_size()) {
249 Fill::range(obj_top, word_fill_size);
250 } else if (word_fill_size > 0) {
251 // We have space to fill, but we cannot fit an object there.
252 words_not_fillable = word_fill_size;
253 word_fill_size = 0;
254 }
255
256 // We will set up the first region as "starts humongous". This
257 // will also update the BOT covering all the regions to reflect
258 // that there is a single object that starts at the bottom of the
259 // first region.
260 first_hr->set_starts_humongous(obj_top, word_fill_size);
261 _g1_policy->remset_tracker()->update_at_allocate(first_hr);
262 // Then, if there are any, we will set up the "continues
263 // humongous" regions.
264 HeapRegion* hr = NULL;
265 for (uint i = first + 1; i <= last; ++i) {
266 hr = region_at(i);
267 hr->set_continues_humongous(first_hr);
268 _g1_policy->remset_tracker()->update_at_allocate(hr);
269 }
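// [Editor's note: gc/shared/fill.hpp is not part of this excerpt. Judging
// only from the calls in this patch (Fill::range(), Fill::min_size(),
// Fill::max_size(), Fill::set_max_size()), the new interface is presumably
// an AllStatic class along these lines -- a sketch, not the actual header:
//
//   class Fill : public AllStatic {
//   public:
//     static size_t min_size();                 // smallest fillable gap, in words
//     static size_t max_size();                 // largest single filler object
//     static void   set_max_size(size_t words);
//     static void   range(HeapWord* start, size_t words); // fill [start, start + words)
//   };
// ]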
705 // alloc_archive_regions.
706 HeapRegion* curr_region = start_region;
707 while (curr_region != NULL) {
708 guarantee(curr_region->is_archive(),
709 "Expected archive region at index %u", curr_region->hrm_index());
710 if (curr_region != last_region) {
711 curr_region = _hrm.next_region_in_heap(curr_region);
712 } else {
713 curr_region = NULL;
714 }
715 }
716
717 prev_last_addr = last_address;
718 prev_last_region = last_region;
719
720 // Fill the memory below the allocated range with dummy object(s)
721 // if the region bottom does not match the range start, or if the previous
722 // range ended within the same G1 region and there is a gap.
723 if (start_address != bottom_address) {
724 size_t fill_size = pointer_delta(start_address, bottom_address);
725 Fill::range(bottom_address, fill_size);
726 increase_used(fill_size * HeapWordSize);
727 }
728 }
729 }
730
731 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
732 size_t desired_word_size,
733 size_t* actual_word_size) {
734 assert_heap_not_locked_and_not_at_safepoint();
735 assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
736 "be called for humongous allocation requests");
737
738 HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
739
740 if (result == NULL) {
741 *actual_word_size = desired_word_size;
742 result = attempt_allocation_slow(desired_word_size);
743 }
744
745 assert_heap_not_locked();
1525 _ref_processor_stw(NULL),
1526 _is_alive_closure_stw(this),
1527 _is_subject_to_discovery_stw(this),
1528 _ref_processor_cm(NULL),
1529 _is_alive_closure_cm(this),
1530 _is_subject_to_discovery_cm(this),
1531 _in_cset_fast_test() {
1532
1533 _workers = new WorkGang("GC Thread", ParallelGCThreads,
1534 true /* are_GC_task_threads */,
1535 false /* are_ConcurrentGC_threads */);
1536 _workers->initialize_workers();
1537 _verifier = new G1HeapVerifier(this);
1538
1539 _allocator = new G1Allocator(this);
1540
1541 _heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
1542
1543 _humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
1544
1545 // Override the default Fill::max_size() so that no humongous filler
1546 // objects are created.
1547 Fill::set_max_size(_humongous_object_threshold_in_words);
1548
1549 uint n_queues = ParallelGCThreads;
1550 _task_queues = new RefToScanQueueSet(n_queues);
1551
1552 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1553
1554 for (uint i = 0; i < n_queues; i++) {
1555 RefToScanQueue* q = new RefToScanQueue();
1556 q->initialize();
1557 _task_queues->register_queue(i, q);
1558 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1559 }
1560
1561 // Initialize the G1EvacuationFailureALot counters and flags.
1562 NOT_PRODUCT(reset_evacuation_should_fail();)
1563
1564 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1565 }
1566
1567 G1RegionToSpaceMapper* G1CollectedHeap::create_aux_memory_mapper(const char* description,
1967 case GCCause::_wb_conc_mark: return true;
1968 default : return false;
1969 }
1970 }
1971
1972 bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
1973 switch (cause) {
1974 case GCCause::_gc_locker: return GCLockerInvokesConcurrent;
1975 case GCCause::_g1_humongous_allocation: return true;
1976 default: return is_user_requested_concurrent_full_gc(cause);
1977 }
1978 }
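// [Editor's note: behavior summary based on the cases above plus
// is_user_requested_concurrent_full_gc(), which is not shown in this
// excerpt. For example, System.gc() arrives as _java_lang_system_gc and
// falls through to the default arm, so it starts a concurrent cycle only
// when -XX:+ExplicitGCInvokesConcurrent is set -- an assumption of
// standard HotSpot behavior.]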
1979
1980 #ifndef PRODUCT
1981 void G1CollectedHeap::allocate_dummy_regions() {
1982 // Let's fill up most of the region
1983 size_t word_size = HeapRegion::GrainWords - 1024;
1984 // And as a result the region we'll allocate will be humongous.
1985 guarantee(is_humongous(word_size), "sanity");
1986
1987 // Fill::max_size() is set to the humongous object threshold, but we
1988 // temporarily change it so that Fill::range() can be used.
1989 const size_t prev_fill_max_size = Fill::max_size();
1990 Fill::set_max_size(word_size);
1991
1992 for (uintx i = 0; i < G1DummyRegionsPerGC; ++i) {
1993 // Let's use the existing mechanism for the allocation
1994 HeapWord* dummy_obj = humongous_obj_allocate(word_size);
1995 if (dummy_obj != NULL) {
1996 Fill::range(dummy_obj, word_size);
1997 } else {
1998 // If we can't allocate once, we probably cannot allocate
1999 // again. Let's get out of the loop.
2000 break;
2001 }
2002 }
2003
2004 Fill::set_max_size(prev_fill_max_size);
2005 }
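// [Editor's note: design observation, not part of the patch. The old code
// restored _filler_array_max_size automatically via the RAII helper
// SizeTFlagSetting; the new code saves and restores Fill::max_size() by
// hand. A hypothetical RAII wrapper along the same lines would keep the
// restore automatic:
//
//   class FillMaxSizeSetting : public StackObj {
//     const size_t _saved;
//   public:
//     FillMaxSizeSetting(size_t words) : _saved(Fill::max_size()) {
//       Fill::set_max_size(words);
//     }
//     ~FillMaxSizeSetting() { Fill::set_max_size(_saved); }
//   };
// ]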
2006 #endif // !PRODUCT
2007
2008 void G1CollectedHeap::increment_old_marking_cycles_started() {
2009 assert(_old_marking_cycles_started == _old_marking_cycles_completed ||
2010 _old_marking_cycles_started == _old_marking_cycles_completed + 1,
2011 "Wrong marking cycle count (started: %d, completed: %d)",
2012 _old_marking_cycles_started, _old_marking_cycles_completed);
2013
2014 _old_marking_cycles_started++;
2015 }
2016
2017 void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
2018 MonitorLockerEx x(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
2019
2020 // We assume that if concurrent == true, then the caller is a
2021 // concurrent thread that has joined the Suspendible Thread
2022 // Set. If there's ever a cheap way to check this, we should add an
2023 // assert here.
2024
|