316 if (_survivor_head != NULL) {
317 assert(_survivor_tail != NULL, "non-empty survivor list should have a tail");
318 assert(_survivor_length > 0, "invariant");
319 _survivor_tail->set_next_young_region(NULL);
320 }
321
322 // Don't clear the survivor list handles until the start of
323 // the next evacuation pause - we need them in order to re-tag
324 // the survivor regions from this evacuation pause as 'young'
325 // at the start of the next.
326
327 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
328
329 assert(check_list_well_formed(), "young list should be well formed");
330 }
331
332 void YoungList::print() {
333 HeapRegion* lists[] = {_head, _survivor_head};
334 const char* names[] = {"YOUNG", "SURVIVOR"};
335
336 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
337 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
338 HeapRegion *curr = lists[list];
339 if (curr == NULL)
340 gclog_or_tty->print_cr(" empty");
341 while (curr != NULL) {
342 gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
343 HR_FORMAT_PARAMS(curr),
344 curr->prev_top_at_mark_start(),
345 curr->next_top_at_mark_start(),
346 curr->age_in_surv_rate_group_cond());
347 curr = curr->get_next_young_region();
348 }
349 }
350
351 gclog_or_tty->cr();
352 }
353
354 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
355 OtherRegionsTable::invalidate(start_idx, num_regions);
356 }
748 if (first != G1_NO_HRM_INDEX) {
749 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
750 word_size, context);
751 assert(result != NULL, "it should always return a valid result");
752
753 // A successful humongous object allocation changes the used space
754 // information of the old generation so we need to recalculate the
755 // sizes and update the jstat counters here.
756 g1mm()->update_sizes();
757 }
758
759 verify_region_sets_optional();
760
761 return result;
762 }
763
764 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
765 assert_heap_not_locked_and_not_at_safepoint();
766 assert(!is_humongous(word_size), "we do not allow humongous TLABs");
767
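// attempt_allocation() requires these out parameters, but the TLAB
// path has no use for the values they report.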
768 unsigned int dummy_gc_count_before;
769 int dummy_gclocker_retry_count = 0;
770 return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
771 }
772
773 HeapWord*
774 G1CollectedHeap::mem_allocate(size_t word_size,
775 bool* gc_overhead_limit_was_exceeded) {
776 assert_heap_not_locked_and_not_at_safepoint();
777
778 // Loop until the allocation is satisfied, or unsatisfied after GC.
779 for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
780 unsigned int gc_count_before;
781
782 HeapWord* result = NULL;
783 if (!is_humongous(word_size)) {
784 result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
785 } else {
786 result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
787 }
788 if (result != NULL) {
789 return result;
790 }
791
792 // Create the garbage collection operation...
793 VM_G1CollectForAllocation op(gc_count_before, word_size);
794 op.set_allocation_context(AllocationContext::current());
795
796 // ...and get the VM thread to execute it.
797 VMThread::execute(&op);
798
799 if (op.prologue_succeeded() && op.pause_succeeded()) {
800 // If the operation was successful we'll return the result even
812 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
813 return NULL;
814 }
815 assert(op.result() == NULL,
816 "the result should be NULL if the VM op did not succeed");
817 }
818
819 // Give a warning if we seem to be looping forever.
820 if ((QueuedAllocationWarningCount > 0) &&
821 (try_count % QueuedAllocationWarningCount == 0)) {
822 warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
823 }
824 }
825
826 ShouldNotReachHere();
827 return NULL;
828 }
829
830 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
831 AllocationContext_t context,
832 unsigned int *gc_count_before_ret,
833 int* gclocker_retry_count_ret) {
834 // Make sure you read the note in attempt_allocation_humongous().
835
836 assert_heap_not_locked_and_not_at_safepoint();
837 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
838 "be called for humongous allocation requests");
839
840 // We should only get here after the first-level allocation attempt
841 // (attempt_allocation()) failed to allocate.
842
843 // We will loop until a) we manage to successfully perform the
844 // allocation or b) we successfully schedule a collection which
845 // fails to perform the allocation. b) is the only case when we'll
846 // return NULL.
847 HeapWord* result = NULL;
848 for (int try_count = 1; /* we'll return */; try_count += 1) {
849 bool should_try_gc;
850 unsigned int gc_count_before;
851
852 {
853 MutexLockerEx x(Heap_lock);
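// Retry the allocation now that we hold the Heap_lock;
// attempt_allocation_locked() will also try to allocate a new
// mutator region if the current one cannot satisfy the request.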
854 result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
855 false /* bot_updates */);
856 if (result != NULL) {
857 return result;
858 }
859
860 // If we reach here, attempt_allocation_locked() above failed to
861 // allocate a new region. So the mutator alloc region should be NULL.
862 assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
863
864 if (GC_locker::is_active_and_needs_gc()) {
865 if (g1_policy()->can_expand_young_list()) {
866 // No need for an ergo verbose message here,
867 // can_expand_young_list() does this when it returns true.
868 result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
869 false /* bot_updates */);
870 if (result != NULL) {
928 // iteration (after taking the Heap_lock).
929 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
930 false /* bot_updates */);
931 if (result != NULL) {
932 return result;
933 }
934
935 // Give a warning if we seem to be looping forever.
936 if ((QueuedAllocationWarningCount > 0) &&
937 (try_count % QueuedAllocationWarningCount == 0)) {
938 warning("G1CollectedHeap::attempt_allocation_slow() "
939 "retries %d times", try_count);
940 }
941 }
942
943 ShouldNotReachHere();
944 return NULL;
945 }
946
947 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
948 unsigned int * gc_count_before_ret,
949 int* gclocker_retry_count_ret) {
950 // The structure of this method has a lot of similarities to
951 // attempt_allocation_slow(). The reason these two were not merged
952 // into a single one is that such a method would require several "if
953 // allocation is not humongous do this, otherwise do that"
954 // conditional paths which would obscure its flow. In fact, an early
955 // version of this code did use a unified method which was harder to
956 // follow and, as a result, it had subtle bugs that were hard to
957 // track down. So keeping these two methods separate allows each to
958 // be more readable; they should, however, be kept in sync as
959 // much as possible.
960
961 assert_heap_not_locked_and_not_at_safepoint();
962 assert(is_humongous(word_size), "attempt_allocation_humongous() "
963 "should only be called for humongous allocations");
964
965 // Humongous objects can exhaust the heap quickly, so we should check if we
966 // need to start a marking cycle at each humongous object allocation. We do
967 // the check before we do the actual allocation. The reason for doing it
968 // before the allocation is that we avoid having to keep track of the newly
969 // allocated memory while we do a GC.
970 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
971 word_size)) {
972 collect(GCCause::_g1_humongous_allocation);
973 }
974
975 // We will loop until a) we manage to successfully perform the
976 // allocation or b) we successfully schedule a collection which
977 // fails to perform the allocation. b) is the only case when we'll
978 // return NULL.
979 HeapWord* result = NULL;
980 for (int try_count = 1; /* we'll return */; try_count += 1) {
981 bool should_try_gc;
982 unsigned int gc_count_before;
983
984 {
985 MutexLockerEx x(Heap_lock);
986
987 // Given that humongous objects are not allocated in young
988 // regions, we'll first try the allocation without doing a
989 // collection, hoping that there's enough space in the heap.
990 result = humongous_obj_allocate(word_size, AllocationContext::current());
991 if (result != NULL) {
992 return result;
993 }
994
995 if (GC_locker::is_active_and_needs_gc()) {
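// A GCLocker-initiated GC is already pending; it will run once all
// threads have left their critical sections, so don't schedule
// another collection here.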
996 should_try_gc = false;
997 } else {
998 // The GCLocker may not be active but the GCLocker initiated
999 // GC may not yet have been performed (GCLocker::needs_gc()
1000 // returns true). In this case we do not try this GC and
1001 // wait until the GCLocker initiated GC is performed, and
1002 // then retry the allocation.
1800 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1801 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1802 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1803 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1804
1805 _g1h = this;
1806 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1807 vm_exit_during_initialization("Failed necessary allocation.");
1808 }
1809
1810 _allocator = G1Allocator::create_allocator(_g1h);
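// Any object of at least half a heap region in size is treated as humongous.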
1811 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1812
1813 int n_queues = MAX2((int)ParallelGCThreads, 1);
1814 _task_queues = new RefToScanQueueSet(n_queues);
1815
1816 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1817 assert(n_rem_sets > 0, "Invariant.");
1818
1819 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1820 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1821 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1822
1823 for (int i = 0; i < n_queues; i++) {
1824 RefToScanQueue* q = new RefToScanQueue();
1825 q->initialize();
1826 _task_queues->register_queue(i, q);
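// The array was allocated as raw C-heap storage, so construct each
// element in place with placement new.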
1827 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1828 }
1829 clear_cset_start_regions();
1830
1831 // Initialize the G1EvacuationFailureALot counters and flags.
1832 NOT_PRODUCT(reset_evacuation_should_fail();)
1833
1834 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1835 }
1836
1837 jint G1CollectedHeap::initialize() {
1838 CollectedHeap::pre_initialize();
1839 os::enable_vtime();
1840
2382
2383 G1YCType G1CollectedHeap::yc_type() {
2384 bool is_young = g1_policy()->gcs_are_young();
2385 bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2386 bool is_during_mark = mark_in_progress();
2387
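// Note the precedence: an initial-mark pause is also a young pause,
// and a young pause with marking in progress is reported as DuringMark.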
2388 if (is_initial_mark) {
2389 return InitialMark;
2390 } else if (is_during_mark) {
2391 return DuringMark;
2392 } else if (is_young) {
2393 return Normal;
2394 } else {
2395 return Mixed;
2396 }
2397 }
2398
2399 void G1CollectedHeap::collect(GCCause::Cause cause) {
2400 assert_heap_not_locked();
2401
2402 unsigned int gc_count_before;
2403 unsigned int old_marking_count_before;
2404 unsigned int full_gc_count_before;
2405 bool retry_gc;
2406
2407 do {
2408 retry_gc = false;
2409
2410 {
2411 MutexLocker ml(Heap_lock);
2412
2413 // Read the GC count while holding the Heap_lock
2414 gc_count_before = total_collections();
2415 full_gc_count_before = total_full_collections();
2416 old_marking_count_before = _old_marking_cycles_started;
2417 }
2418
2419 if (should_do_concurrent_full_gc(cause)) {
2420 // Schedule an initial-mark evacuation pause that will start a
2421 // concurrent cycle. We're setting word_size to 0 which means that
2422 // we are not requesting a post-GC allocation.
2423 VM_G1IncCollectionPause op(gc_count_before,
2424 0, /* word_size */
3406 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3407 g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3408 }
3409
3410 // FIXME: what is this about?
3411 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3412 // is set.
3413 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3414 "derived pointer present"));
3415 // always_do_update_barrier = true;
3416
3417 resize_all_tlabs();
3418 allocation_context_stats().update(full);
3419
3420 // We have just completed a GC. Update the soft reference
3421 // policy with the new heap occupancy.
3422 Universe::update_heap_info_at_gc();
3423 }
3424
3425 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3426 unsigned int gc_count_before,
3427 bool* succeeded,
3428 GCCause::Cause gc_cause) {
3429 assert_heap_not_locked_and_not_at_safepoint();
3430 g1_policy()->record_stop_world_start();
3431 VM_G1IncCollectionPause op(gc_count_before,
3432 word_size,
3433 false, /* should_initiate_conc_mark */
3434 g1_policy()->max_pause_time_ms(),
3435 gc_cause);
3436
3437 op.set_allocation_context(AllocationContext::current());
3438 VMThread::execute(&op);
3439
3440 HeapWord* result = op.result();
3441 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3442 assert(result == NULL || ret_succeeded,
3443 "the result should be NULL if the VM did not succeed");
3444 *succeeded = ret_succeeded;
3445
3446 assert_heap_not_locked();
|
316 if (_survivor_head != NULL) {
317 assert(_survivor_tail != NULL, "non-empty survivor list should have a tail");
318 assert(_survivor_length > 0, "invariant");
319 _survivor_tail->set_next_young_region(NULL);
320 }
321
322 // Don't clear the survivor list handles until the start of
323 // the next evacuation pause - we need them in order to re-tag
324 // the survivor regions from this evacuation pause as 'young'
325 // at the start of the next.
326
327 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
328
329 assert(check_list_well_formed(), "young list should be well formed");
330 }
331
332 void YoungList::print() {
333 HeapRegion* lists[] = {_head, _survivor_head};
334 const char* names[] = {"YOUNG", "SURVIVOR"};
335
336 for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
337 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
338 HeapRegion *curr = lists[list];
339 if (curr == NULL)
340 gclog_or_tty->print_cr(" empty");
341 while (curr != NULL) {
342 gclog_or_tty->print_cr("  " HR_FORMAT ", P: " PTR_FORMAT ", N: " PTR_FORMAT ", age: %4d",
343 HR_FORMAT_PARAMS(curr),
344 curr->prev_top_at_mark_start(),
345 curr->next_top_at_mark_start(),
346 curr->age_in_surv_rate_group_cond());
347 curr = curr->get_next_young_region();
348 }
349 }
350
351 gclog_or_tty->cr();
352 }
353
354 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
355 OtherRegionsTable::invalidate(start_idx, num_regions);
356 }
748 if (first != G1_NO_HRM_INDEX) {
749 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
750 word_size, context);
751 assert(result != NULL, "it should always return a valid result");
752
753 // A successful humongous object allocation changes the used space
754 // information of the old generation so we need to recalculate the
755 // sizes and update the jstat counters here.
756 g1mm()->update_sizes();
757 }
758
759 verify_region_sets_optional();
760
761 return result;
762 }
763
764 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
765 assert_heap_not_locked_and_not_at_safepoint();
766 assert(!is_humongous(word_size), "we do not allow humongous TLABs");
767
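// attempt_allocation() requires these out parameters, but the TLAB
// path has no use for the values they report.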
768 uint dummy_gc_count_before;
769 uint dummy_gclocker_retry_count = 0;
770 return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
771 }
772
773 HeapWord*
774 G1CollectedHeap::mem_allocate(size_t word_size,
775 bool* gc_overhead_limit_was_exceeded) {
776 assert_heap_not_locked_and_not_at_safepoint();
777
778 // Loop until the allocation is satisfied, or unsatisfied after GC.
779 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
780 uint gc_count_before;
781
782 HeapWord* result = NULL;
783 if (!is_humongous(word_size)) {
784 result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
785 } else {
786 result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
787 }
788 if (result != NULL) {
789 return result;
790 }
791
792 // Create the garbage collection operation...
793 VM_G1CollectForAllocation op(gc_count_before, word_size);
794 op.set_allocation_context(AllocationContext::current());
795
796 // ...and get the VM thread to execute it.
797 VMThread::execute(&op);
798
799 if (op.prologue_succeeded() && op.pause_succeeded()) {
800 // If the operation was successful we'll return the result even
812 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
813 return NULL;
814 }
815 assert(op.result() == NULL,
816 "the result should be NULL if the VM op did not succeed");
817 }
818
819 // Give a warning if we seem to be looping forever.
820 if ((QueuedAllocationWarningCount > 0) &&
821 (try_count % QueuedAllocationWarningCount == 0)) {
822 warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
823 }
824 }
825
826 ShouldNotReachHere();
827 return NULL;
828 }
829
830 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
831 AllocationContext_t context,
832 uint* gc_count_before_ret,
833 uint* gclocker_retry_count_ret) {
834 // Make sure you read the note in attempt_allocation_humongous().
835
836 assert_heap_not_locked_and_not_at_safepoint();
837 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
838 "be called for humongous allocation requests");
839
840 // We should only get here after the first-level allocation attempt
841 // (attempt_allocation()) failed to allocate.
842
843 // We will loop until a) we manage to successfully perform the
844 // allocation or b) we successfully schedule a collection which
845 // fails to perform the allocation. b) is the only case when we'll
846 // return NULL.
847 HeapWord* result = NULL;
848 for (int try_count = 1; /* we'll return */; try_count += 1) {
849 bool should_try_gc;
850 uint gc_count_before;
851
852 {
853 MutexLockerEx x(Heap_lock);
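// Retry the allocation now that we hold the Heap_lock;
// attempt_allocation_locked() will also try to allocate a new
// mutator region if the current one cannot satisfy the request.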
854 result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
855 false /* bot_updates */);
856 if (result != NULL) {
857 return result;
858 }
859
860 // If we reach here, attempt_allocation_locked() above failed to
861 // allocate a new region. So the mutator alloc region should be NULL.
862 assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
863
864 if (GC_locker::is_active_and_needs_gc()) {
865 if (g1_policy()->can_expand_young_list()) {
866 // No need for an ergo verbose message here,
867 // can_expand_young_list() does this when it returns true.
868 result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
869 false /* bot_updates */);
870 if (result != NULL) {
928 // iteration (after taking the Heap_lock).
929 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
930 false /* bot_updates */);
931 if (result != NULL) {
932 return result;
933 }
934
935 // Give a warning if we seem to be looping forever.
936 if ((QueuedAllocationWarningCount > 0) &&
937 (try_count % QueuedAllocationWarningCount == 0)) {
938 warning("G1CollectedHeap::attempt_allocation_slow() "
939 "retries %d times", try_count);
940 }
941 }
942
943 ShouldNotReachHere();
944 return NULL;
945 }
946
947 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
948 uint* gc_count_before_ret,
949 uint* gclocker_retry_count_ret) {
950 // The structure of this method has a lot of similarities to
951 // attempt_allocation_slow(). The reason these two were not merged
952 // into a single one is that such a method would require several "if
953 // allocation is not humongous do this, otherwise do that"
954 // conditional paths which would obscure its flow. In fact, an early
955 // version of this code did use a unified method which was harder to
956 // follow and, as a result, it had subtle bugs that were hard to
957 // track down. So keeping these two methods separate allows each to
958 // be more readable; they should, however, be kept in sync as
959 // much as possible.
960
961 assert_heap_not_locked_and_not_at_safepoint();
962 assert(is_humongous(word_size), "attempt_allocation_humongous() "
963 "should only be called for humongous allocations");
964
965 // Humongous objects can exhaust the heap quickly, so we should check if we
966 // need to start a marking cycle at each humongous object allocation. We do
967 // the check before we do the actual allocation. The reason for doing it
968 // before the allocation is that we avoid having to keep track of the newly
969 // allocated memory while we do a GC.
970 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
971 word_size)) {
972 collect(GCCause::_g1_humongous_allocation);
973 }
974
975 // We will loop until a) we manage to successfully perform the
976 // allocation or b) we successfully schedule a collection which
977 // fails to perform the allocation. b) is the only case when we'll
978 // return NULL.
979 HeapWord* result = NULL;
980 for (int try_count = 1; /* we'll return */; try_count += 1) {
981 bool should_try_gc;
982 uint gc_count_before;
983
984 {
985 MutexLockerEx x(Heap_lock);
986
987 // Given that humongous objects are not allocated in young
988 // regions, we'll first try the allocation without doing a
989 // collection, hoping that there's enough space in the heap.
990 result = humongous_obj_allocate(word_size, AllocationContext::current());
991 if (result != NULL) {
992 return result;
993 }
994
995 if (GC_locker::is_active_and_needs_gc()) {
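// A GCLocker-initiated GC is already pending; it will run once all
// threads have left their critical sections, so don't schedule
// another collection here.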
996 should_try_gc = false;
997 } else {
998 // The GCLocker may not be active but the GCLocker initiated
999 // GC may not yet have been performed (GCLocker::needs_gc()
1000 // returns true). In this case we do not try this GC and
1001 // wait until the GCLocker initiated GC is performed, and
1002 // then retry the allocation.
1800 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1801 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1802 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1803 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1804
1805 _g1h = this;
1806 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1807 vm_exit_during_initialization("Failed necessary allocation.");
1808 }
1809
1810 _allocator = G1Allocator::create_allocator(_g1h);
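// Any object of at least half a heap region in size is treated as humongous.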
1811 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1812
1813 int n_queues = MAX2((int)ParallelGCThreads, 1);
1814 _task_queues = new RefToScanQueueSet(n_queues);
1815
1816 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1817 assert(n_rem_sets > 0, "Invariant.");
1818
1819 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1820 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1821 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1822
1823 for (int i = 0; i < n_queues; i++) {
1824 RefToScanQueue* q = new RefToScanQueue();
1825 q->initialize();
1826 _task_queues->register_queue(i, q);
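// The array was allocated as raw C-heap storage, so construct each
// element in place with placement new.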
1827 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1828 }
1829 clear_cset_start_regions();
1830
1831 // Initialize the G1EvacuationFailureALot counters and flags.
1832 NOT_PRODUCT(reset_evacuation_should_fail();)
1833
1834 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1835 }
1836
1837 jint G1CollectedHeap::initialize() {
1838 CollectedHeap::pre_initialize();
1839 os::enable_vtime();
1840
2382
2383 G1YCType G1CollectedHeap::yc_type() {
2384 bool is_young = g1_policy()->gcs_are_young();
2385 bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2386 bool is_during_mark = mark_in_progress();
2387
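// Note the precedence: an initial-mark pause is also a young pause,
// and a young pause with marking in progress is reported as DuringMark.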
2388 if (is_initial_mark) {
2389 return InitialMark;
2390 } else if (is_during_mark) {
2391 return DuringMark;
2392 } else if (is_young) {
2393 return Normal;
2394 } else {
2395 return Mixed;
2396 }
2397 }
2398
2399 void G1CollectedHeap::collect(GCCause::Cause cause) {
2400 assert_heap_not_locked();
2401
2402 uint gc_count_before;
2403 uint old_marking_count_before;
2404 uint full_gc_count_before;
2405 bool retry_gc;
2406
2407 do {
2408 retry_gc = false;
2409
2410 {
2411 MutexLocker ml(Heap_lock);
2412
2413 // Read the GC count while holding the Heap_lock
2414 gc_count_before = total_collections();
2415 full_gc_count_before = total_full_collections();
2416 old_marking_count_before = _old_marking_cycles_started;
2417 }
2418
2419 if (should_do_concurrent_full_gc(cause)) {
2420 // Schedule an initial-mark evacuation pause that will start a
2421 // concurrent cycle. We're setting word_size to 0 which means that
2422 // we are not requesting a post-GC allocation.
2423 VM_G1IncCollectionPause op(gc_count_before,
2424 0, /* word_size */
3406 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3407 g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3408 }
3409
3410 // FIXME: what is this about?
3411 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3412 // is set.
3413 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3414 "derived pointer present"));
3415 // always_do_update_barrier = true;
3416
3417 resize_all_tlabs();
3418 allocation_context_stats().update(full);
3419
3420 // We have just completed a GC. Update the soft reference
3421 // policy with the new heap occupancy.
3422 Universe::update_heap_info_at_gc();
3423 }
3424
3425 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3426 uint gc_count_before,
3427 bool* succeeded,
3428 GCCause::Cause gc_cause) {
3429 assert_heap_not_locked_and_not_at_safepoint();
3430 g1_policy()->record_stop_world_start();
3431 VM_G1IncCollectionPause op(gc_count_before,
3432 word_size,
3433 false, /* should_initiate_conc_mark */
3434 g1_policy()->max_pause_time_ms(),
3435 gc_cause);
3436
3437 op.set_allocation_context(AllocationContext::current());
3438 VMThread::execute(&op);
3439
3440 HeapWord* result = op.result();
3441 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3442 assert(result == NULL || ret_succeeded,
3443 "the result should be NULL if the VM did not succeed");
3444 *succeeded = ret_succeeded;
3445
3446 assert_heap_not_locked();
|