1 /*
2 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
316 if (_survivor_head != NULL) {
317 assert(_survivor_tail != NULL, "cause it shouldn't be");
318 assert(_survivor_length > 0, "invariant");
319 _survivor_tail->set_next_young_region(NULL);
320 }
321
322 // Don't clear the survivor list handles until the start of
323 // the next evacuation pause - we need it in order to re-tag
324 // the survivor regions from this evacuation pause as 'young'
325 // at the start of the next.
326
327 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
328
329 assert(check_list_well_formed(), "young list should be well formed");
330 }
331
332 void YoungList::print() {
333 HeapRegion* lists[] = {_head, _survivor_head};
334 const char* names[] = {"YOUNG", "SURVIVOR"};
335
336 for (unsigned int list = 0; list < ARRAY_SIZE(lists); ++list) {
337 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
338 HeapRegion *curr = lists[list];
339 if (curr == NULL)
340 gclog_or_tty->print_cr(" empty");
341 while (curr != NULL) {
342 gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d",
343 HR_FORMAT_PARAMS(curr),
344 curr->prev_top_at_mark_start(),
345 curr->next_top_at_mark_start(),
346 curr->age_in_surv_rate_group_cond());
347 curr = curr->get_next_young_region();
348 }
349 }
350
351 gclog_or_tty->cr();
352 }
353
// Listener callback: the mapping (commit state) of the heap-region range
// [start_idx, start_idx + num_regions) has changed. Delegates to
// HeapRegionRemSet to invalidate the from-card cache entries that cover
// those regions.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
748 if (first != G1_NO_HRM_INDEX) {
749 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
750 word_size, context);
751 assert(result != NULL, "it should always return a valid result");
752
753 // A successful humongous object allocation changes the used space
754 // information of the old generation so we need to recalculate the
755 // sizes and update the jstat counters here.
756 g1mm()->update_sizes();
757 }
758
759 verify_region_sets_optional();
760
761 return result;
762 }
763
764 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
765 assert_heap_not_locked_and_not_at_safepoint();
766 assert(!is_humongous(word_size), "we do not allow humongous TLABs");
767
768 unsigned int dummy_gc_count_before;
769 int dummy_gclocker_retry_count = 0;
770 return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
771 }
772
773 HeapWord*
774 G1CollectedHeap::mem_allocate(size_t word_size,
775 bool* gc_overhead_limit_was_exceeded) {
776 assert_heap_not_locked_and_not_at_safepoint();
777
778 // Loop until the allocation is satisfied, or unsatisfied after GC.
779 for (int try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
780 unsigned int gc_count_before;
781
782 HeapWord* result = NULL;
783 if (!is_humongous(word_size)) {
784 result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
785 } else {
786 result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
787 }
788 if (result != NULL) {
789 return result;
790 }
791
792 // Create the garbage collection operation...
793 VM_G1CollectForAllocation op(gc_count_before, word_size);
794 op.set_allocation_context(AllocationContext::current());
795
796 // ...and get the VM thread to execute it.
797 VMThread::execute(&op);
798
799 if (op.prologue_succeeded() && op.pause_succeeded()) {
800 // If the operation was successful we'll return the result even
812 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
813 return NULL;
814 }
815 assert(op.result() == NULL,
816 "the result should be NULL if the VM op did not succeed");
817 }
818
819 // Give a warning if we seem to be looping forever.
820 if ((QueuedAllocationWarningCount > 0) &&
821 (try_count % QueuedAllocationWarningCount == 0)) {
822 warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
823 }
824 }
825
826 ShouldNotReachHere();
827 return NULL;
828 }
829
830 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
831 AllocationContext_t context,
832 unsigned int *gc_count_before_ret,
833 int* gclocker_retry_count_ret) {
834 // Make sure you read the note in attempt_allocation_humongous().
835
836 assert_heap_not_locked_and_not_at_safepoint();
837 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
838 "be called for humongous allocation requests");
839
840 // We should only get here after the first-level allocation attempt
841 // (attempt_allocation()) failed to allocate.
842
843 // We will loop until a) we manage to successfully perform the
844 // allocation or b) we successfully schedule a collection which
845 // fails to perform the allocation. b) is the only case when we'll
846 // return NULL.
847 HeapWord* result = NULL;
848 for (int try_count = 1; /* we'll return */; try_count += 1) {
849 bool should_try_gc;
850 unsigned int gc_count_before;
851
852 {
853 MutexLockerEx x(Heap_lock);
854 result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
855 false /* bot_updates */);
856 if (result != NULL) {
857 return result;
858 }
859
860 // If we reach here, attempt_allocation_locked() above failed to
861 // allocate a new region. So the mutator alloc region should be NULL.
862 assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
863
864 if (GC_locker::is_active_and_needs_gc()) {
865 if (g1_policy()->can_expand_young_list()) {
866 // No need for an ergo verbose message here,
867 // can_expand_young_list() does this when it returns true.
868 result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
869 false /* bot_updates */);
870 if (result != NULL) {
928 // iteration (after taking the Heap_lock).
929 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
930 false /* bot_updates */);
931 if (result != NULL) {
932 return result;
933 }
934
935 // Give a warning if we seem to be looping forever.
936 if ((QueuedAllocationWarningCount > 0) &&
937 (try_count % QueuedAllocationWarningCount == 0)) {
938 warning("G1CollectedHeap::attempt_allocation_slow() "
939 "retries %d times", try_count);
940 }
941 }
942
943 ShouldNotReachHere();
944 return NULL;
945 }
946
947 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
948 unsigned int * gc_count_before_ret,
949 int* gclocker_retry_count_ret) {
950 // The structure of this method has a lot of similarities to
951 // attempt_allocation_slow(). The reason these two were not merged
952 // into a single one is that such a method would require several "if
953 // allocation is not humongous do this, otherwise do that"
954 // conditional paths which would obscure its flow. In fact, an early
955 // version of this code did use a unified method which was harder to
956 // follow and, as a result, it had subtle bugs that were hard to
957 // track down. So keeping these two methods separate allows each to
958 // be more readable. It will be good to keep these two in sync as
959 // much as possible.
960
961 assert_heap_not_locked_and_not_at_safepoint();
962 assert(is_humongous(word_size), "attempt_allocation_humongous() "
963 "should only be called for humongous allocations");
964
965 // Humongous objects can exhaust the heap quickly, so we should check if we
966 // need to start a marking cycle at each humongous object allocation. We do
967 // the check before we do the actual allocation. The reason for doing it
968 // before the allocation is that we avoid having to keep track of the newly
969 // allocated memory while we do a GC.
970 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
971 word_size)) {
972 collect(GCCause::_g1_humongous_allocation);
973 }
974
975 // We will loop until a) we manage to successfully perform the
976 // allocation or b) we successfully schedule a collection which
977 // fails to perform the allocation. b) is the only case when we'll
978 // return NULL.
979 HeapWord* result = NULL;
980 for (int try_count = 1; /* we'll return */; try_count += 1) {
981 bool should_try_gc;
982 unsigned int gc_count_before;
983
984 {
985 MutexLockerEx x(Heap_lock);
986
987 // Given that humongous objects are not allocated in young
988 // regions, we'll first try to do the allocation without doing a
989 // collection hoping that there's enough space in the heap.
990 result = humongous_obj_allocate(word_size, AllocationContext::current());
991 if (result != NULL) {
992 return result;
993 }
994
995 if (GC_locker::is_active_and_needs_gc()) {
996 should_try_gc = false;
997 } else {
998 // The GCLocker may not be active but the GCLocker initiated
999 // GC may not yet have been performed (GCLocker::needs_gc()
1000 // returns true). In this case we do not try this GC and
1001 // wait until the GCLocker initiated GC is performed, and
1002 // then retry the allocation.
1798 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1799 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1800 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1801 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1802
1803 _g1h = this;
1804 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1805 vm_exit_during_initialization("Failed necessary allocation.");
1806 }
1807
1808 _allocator = G1Allocator::create_allocator(_g1h);
1809 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1810
1811 int n_queues = MAX2((int)ParallelGCThreads, 1);
1812 _task_queues = new RefToScanQueueSet(n_queues);
1813
1814 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1815 assert(n_rem_sets > 0, "Invariant.");
1816
1817 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1818 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(unsigned int, n_queues, mtGC);
1819 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1820
1821 for (int i = 0; i < n_queues; i++) {
1822 RefToScanQueue* q = new RefToScanQueue();
1823 q->initialize();
1824 _task_queues->register_queue(i, q);
1825 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1826 }
1827 clear_cset_start_regions();
1828
1829 // Initialize the G1EvacuationFailureALot counters and flags.
1830 NOT_PRODUCT(reset_evacuation_should_fail();)
1831
1832 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1833 }
1834
1835 jint G1CollectedHeap::initialize() {
1836 CollectedHeap::pre_initialize();
1837 os::enable_vtime();
1838
2379
// Classify the current young collection for GC tracing/reporting.
// Precedence: an initial-mark pause wins over an in-progress concurrent
// mark, which wins over the young-only vs. mixed distinction.
G1YCType G1CollectedHeap::yc_type() {
  bool is_young = g1_policy()->gcs_are_young();
  bool is_initial_mark = g1_policy()->during_initial_mark_pause();
  bool is_during_mark = mark_in_progress();

  if (is_initial_mark) {
    return InitialMark;
  } else if (is_during_mark) {
    return DuringMark;
  } else if (is_young) {
    return Normal;
  } else {
    // Not young-only and no marking in progress: a mixed collection.
    return Mixed;
  }
}
2395
2396 void G1CollectedHeap::collect(GCCause::Cause cause) {
2397 assert_heap_not_locked();
2398
2399 unsigned int gc_count_before;
2400 unsigned int old_marking_count_before;
2401 unsigned int full_gc_count_before;
2402 bool retry_gc;
2403
2404 do {
2405 retry_gc = false;
2406
2407 {
2408 MutexLocker ml(Heap_lock);
2409
2410 // Read the GC count while holding the Heap_lock
2411 gc_count_before = total_collections();
2412 full_gc_count_before = total_full_collections();
2413 old_marking_count_before = _old_marking_cycles_started;
2414 }
2415
2416 if (should_do_concurrent_full_gc(cause)) {
2417 // Schedule an initial-mark evacuation pause that will start a
2418 // concurrent cycle. We're setting word_size to 0 which means that
2419 // we are not requesting a post-GC allocation.
2420 VM_G1IncCollectionPause op(gc_count_before,
2421 0, /* word_size */
3401 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3402 g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3403 }
3404
3405 // FIXME: what is this about?
3406 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3407 // is set.
3408 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3409 "derived pointer present"));
3410 // always_do_update_barrier = true;
3411
3412 resize_all_tlabs();
3413 allocation_context_stats().update(full);
3414
3415 // We have just completed a GC. Update the soft reference
3416 // policy with the new heap occupancy
3417 Universe::update_heap_info_at_gc();
3418 }
3419
3420 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3421 unsigned int gc_count_before,
3422 bool* succeeded,
3423 GCCause::Cause gc_cause) {
3424 assert_heap_not_locked_and_not_at_safepoint();
3425 g1_policy()->record_stop_world_start();
3426 VM_G1IncCollectionPause op(gc_count_before,
3427 word_size,
3428 false, /* should_initiate_conc_mark */
3429 g1_policy()->max_pause_time_ms(),
3430 gc_cause);
3431
3432 op.set_allocation_context(AllocationContext::current());
3433 VMThread::execute(&op);
3434
3435 HeapWord* result = op.result();
3436 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3437 assert(result == NULL || ret_succeeded,
3438 "the result should be NULL if the VM did not succeed");
3439 *succeeded = ret_succeeded;
3440
3441 assert_heap_not_locked();
|
1 /*
2 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
316 if (_survivor_head != NULL) {
317 assert(_survivor_tail != NULL, "cause it shouldn't be");
318 assert(_survivor_length > 0, "invariant");
319 _survivor_tail->set_next_young_region(NULL);
320 }
321
322 // Don't clear the survivor list handles until the start of
323 // the next evacuation pause - we need it in order to re-tag
324 // the survivor regions from this evacuation pause as 'young'
325 // at the start of the next.
326
327 _g1h->g1_policy()->finished_recalculating_age_indexes(false /* is_survivors */);
328
329 assert(check_list_well_formed(), "young list should be well formed");
330 }
331
332 void YoungList::print() {
333 HeapRegion* lists[] = {_head, _survivor_head};
334 const char* names[] = {"YOUNG", "SURVIVOR"};
335
336 for (uint list = 0; list < ARRAY_SIZE(lists); ++list) {
337 gclog_or_tty->print_cr("%s LIST CONTENTS", names[list]);
338 HeapRegion *curr = lists[list];
339 if (curr == NULL)
340 gclog_or_tty->print_cr(" empty");
341 while (curr != NULL) {
342 gclog_or_tty->print_cr(" "HR_FORMAT", P: "PTR_FORMAT ", N: "PTR_FORMAT", age: %4d",
343 HR_FORMAT_PARAMS(curr),
344 curr->prev_top_at_mark_start(),
345 curr->next_top_at_mark_start(),
346 curr->age_in_surv_rate_group_cond());
347 curr = curr->get_next_young_region();
348 }
349 }
350
351 gclog_or_tty->cr();
352 }
353
// Invoked when the commit mapping of heap regions
// [start_idx, start_idx + num_regions) changes; forwards to
// HeapRegionRemSet so the from-card cache entries for that range are
// invalidated.
void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
  HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
}
748 if (first != G1_NO_HRM_INDEX) {
749 result = humongous_obj_allocate_initialize_regions(first, obj_regions,
750 word_size, context);
751 assert(result != NULL, "it should always return a valid result");
752
753 // A successful humongous object allocation changes the used space
754 // information of the old generation so we need to recalculate the
755 // sizes and update the jstat counters here.
756 g1mm()->update_sizes();
757 }
758
759 verify_region_sets_optional();
760
761 return result;
762 }
763
764 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
765 assert_heap_not_locked_and_not_at_safepoint();
766 assert(!is_humongous(word_size), "we do not allow humongous TLABs");
767
768 uint dummy_gc_count_before;
769 uint dummy_gclocker_retry_count = 0;
770 return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
771 }
772
773 HeapWord*
774 G1CollectedHeap::mem_allocate(size_t word_size,
775 bool* gc_overhead_limit_was_exceeded) {
776 assert_heap_not_locked_and_not_at_safepoint();
777
778 // Loop until the allocation is satisfied, or unsatisfied after GC.
779 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
780 uint gc_count_before;
781
782 HeapWord* result = NULL;
783 if (!is_humongous(word_size)) {
784 result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
785 } else {
786 result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
787 }
788 if (result != NULL) {
789 return result;
790 }
791
792 // Create the garbage collection operation...
793 VM_G1CollectForAllocation op(gc_count_before, word_size);
794 op.set_allocation_context(AllocationContext::current());
795
796 // ...and get the VM thread to execute it.
797 VMThread::execute(&op);
798
799 if (op.prologue_succeeded() && op.pause_succeeded()) {
800 // If the operation was successful we'll return the result even
812 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
813 return NULL;
814 }
815 assert(op.result() == NULL,
816 "the result should be NULL if the VM op did not succeed");
817 }
818
819 // Give a warning if we seem to be looping forever.
820 if ((QueuedAllocationWarningCount > 0) &&
821 (try_count % QueuedAllocationWarningCount == 0)) {
822 warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
823 }
824 }
825
826 ShouldNotReachHere();
827 return NULL;
828 }
829
830 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
831 AllocationContext_t context,
832 uint* gc_count_before_ret,
833 uint* gclocker_retry_count_ret) {
834 // Make sure you read the note in attempt_allocation_humongous().
835
836 assert_heap_not_locked_and_not_at_safepoint();
837 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
838 "be called for humongous allocation requests");
839
840 // We should only get here after the first-level allocation attempt
841 // (attempt_allocation()) failed to allocate.
842
843 // We will loop until a) we manage to successfully perform the
844 // allocation or b) we successfully schedule a collection which
845 // fails to perform the allocation. b) is the only case when we'll
846 // return NULL.
847 HeapWord* result = NULL;
848 for (int try_count = 1; /* we'll return */; try_count += 1) {
849 bool should_try_gc;
850 uint gc_count_before;
851
852 {
853 MutexLockerEx x(Heap_lock);
854 result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
855 false /* bot_updates */);
856 if (result != NULL) {
857 return result;
858 }
859
860 // If we reach here, attempt_allocation_locked() above failed to
861 // allocate a new region. So the mutator alloc region should be NULL.
862 assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
863
864 if (GC_locker::is_active_and_needs_gc()) {
865 if (g1_policy()->can_expand_young_list()) {
866 // No need for an ergo verbose message here,
867 // can_expand_young_list() does this when it returns true.
868 result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
869 false /* bot_updates */);
870 if (result != NULL) {
928 // iteration (after taking the Heap_lock).
929 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
930 false /* bot_updates */);
931 if (result != NULL) {
932 return result;
933 }
934
935 // Give a warning if we seem to be looping forever.
936 if ((QueuedAllocationWarningCount > 0) &&
937 (try_count % QueuedAllocationWarningCount == 0)) {
938 warning("G1CollectedHeap::attempt_allocation_slow() "
939 "retries %d times", try_count);
940 }
941 }
942
943 ShouldNotReachHere();
944 return NULL;
945 }
946
947 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
948 uint* gc_count_before_ret,
949 uint* gclocker_retry_count_ret) {
950 // The structure of this method has a lot of similarities to
951 // attempt_allocation_slow(). The reason these two were not merged
952 // into a single one is that such a method would require several "if
953 // allocation is not humongous do this, otherwise do that"
954 // conditional paths which would obscure its flow. In fact, an early
955 // version of this code did use a unified method which was harder to
956 // follow and, as a result, it had subtle bugs that were hard to
957 // track down. So keeping these two methods separate allows each to
958 // be more readable. It will be good to keep these two in sync as
959 // much as possible.
960
961 assert_heap_not_locked_and_not_at_safepoint();
962 assert(is_humongous(word_size), "attempt_allocation_humongous() "
963 "should only be called for humongous allocations");
964
965 // Humongous objects can exhaust the heap quickly, so we should check if we
966 // need to start a marking cycle at each humongous object allocation. We do
967 // the check before we do the actual allocation. The reason for doing it
968 // before the allocation is that we avoid having to keep track of the newly
969 // allocated memory while we do a GC.
970 if (g1_policy()->need_to_start_conc_mark("concurrent humongous allocation",
971 word_size)) {
972 collect(GCCause::_g1_humongous_allocation);
973 }
974
975 // We will loop until a) we manage to successfully perform the
976 // allocation or b) we successfully schedule a collection which
977 // fails to perform the allocation. b) is the only case when we'll
978 // return NULL.
979 HeapWord* result = NULL;
980 for (int try_count = 1; /* we'll return */; try_count += 1) {
981 bool should_try_gc;
982 uint gc_count_before;
983
984 {
985 MutexLockerEx x(Heap_lock);
986
987 // Given that humongous objects are not allocated in young
988 // regions, we'll first try to do the allocation without doing a
989 // collection hoping that there's enough space in the heap.
990 result = humongous_obj_allocate(word_size, AllocationContext::current());
991 if (result != NULL) {
992 return result;
993 }
994
995 if (GC_locker::is_active_and_needs_gc()) {
996 should_try_gc = false;
997 } else {
998 // The GCLocker may not be active but the GCLocker initiated
999 // GC may not yet have been performed (GCLocker::needs_gc()
1000 // returns true). In this case we do not try this GC and
1001 // wait until the GCLocker initiated GC is performed, and
1002 // then retry the allocation.
1798 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1799 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1800 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1801 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1802
1803 _g1h = this;
1804 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1805 vm_exit_during_initialization("Failed necessary allocation.");
1806 }
1807
1808 _allocator = G1Allocator::create_allocator(_g1h);
1809 _humongous_object_threshold_in_words = HeapRegion::GrainWords / 2;
1810
1811 int n_queues = MAX2((int)ParallelGCThreads, 1);
1812 _task_queues = new RefToScanQueueSet(n_queues);
1813
1814 uint n_rem_sets = HeapRegionRemSet::num_par_rem_sets();
1815 assert(n_rem_sets > 0, "Invariant.");
1816
1817 _worker_cset_start_region = NEW_C_HEAP_ARRAY(HeapRegion*, n_queues, mtGC);
1818 _worker_cset_start_region_time_stamp = NEW_C_HEAP_ARRAY(uint, n_queues, mtGC);
1819 _evacuation_failed_info_array = NEW_C_HEAP_ARRAY(EvacuationFailedInfo, n_queues, mtGC);
1820
1821 for (int i = 0; i < n_queues; i++) {
1822 RefToScanQueue* q = new RefToScanQueue();
1823 q->initialize();
1824 _task_queues->register_queue(i, q);
1825 ::new (&_evacuation_failed_info_array[i]) EvacuationFailedInfo();
1826 }
1827 clear_cset_start_regions();
1828
1829 // Initialize the G1EvacuationFailureALot counters and flags.
1830 NOT_PRODUCT(reset_evacuation_should_fail();)
1831
1832 guarantee(_task_queues != NULL, "task_queues allocation failure.");
1833 }
1834
1835 jint G1CollectedHeap::initialize() {
1836 CollectedHeap::pre_initialize();
1837 os::enable_vtime();
1838
2379
2380 G1YCType G1CollectedHeap::yc_type() {
2381 bool is_young = g1_policy()->gcs_are_young();
2382 bool is_initial_mark = g1_policy()->during_initial_mark_pause();
2383 bool is_during_mark = mark_in_progress();
2384
2385 if (is_initial_mark) {
2386 return InitialMark;
2387 } else if (is_during_mark) {
2388 return DuringMark;
2389 } else if (is_young) {
2390 return Normal;
2391 } else {
2392 return Mixed;
2393 }
2394 }
2395
2396 void G1CollectedHeap::collect(GCCause::Cause cause) {
2397 assert_heap_not_locked();
2398
2399 uint gc_count_before;
2400 uint old_marking_count_before;
2401 uint full_gc_count_before;
2402 bool retry_gc;
2403
2404 do {
2405 retry_gc = false;
2406
2407 {
2408 MutexLocker ml(Heap_lock);
2409
2410 // Read the GC count while holding the Heap_lock
2411 gc_count_before = total_collections();
2412 full_gc_count_before = total_full_collections();
2413 old_marking_count_before = _old_marking_cycles_started;
2414 }
2415
2416 if (should_do_concurrent_full_gc(cause)) {
2417 // Schedule an initial-mark evacuation pause that will start a
2418 // concurrent cycle. We're setting word_size to 0 which means that
2419 // we are not requesting a post-GC allocation.
2420 VM_G1IncCollectionPause op(gc_count_before,
2421 0, /* word_size */
3401 ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
3402 g1_rem_set()->print_periodic_summary_info("After GC RS summary");
3403 }
3404
3405 // FIXME: what is this about?
3406 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3407 // is set.
3408 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3409 "derived pointer present"));
3410 // always_do_update_barrier = true;
3411
3412 resize_all_tlabs();
3413 allocation_context_stats().update(full);
3414
3415 // We have just completed a GC. Update the soft reference
3416 // policy with the new heap occupancy
3417 Universe::update_heap_info_at_gc();
3418 }
3419
3420 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3421 uint gc_count_before,
3422 bool* succeeded,
3423 GCCause::Cause gc_cause) {
3424 assert_heap_not_locked_and_not_at_safepoint();
3425 g1_policy()->record_stop_world_start();
3426 VM_G1IncCollectionPause op(gc_count_before,
3427 word_size,
3428 false, /* should_initiate_conc_mark */
3429 g1_policy()->max_pause_time_ms(),
3430 gc_cause);
3431
3432 op.set_allocation_context(AllocationContext::current());
3433 VMThread::execute(&op);
3434
3435 HeapWord* result = op.result();
3436 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3437 assert(result == NULL || ret_succeeded,
3438 "the result should be NULL if the VM did not succeed");
3439 *succeeded = ret_succeeded;
3440
3441 assert_heap_not_locked();
|