// ==================== Excerpt 1: original code ====================
// [The excerpt opens mid-function, in the tail of G1CollectedHeap::humongous_obj_allocate().]
739 word_size, context);
740 assert(result != NULL, "it should always return a valid result");
741
742 // A successful humongous object allocation changes the used space
743 // information of the old generation so we need to recalculate the
744 // sizes and update the jstat counters here.
745 g1mm()->update_sizes();
746 }
747
748 verify_region_sets_optional();
749
750 return result;
751 }
752
753 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
754 assert_heap_not_locked_and_not_at_safepoint();
755 assert(!is_humongous(word_size), "we do not allow humongous TLABs");
756
757 uint dummy_gc_count_before;
758 uint dummy_gclocker_retry_count = 0;
759 return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
760 }
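// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file. A G1 allocation counts
// as humongous when it is larger than half a heap region, which is why TLAB
// requests are capped below that threshold by the assert above. The helper
// below models the check under an assumed fixed region size; the real code
// derives the threshold from HeapRegion::GrainWords.
#include <cstddef>

namespace g1_sketch {
  const size_t region_words = 1024 * 1024;            // assumed 8 MB region of 8-byte words
  const size_t humongous_threshold_words = region_words / 2;

  // Mirrors the intent of G1CollectedHeap::is_humongous(word_size).
  inline bool is_humongous(size_t word_size) {
    return word_size > humongous_threshold_words;
  }
}
// ---------------------------------------------------------------------------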
761
762 HeapWord*
763 G1CollectedHeap::mem_allocate(size_t word_size,
764 bool* gc_overhead_limit_was_exceeded) {
765 assert_heap_not_locked_and_not_at_safepoint();
766
767 // Loop until the allocation is satisfied, or unsatisfied after GC.
768 for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
769 uint gc_count_before;
770
771 HeapWord* result = NULL;
772 if (!is_humongous(word_size)) {
773 result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
774 } else {
775 result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
776 }
777 if (result != NULL) {
778 return result;
779 }
780
781 // Create the garbage collection operation...
782 VM_G1CollectForAllocation op(gc_count_before, word_size);
783 op.set_allocation_context(AllocationContext::current());
784
785 // ...and get the VM thread to execute it.
786 VMThread::execute(&op);
787
788 if (op.prologue_succeeded() && op.pause_succeeded()) {
789 // If the operation was successful we'll return the result even
790 // if it is NULL. If the allocation attempt failed immediately
791 // after a Full GC, it's unlikely we'll be able to allocate now.
792 HeapWord* result = op.result();
793 if (result != NULL && !is_humongous(word_size)) {
794 // Allocations that take place on VM operations do not do any
795 // card dirtying and we have to do it here. We only have to do
796 // this for non-humongous allocations, though.
797 dirty_young_block(result, word_size);
798 }
799 return result;
800 } else {
801 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
802 return NULL;
803 }
804 assert(op.result() == NULL,
805 "the result should be NULL if the VM op did not succeed");
806 }
807
808 // Give a warning if we seem to be looping forever.
809 if ((QueuedAllocationWarningCount > 0) &&
810 (try_count % QueuedAllocationWarningCount == 0)) {
811 warning("G1CollectedHeap::mem_allocate retries %u times", try_count);
812 }
813 }
814
815 ShouldNotReachHere();
816 return NULL;
817 }
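// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file. mem_allocate() above has
// to dirty the cards covering a block returned by the VM operation because the
// allocation inside the safepoint skipped the usual card dirtying. The helper
// below models the card-table arithmetic under assumed parameters (512-byte
// cards, 0 as the dirty value); in G1 the real work is done by the barrier
// set, which also distinguishes young-gen cards.
#include <cstddef>
#include <cstdint>

namespace g1_sketch {
  const size_t  card_shift = 9;     // assumed 512-byte cards
  const uint8_t dirty_card = 0;     // assumed encoding of "dirty"

  // Mark every card spanned by [start, start + byte_size) as dirty.
  inline void dirty_block(uint8_t* card_table, uintptr_t start, size_t byte_size) {
    uintptr_t first_card = start >> card_shift;
    uintptr_t last_card  = (start + byte_size - 1) >> card_shift;
    for (uintptr_t c = first_card; c <= last_card; c++) {
      card_table[c] = dirty_card;
    }
  }
}
// ---------------------------------------------------------------------------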
818
819 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
820 AllocationContext_t context,
821 uint* gc_count_before_ret,
822 uint* gclocker_retry_count_ret) {
823 // Make sure you read the note in attempt_allocation_humongous().
824
825 assert_heap_not_locked_and_not_at_safepoint();
826 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
827 "be called for humongous allocation requests");
828
829 // We should only get here after the first-level allocation attempt
830 // (attempt_allocation()) failed to allocate.
831
832 // We will loop until a) we manage to successfully perform the
833 // allocation or b) we successfully schedule a collection which
834 // fails to perform the allocation. b) is the only case when we'll
835 // return NULL.
836 HeapWord* result = NULL;
837 for (int try_count = 1; /* we'll return */; try_count += 1) {
838 bool should_try_gc;
839 uint gc_count_before;
840
841 {
842 MutexLockerEx x(Heap_lock);
// [lines 843-862 omitted from this excerpt]
863 should_try_gc = false;
864 } else {
865 // The GCLocker may not be active but the GCLocker-initiated
866 // GC may not yet have been performed (GC_locker::needs_gc()
867 // returns true). In this case we do not start a GC of our own;
868 // we wait until the GCLocker-initiated GC has been performed,
869 // and then retry the allocation.
870 if (GC_locker::needs_gc()) {
871 should_try_gc = false;
872 } else {
873 // Read the GC count while still holding the Heap_lock.
874 gc_count_before = total_collections();
875 should_try_gc = true;
876 }
877 }
878 }
879
880 if (should_try_gc) {
881 bool succeeded;
882 result = do_collection_pause(word_size, gc_count_before, &succeeded,
883 GCCause::_g1_inc_collection_pause);
884 if (result != NULL) {
885 assert(succeeded, "only way to get back a non-NULL result");
886 return result;
887 }
888
889 if (succeeded) {
890 // If we get here we successfully scheduled a collection which
891 // failed to allocate. No point in trying to allocate
892 // further. We'll just return NULL.
893 MutexLockerEx x(Heap_lock);
894 *gc_count_before_ret = total_collections();
895 return NULL;
896 }
897 } else {
898 if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
899 MutexLockerEx x(Heap_lock);
900 *gc_count_before_ret = total_collections();
901 return NULL;
902 }
903 // The GCLocker is either active or the GCLocker initiated
// [lines 904-917 omitted from this excerpt; the comment above continues there]
918 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
919 false /* bot_updates */);
920 if (result != NULL) {
921 return result;
922 }
923
924 // Give a warning if we seem to be looping forever.
925 if ((QueuedAllocationWarningCount > 0) &&
926 (try_count % QueuedAllocationWarningCount == 0)) {
927 warning("G1CollectedHeap::attempt_allocation_slow() "
928 "retries %d times", try_count);
929 }
930 }
931
932 ShouldNotReachHere();
933 return NULL;
934 }
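// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file. The loop above decides
// whether to schedule its own pause based on the GC locker: if threads are in
// JNI critical regions, or a GC-locker-initiated GC is still pending, this
// thread must wait for that GC and retry rather than schedule a second
// collection for the same condition. A condensed model of the decision, with
// stand-in booleans for the GC_locker queries:
inline bool decide_should_try_gc(bool gclocker_active_and_needs_gc,
                                 bool gclocker_needs_gc) {
  if (gclocker_active_and_needs_gc) {
    return false;   // critical sections still in progress; no pause can run yet
  }
  if (gclocker_needs_gc) {
    return false;   // the GC locker's pending GC has not happened yet; wait for it
  }
  return true;      // safe to request a collection pause for this allocation
}
// ---------------------------------------------------------------------------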
935
936 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
937 uint* gc_count_before_ret,
938 uint* gclocker_retry_count_ret) {
939 // The structure of this method has a lot of similarities to
940 // attempt_allocation_slow(). The reason these two were not merged
941 // into a single one is that such a method would require several "if
942 // allocation is not humongous do this, otherwise do that"
943 // conditional paths which would obscure its flow. In fact, an early
944 // version of this code did use a unified method which was harder to
945 // follow and, as a result, it had subtle bugs that were hard to
946 // track down. So keeping these two methods separate allows each to
947 // be more readable. It will be good to keep these two in sync as
948 // much as possible.
949
950 assert_heap_not_locked_and_not_at_safepoint();
951 assert(is_humongous(word_size), "attempt_allocation_humongous() "
952 "should only be called for humongous allocations");
953
954 // Humongous objects can exhaust the heap quickly, so we should check if we
955 // need to start a marking cycle at each humongous object allocation. We do
956 // the check before we do the actual allocation. The reason for doing it
957 // before the allocation is that we avoid having to keep track of the newly
958 // allocated memory while we do a GC.
// [lines 959-988 omitted from this excerpt; the text below resumes mid-comment]
989 // returns true). In this case we do not try this GC and
990 // wait until the GCLocker initiated GC is performed, and
991 // then retry the allocation.
992 if (GC_locker::needs_gc()) {
993 should_try_gc = false;
994 } else {
995 // Read the GC count while still holding the Heap_lock.
996 gc_count_before = total_collections();
997 should_try_gc = true;
998 }
999 }
1000 }
1001
1002 if (should_try_gc) {
1003 // If we failed to allocate the humongous object, we should try to
1004 // do a collection pause (if we're allowed) in case it reclaims
1005 // enough space for the allocation to succeed after the pause.
1006
1007 bool succeeded;
1008 result = do_collection_pause(word_size, gc_count_before, &succeeded,
1009 GCCause::_g1_humongous_allocation);
1010 if (result != NULL) {
1011 assert(succeeded, "only way to get back a non-NULL result");
1012 return result;
1013 }
1014
1015 if (succeeded) {
1016 // If we get here we successfully scheduled a collection which
1017 // failed to allocate. No point in trying to allocate
1018 // further. We'll just return NULL.
1019 MutexLockerEx x(Heap_lock);
1020 *gc_count_before_ret = total_collections();
1021 return NULL;
1022 }
1023 } else {
1024 if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1025 MutexLockerEx x(Heap_lock);
1026 *gc_count_before_ret = total_collections();
1027 return NULL;
1028 }
1029 // The GCLocker is either active or the GCLocker initiated
// [lines 1030-3383 omitted from this excerpt; the listing resumes inside what appears to be G1CollectedHeap::gc_epilogue()]
3384 }
3385
3386 // FIXME: what is this about?
3387 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3388 // is set.
3389 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3390 "derived pointer present"));
3391 // always_do_update_barrier = true;
3392
3393 resize_all_tlabs();
3394 allocation_context_stats().update(full);
3395
3396 // We have just completed a GC. Update the soft reference
3397 // policy with the new heap occupancy.
3398 Universe::update_heap_info_at_gc();
3399 }
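// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file. update_heap_info_at_gc()
// records the post-GC occupancy consulted by the soft-reference clearing
// policy: under the LRU/max-heap policy a SoftReference is cleared only when
// it has gone unused for longer than an interval proportional to the free
// heap, scaled by SoftRefLRUPolicyMSPerMB (default 1000). A model of that
// test:
#include <cstddef>

namespace g1_sketch {
  const size_t soft_ref_lru_policy_ms_per_mb = 1000;

  inline bool should_clear_soft_ref(size_t ms_since_last_use, size_t free_heap_mb) {
    return ms_since_last_use > free_heap_mb * soft_ref_lru_policy_ms_per_mb;
  }
}
// ---------------------------------------------------------------------------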
3400
3401 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3402 uint gc_count_before,
3403 bool* succeeded,
3404 GCCause::Cause gc_cause) {
3405 assert_heap_not_locked_and_not_at_safepoint();
3406 g1_policy()->record_stop_world_start();
3407 VM_G1IncCollectionPause op(gc_count_before,
3408 word_size,
3409 false, /* should_initiate_conc_mark */
3410 g1_policy()->max_pause_time_ms(),
3411 gc_cause);
3412
3413 op.set_allocation_context(AllocationContext::current());
3414 VMThread::execute(&op);
3415
3416 HeapWord* result = op.result();
3417 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3418 assert(result == NULL || ret_succeeded,
3419 "the result should be NULL if the VM did not succeed");
3420 *succeeded = ret_succeeded;
3421
3422 assert_heap_not_locked();
3423 return result;
3424 }
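// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file. A GC VM operation's
// prologue can fail when another collection ran between the caller snapshotting
// total_collections() under the Heap_lock and the VM thread executing the op;
// the op then does nothing and the caller re-reads the heap state and retries.
// A condensed model of that staleness check (the real logic lives in the
// VM_GC_Operation prologue):
struct collection_op_model {
  unsigned gc_count_before;   // caller's snapshot, taken under Heap_lock

  // Mirrors the idea behind prologue_succeeded(): skip the op if the snapshot is stale.
  bool prologue(unsigned current_total_collections) const {
    return current_total_collections == gc_count_before;
  }
};
// ---------------------------------------------------------------------------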
3425
3426 void
3427 G1CollectedHeap::doConcurrentMark() {
3428 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3429 if (!_cmThread->in_progress()) {
3430 _cmThread->set_started();
3431 CGC_lock->notify();
// ==================== Excerpt 2: patched code ====================
// [Excerpt 1 breaks off above, mid-way through doConcurrentMark(). The listing
// below repeats the same excerpt with one change applied throughout: an
// allocation-attempt counter (gc_attempt) is threaded through the slow
// allocation paths and into the collection-pause VM operations.]
739 word_size, context);
740 assert(result != NULL, "it should always return a valid result");
741
742 // A successful humongous object allocation changes the used space
743 // information of the old generation so we need to recalculate the
744 // sizes and update the jstat counters here.
745 g1mm()->update_sizes();
746 }
747
748 verify_region_sets_optional();
749
750 return result;
751 }
752
753 HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
754 assert_heap_not_locked_and_not_at_safepoint();
755 assert(!is_humongous(word_size), "we do not allow humongous TLABs");
756
757 uint dummy_gc_count_before;
758 uint dummy_gclocker_retry_count = 0;
759 uint gc_attempts = 1;
760 return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count, &gc_attempts);
761 }
762
763 HeapWord*
764 G1CollectedHeap::mem_allocate(size_t word_size,
765 bool* gc_overhead_limit_was_exceeded) {
766 assert_heap_not_locked_and_not_at_safepoint();
767
768 // Loop until the allocation is satisfied, or unsatisfied after GC.
769 for (uint try_count = 1, gclocker_retry_count = 0, gc_attempt = 1; /* we'll return */; try_count += 1) {
770 uint gc_count_before;
771
772 HeapWord* result = NULL;
773 if (!is_humongous(word_size)) {
774 result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count, &gc_attempt);
775 } else {
776 result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count, &gc_attempt);
777 }
778 if (result != NULL) {
779 return result;
780 }
781
782 // Create the garbage collection operation...
783 VM_G1CollectForAllocation op(gc_count_before, word_size, gc_attempt++);
784 op.set_allocation_context(AllocationContext::current());
785
786 // ...and get the VM thread to execute it.
787 VMThread::execute(&op);
788
789 if (op.prologue_succeeded() && op.pause_succeeded()) {
790 // If the operation was successful we'll return the result even
791 // if it is NULL. If the allocation attempt failed immediately
792 // after a Full GC, it's unlikely we'll be able to allocate now.
793 HeapWord* result = op.result();
794 if (result != NULL && !is_humongous(word_size)) {
795 // Allocations that take place on VM operations do not do any
796 // card dirtying and we have to do it here. We only have to do
797 // this for non-humongous allocations, though.
798 dirty_young_block(result, word_size);
799 }
800 return result;
801 } else {
802 if (gclocker_retry_count > GCLockerRetryAllocationCount) {
803 return NULL;
804 }
805 assert(op.result() == NULL,
806 "the result should be NULL if the VM op did not succeed");
807 }
808
809 // Give a warning if we seem to be looping forever.
810 if ((QueuedAllocationWarningCount > 0) &&
811 (try_count % QueuedAllocationWarningCount == 0)) {
812 warning("G1CollectedHeap::mem_allocate retries %u times", try_count);
813 }
814 }
815
816 ShouldNotReachHere();
817 return NULL;
818 }
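// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file. The patched loop above
// threads an attempt counter through the allocation path: gc_attempt starts at
// 1, each collection-pause VM op is handed the current value, and the counter
// is bumped after every pause requested on behalf of this allocation. A
// condensed model of the counting:
#include <cstdio>

static void model_allocation_retries(int pauses_needed) {
  unsigned gc_attempt = 1;                              // the first attempt is numbered 1
  for (int i = 0; i < pauses_needed; i++) {
    // Each VM op observes the attempt number that was current when it was issued.
    std::printf("pause requested, gc_attempt=%u\n", gc_attempt);
    gc_attempt++;                                       // the next retry is a new attempt
  }
}
// ---------------------------------------------------------------------------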
819
820 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
821 AllocationContext_t context,
822 uint* gc_count_before_ret,
823 uint* gclocker_retry_count_ret,
824 uint* gc_attempt) {
825 // Make sure you read the note in attempt_allocation_humongous().
826
827 assert_heap_not_locked_and_not_at_safepoint();
828 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
829 "be called for humongous allocation requests");
830
831 // We should only get here after the first-level allocation attempt
832 // (attempt_allocation()) failed to allocate.
833
834 // We will loop until a) we manage to successfully perform the
835 // allocation or b) we successfully schedule a collection which
836 // fails to perform the allocation. b) is the only case when we'll
837 // return NULL.
838 HeapWord* result = NULL;
839 for (int try_count = 1; /* we'll return */; try_count += 1) {
840 bool should_try_gc;
841 uint gc_count_before;
842
843 {
844 MutexLockerEx x(Heap_lock);
// [lines 845-864 omitted from this excerpt]
865 should_try_gc = false;
866 } else {
867 // The GCLocker may not be active but the GCLocker-initiated
868 // GC may not yet have been performed (GC_locker::needs_gc()
869 // returns true). In this case we do not start a GC of our own;
870 // we wait until the GCLocker-initiated GC has been performed,
871 // and then retry the allocation.
872 if (GC_locker::needs_gc()) {
873 should_try_gc = false;
874 } else {
875 // Read the GC count while still holding the Heap_lock.
876 gc_count_before = total_collections();
877 should_try_gc = true;
878 }
879 }
880 }
881
882 if (should_try_gc) {
883 bool succeeded;
884 result = do_collection_pause(word_size, gc_count_before, &succeeded,
885 GCCause::_g1_inc_collection_pause, *gc_attempt);
886 *gc_attempt += 1;
887 if (result != NULL) {
888 assert(succeeded, "only way to get back a non-NULL result");
889 return result;
890 }
891
892 if (succeeded) {
893 // If we get here we successfully scheduled a collection which
894 // failed to allocate. No point in trying to allocate
895 // further. We'll just return NULL.
896 MutexLockerEx x(Heap_lock);
897 *gc_count_before_ret = total_collections();
898 return NULL;
899 }
900 } else {
901 if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
902 MutexLockerEx x(Heap_lock);
903 *gc_count_before_ret = total_collections();
904 return NULL;
905 }
906 // The GCLocker is either active or the GCLocker initiated
// [lines 907-920 omitted from this excerpt; the comment above continues there]
921 result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
922 false /* bot_updates */);
923 if (result != NULL) {
924 return result;
925 }
926
927 // Give a warning if we seem to be looping forever.
928 if ((QueuedAllocationWarningCount > 0) &&
929 (try_count % QueuedAllocationWarningCount == 0)) {
930 warning("G1CollectedHeap::attempt_allocation_slow() "
931 "retries %d times", try_count);
932 }
933 }
934
935 ShouldNotReachHere();
936 return NULL;
937 }
938
939 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
940 uint* gc_count_before_ret,
941 uint* gclocker_retry_count_ret,
942 uint* gc_attempt) {
943 // The structure of this method has a lot of similarities to
944 // attempt_allocation_slow(). The reason these two were not merged
945 // into a single one is that such a method would require several "if
946 // allocation is not humongous do this, otherwise do that"
947 // conditional paths which would obscure its flow. In fact, an early
948 // version of this code did use a unified method which was harder to
949 // follow and, as a result, it had subtle bugs that were hard to
950 // track down. So keeping these two methods separate allows each to
951 // be more readable. It will be good to keep these two in sync as
952 // much as possible.
953
954 assert_heap_not_locked_and_not_at_safepoint();
955 assert(is_humongous(word_size), "attempt_allocation_humongous() "
956 "should only be called for humongous allocations");
957
958 // Humongous objects can exhaust the heap quickly, so we should check if we
959 // need to start a marking cycle at each humongous object allocation. We do
960 // the check before we do the actual allocation. The reason for doing it
961 // before the allocation is that we avoid having to keep track of the newly
962 // allocated memory while we do a GC.
// [lines 963-992 omitted from this excerpt; the text below resumes mid-comment]
993 // returns true). In this case we do not try this GC and
994 // wait until the GCLocker initiated GC is performed, and
995 // then retry the allocation.
996 if (GC_locker::needs_gc()) {
997 should_try_gc = false;
998 } else {
999 // Read the GC count while still holding the Heap_lock.
1000 gc_count_before = total_collections();
1001 should_try_gc = true;
1002 }
1003 }
1004 }
1005
1006 if (should_try_gc) {
1007 // If we failed to allocate the humongous object, we should try to
1008 // do a collection pause (if we're allowed) in case it reclaims
1009 // enough space for the allocation to succeed after the pause.
1010
1011 bool succeeded;
1012 result = do_collection_pause(word_size, gc_count_before, &succeeded,
1013 GCCause::_g1_humongous_allocation, *gc_attempt);
1014 *gc_attempt += 1;
1015 if (result != NULL) {
1016 assert(succeeded, "only way to get back a non-NULL result");
1017 return result;
1018 }
1019
1020 if (succeeded) {
1021 // If we get here we successfully scheduled a collection which
1022 // failed to allocate. No point in trying to allocate
1023 // further. We'll just return NULL.
1024 MutexLockerEx x(Heap_lock);
1025 *gc_count_before_ret = total_collections();
1026 return NULL;
1027 }
1028 } else {
1029 if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
1030 MutexLockerEx x(Heap_lock);
1031 *gc_count_before_ret = total_collections();
1032 return NULL;
1033 }
1034 // The GCLocker is either active or the GCLocker initiated
3389 }
3390
3391 // FIXME: what is this about?
3392 // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
3393 // is set.
3394 COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
3395 "derived pointer present"));
3396 // always_do_update_barrier = true;
3397
3398 resize_all_tlabs();
3399 allocation_context_stats().update(full);
3400
3401 // We have just completed a GC. Update the soft reference
3402 // policy with the new heap occupancy.
3403 Universe::update_heap_info_at_gc();
3404 }
3405
3406 HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
3407 uint gc_count_before,
3408 bool* succeeded,
3409 GCCause::Cause gc_cause,
3410 uint gc_attempt) {
3411 assert_heap_not_locked_and_not_at_safepoint();
3412 g1_policy()->record_stop_world_start();
3413 VM_G1IncCollectionPause op(gc_count_before,
3414 word_size,
3415 false, /* should_initiate_conc_mark */
3416 g1_policy()->max_pause_time_ms(),
3417 gc_cause,
3418 gc_attempt);
3419
3420 op.set_allocation_context(AllocationContext::current());
3421 VMThread::execute(&op);
3422
3423 HeapWord* result = op.result();
3424 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
3425 assert(result == NULL || ret_succeeded,
3426 "the result should be NULL if the VM did not succeed");
3427 *succeeded = ret_succeeded;
3428
3429 assert_heap_not_locked();
3430 return result;
3431 }
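// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file. The extra constructor
// argument above implies that the pause operation now records which allocation
// attempt it serves. The struct below only models the state the patched
// VM_G1IncCollectionPause appears to carry; the field names and any downstream
// use of gc_attempt (an event or log field, say) are assumptions, not taken
// from this excerpt.
#include <cstddef>

namespace g1_sketch {
  struct inc_collection_pause_model {
    unsigned gc_count_before;           // snapshot used by the prologue check
    size_t   word_size;                 // allocation the pause is performed for
    bool     should_initiate_conc_mark;
    double   target_pause_time_ms;
    unsigned gc_attempt;                // new in the patch: retry ordinal, starting at 1
  };
}
// ---------------------------------------------------------------------------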
3432
3433 void
3434 G1CollectedHeap::doConcurrentMark() {
3435 MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
3436 if (!_cmThread->in_progress()) {
3437 _cmThread->set_started();
3438 CGC_lock->notify();
// [Excerpt 2 breaks off here, mid-way through doConcurrentMark().]
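// ---------------------------------------------------------------------------
// Illustrative sketch, not part of the original file. doConcurrentMark() above
// is one half of a monitor handshake: it sets a "started" flag and notifies
// under CGC_lock, while the concurrent mark thread waits on the same monitor
// until the flag is set. A self-contained model of that handshake using
// standard C++ primitives (the real code uses HotSpot's Monitor class):
#include <condition_variable>
#include <mutex>

namespace g1_sketch {
  std::mutex              cgc_lock;
  std::condition_variable cgc_cv;
  bool                    cm_started = false;

  // Counterpart of doConcurrentMark(): request a marking cycle if idle.
  void start_concurrent_mark() {
    std::lock_guard<std::mutex> x(cgc_lock);
    if (!cm_started) {
      cm_started = true;
      cgc_cv.notify_one();
    }
  }

  // Counterpart of the mark thread's idle loop: sleep until a cycle is requested.
  void concurrent_mark_thread_wait() {
    std::unique_lock<std::mutex> x(cgc_lock);
    cgc_cv.wait(x, [] { return cm_started; });
  }
}
// ---------------------------------------------------------------------------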