
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp





 777         // this for non-humongous allocations, though.
 778         dirty_young_block(result, word_size);
 779       }
 780       return result;
 781     } else {
 782       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
 783         return NULL;
 784       }
 785       assert(op.result() == NULL,
 786              "the result should be NULL if the VM op did not succeed");
 787     }
 788 
 789     // Give a warning if we seem to be looping forever.
 790     if ((QueuedAllocationWarningCount > 0) &&
 791         (try_count % QueuedAllocationWarningCount == 0)) {
 792       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
 793     }
 794   }
 795 
 796   ShouldNotReachHere();
 797   return NULL;
 798 }
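
The retry discipline visible in the hunk above (bail out once the GC locker has forced more retries than GCLockerRetryAllocationCount allows, warn every QueuedAllocationWarningCount iterations) can be summarized with the standalone sketch below. The constants and the try_alloc callback are illustrative stand-ins, not the HotSpot API.

#include <cstdint>
#include <cstdio>

// Illustrative values standing in for the -XX flags referenced above.
static const uint32_t kGCLockerRetryAllocationCount = 2;
static const uint32_t kQueuedAllocationWarningCount = 16;

// Sketch of the slow-path retry discipline: keep retrying until the
// allocation succeeds, give up once the GC locker has stalled us more
// times than the threshold allows, and warn periodically so an endless
// loop is visible rather than silent.
void* retry_allocation_sketch(bool (*try_alloc)(void** result, bool* gclocker_stall)) {
  uint32_t try_count = 0;
  uint32_t gclocker_retry_count = 0;
  for (;;) {
    try_count++;
    void* result = nullptr;
    bool stalled_by_gclocker = false;
    if (try_alloc(&result, &stalled_by_gclocker)) {
      return result;                               // allocation (or a GC) made room
    }
    if (stalled_by_gclocker) {
      if (gclocker_retry_count > kGCLockerRetryAllocationCount) {
        return nullptr;                            // too many GC locker stalls: give up
      }
      gclocker_retry_count++;
    }
    if (kQueuedAllocationWarningCount > 0 &&
        try_count % kQueuedAllocationWarningCount == 0) {
      fprintf(stderr, "allocation retried %u times\n", try_count);
    }
  }
}
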
 799 
 800 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 801                                                    AllocationContext_t context,
 802                                                    uint* gc_count_before_ret,
 803                                                    uint* gclocker_retry_count_ret) {
 804   // Make sure you read the note in attempt_allocation_humongous().
 805 
 806   assert_heap_not_locked_and_not_at_safepoint();
 807   assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
 808          "be called for humongous allocation requests");
 809 
 810   // We should only get here after the first-level allocation attempt
 811   // (attempt_allocation()) failed to allocate.
 812 
 813   // We will loop until a) we manage to successfully perform the
 814   // allocation or b) we successfully schedule a collection which
 815   // fails to perform the allocation. b) is the only case when we'll
 816   // return NULL.
 817   HeapWord* result = NULL;
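
The a)/b) contract in the comment above can be written as a small skeleton. The callables below are hypothetical placeholders for the (elided) loop body, which does considerably more work in the real method.

// NULL escapes only when a collection was successfully scheduled and the
// post-GC allocation attempt still failed (case b); every other failure
// loops around and retries.
template <typename AllocFn, typename CollectFn>
void* allocation_loop_sketch(AllocFn try_alloc, CollectFn collect_then_alloc) {
  for (;;) {
    if (void* p = try_alloc()) {
      return p;                     // case a): the allocation itself succeeded
    }
    bool gc_succeeded = false;
    void* p = collect_then_alloc(&gc_succeeded);
    if (p != nullptr) {
      return p;                     // the collection reclaimed enough space
    }
    if (gc_succeeded) {
      return nullptr;               // case b): GC ran and there is still no room
    }
    // Otherwise the GC was not scheduled (another thread beat us to it, or
    // the GC locker was held); take another trip around the loop.
  }
}
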


 894     // allocation attempt in case another thread successfully
 895     // performed a collection and reclaimed enough space. We do the
 896     // first attempt (without holding the Heap_lock) here and the
 897     // follow-on attempt will be at the start of the next loop
 898     // iteration (after taking the Heap_lock).
 899     result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
 900                                                                            false /* bot_updates */);
 901     if (result != NULL) {
 902       return result;
 903     }
 904 
 905     // Give a warning if we seem to be looping forever.
 906     if ((QueuedAllocationWarningCount > 0) &&
 907         (try_count % QueuedAllocationWarningCount == 0)) {
 908       warning("G1CollectedHeap::attempt_allocation_slow() "
 909               "retries %d times", try_count);
 910     }
 911   }
 912 
 913   ShouldNotReachHere();
 914   return NULL;
 915 }
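
The comment near line 894 describes a two-phase attempt: first without holding the Heap_lock, then again under the lock at the top of the next iteration. A minimal model of that fast-path/slow-path split, using hypothetical names rather than the HotSpot MutatorAllocRegion API, might look like this:

#include <atomic>
#include <cstddef>
#include <mutex>

// Bump-pointer region with a lock-free fast path and a locked slow path.
struct AllocRegionSketch {
  std::mutex         heap_lock;   // stands in for Heap_lock
  std::atomic<char*> top;
  char*              end;

  AllocRegionSketch(char* start, char* limit) : top(start), end(limit) {}

  // Fast path: CAS the top pointer forward without taking any lock.
  void* attempt_allocation(size_t bytes) {
    char* old_top = top.load(std::memory_order_relaxed);
    while (old_top + bytes <= end) {
      if (top.compare_exchange_weak(old_top, old_top + bytes)) {
        return old_top;
      }
      // compare_exchange_weak refreshed old_top; retry with the new value.
    }
    return nullptr;
  }

  // Slow path: retry under the lock, where a failure could (in the real
  // collector) retire the region or schedule a collection without racing
  // other mutator threads.
  void* attempt_allocation_locked(size_t bytes) {
    std::lock_guard<std::mutex> guard(heap_lock);
    return attempt_allocation(bytes);
  }
};
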
 916 
 917 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 918                                                         uint* gc_count_before_ret,
 919                                                         uint* gclocker_retry_count_ret) {
 920   // The structure of this method has a lot of similarities to
 921   // attempt_allocation_slow(). The reason these two were not merged
 922   // into a single one is that such a method would require several "if
 923   // allocation is not humongous do this, otherwise do that"
 924   // conditional paths which would obscure its flow. In fact, an early
 925   // version of this code did use a unified method which was harder to
 926   // follow and, as a result, it had subtle bugs that were hard to
 927   // track down. So keeping these two methods separate allows each to
 928   // be more readable. It will be good to keep these two in sync as
 929   // much as possible.
 930 
 931   assert_heap_not_locked_and_not_at_safepoint();
 932   assert(is_humongous(word_size), "attempt_allocation_humongous() "
 933          "should only be called for humongous allocations");
 934 
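
Both entry points assert on is_humongous(word_size). In G1 an allocation counts as humongous when it is larger than half a heap region and therefore cannot come out of a normal mutator alloc region. A sketch of that test, with an illustrative region size in place of the value G1 derives at startup, is:

#include <cstddef>

const size_t kRegionSizeBytes = 1u * 1024 * 1024;                 // illustrative: 1 MB regions
const size_t kRegionSizeWords = kRegionSizeBytes / sizeof(void*);

// Humongous means "bigger than half a region": such an object gets its own
// dedicated region(s) instead of being bump-allocated in a mutator region.
inline bool is_humongous_sketch(size_t word_size) {
  return word_size > kRegionSizeWords / 2;
}
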


1012       // then retry the allocation.
1013       GC_locker::stall_until_clear();
1014       (*gclocker_retry_count_ret) += 1;
1015     }
1016 
1017     // We can reach here if we were unsuccessful in scheduling a
1018     // collection (because another thread beat us to it) or if we were
 1019     // stalled due to the GC locker. In either case we should retry the
1020     // allocation attempt in case another thread successfully
1021     // performed a collection and reclaimed enough space.  Give a
1022     // warning if we seem to be looping forever.
1023 
1024     if ((QueuedAllocationWarningCount > 0) &&
1025         (try_count % QueuedAllocationWarningCount == 0)) {
1026       warning("G1CollectedHeap::attempt_allocation_humongous() "
1027               "retries %d times", try_count);
1028     }
1029   }
1030 
1031   ShouldNotReachHere();
1032   return NULL;
1033 }
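
GC_locker::stall_until_clear() above blocks the allocating thread until no thread remains inside a JNI critical section, after which the allocation is retried. A condition-variable model of that behaviour (a hypothetical class, not the real GC_locker) could look like:

#include <condition_variable>
#include <mutex>

class GCLockerSketch {
  std::mutex              _mu;
  std::condition_variable _cv;
  int                     _active_critical = 0;  // threads inside JNI critical sections

 public:
  void enter_critical() {
    std::lock_guard<std::mutex> g(_mu);
    _active_critical++;
  }
  void leave_critical() {
    std::lock_guard<std::mutex> g(_mu);
    if (--_active_critical == 0) {
      _cv.notify_all();                          // wake any stalled allocators
    }
  }
  // Block until no thread holds the locker, then let the caller retry its
  // allocation (cf. the retry counter bumped right after the call above).
  void stall_until_clear() {
    std::unique_lock<std::mutex> lk(_mu);
    _cv.wait(lk, [this] { return _active_critical == 0; });
  }
};
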
1034 
1035 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1036                                                            AllocationContext_t context,
1037                                                            bool expect_null_mutator_alloc_region) {
1038   assert_at_safepoint(true /* should_be_vm_thread */);
1039   assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1040                                              !expect_null_mutator_alloc_region,
1041          "the current alloc region was unexpectedly found to be non-NULL");
1042 
1043   if (!is_humongous(word_size)) {
1044     return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
1045                                                       false /* bot_updates */);
1046   } else {
1047     HeapWord* result = humongous_obj_allocate(word_size, context);
1048     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1049       g1_policy()->set_initiate_conc_mark_if_possible();
1050     }
1051     return result;
1052   }




 777         // this for non-humongous allocations, though.
 778         dirty_young_block(result, word_size);
 779       }
 780       return result;
 781     } else {
 782       if (gclocker_retry_count > GCLockerRetryAllocationCount) {
 783         return NULL;
 784       }
 785       assert(op.result() == NULL,
 786              "the result should be NULL if the VM op did not succeed");
 787     }
 788 
 789     // Give a warning if we seem to be looping forever.
 790     if ((QueuedAllocationWarningCount > 0) &&
 791         (try_count % QueuedAllocationWarningCount == 0)) {
 792       warning("G1CollectedHeap::mem_allocate retries %d times", try_count);
 793     }
 794   }
 795 
 796   ShouldNotReachHere();

 797 }
 798 
 799 HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
 800                                                    AllocationContext_t context,
 801                                                    uint* gc_count_before_ret,
 802                                                    uint* gclocker_retry_count_ret) {
 803   // Make sure you read the note in attempt_allocation_humongous().
 804 
 805   assert_heap_not_locked_and_not_at_safepoint();
 806   assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
 807          "be called for humongous allocation requests");
 808 
 809   // We should only get here after the first-level allocation attempt
 810   // (attempt_allocation()) failed to allocate.
 811 
 812   // We will loop until a) we manage to successfully perform the
 813   // allocation or b) we successfully schedule a collection which
 814   // fails to perform the allocation. b) is the only case when we'll
 815   // return NULL.
 816   HeapWord* result = NULL;


 893     // allocation attempt in case another thread successfully
 894     // performed a collection and reclaimed enough space. We do the
 895     // first attempt (without holding the Heap_lock) here and the
 896     // follow-on attempt will be at the start of the next loop
 897     // iteration (after taking the Heap_lock).
 898     result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
 899                                                                            false /* bot_updates */);
 900     if (result != NULL) {
 901       return result;
 902     }
 903 
 904     // Give a warning if we seem to be looping forever.
 905     if ((QueuedAllocationWarningCount > 0) &&
 906         (try_count % QueuedAllocationWarningCount == 0)) {
 907       warning("G1CollectedHeap::attempt_allocation_slow() "
 908               "retries %d times", try_count);
 909     }
 910   }
 911 
 912   ShouldNotReachHere();

 913 }
 914 
 915 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
 916                                                         uint* gc_count_before_ret,
 917                                                         uint* gclocker_retry_count_ret) {
 918   // The structure of this method has a lot of similarities to
 919   // attempt_allocation_slow(). The reason these two were not merged
 920   // into a single one is that such a method would require several "if
 921   // allocation is not humongous do this, otherwise do that"
 922   // conditional paths which would obscure its flow. In fact, an early
 923   // version of this code did use a unified method which was harder to
 924   // follow and, as a result, it had subtle bugs that were hard to
 925   // track down. So keeping these two methods separate allows each to
 926   // be more readable. It will be good to keep these two in sync as
 927   // much as possible.
 928 
 929   assert_heap_not_locked_and_not_at_safepoint();
 930   assert(is_humongous(word_size), "attempt_allocation_humongous() "
 931          "should only be called for humongous allocations");
 932 


1010       // then retry the allocation.
1011       GC_locker::stall_until_clear();
1012       (*gclocker_retry_count_ret) += 1;
1013     }
1014 
1015     // We can reach here if we were unsuccessful in scheduling a
1016     // collection (because another thread beat us to it) or if we were
 1017     // stalled due to the GC locker. In either case we should retry the
1018     // allocation attempt in case another thread successfully
1019     // performed a collection and reclaimed enough space.  Give a
1020     // warning if we seem to be looping forever.
1021 
1022     if ((QueuedAllocationWarningCount > 0) &&
1023         (try_count % QueuedAllocationWarningCount == 0)) {
1024       warning("G1CollectedHeap::attempt_allocation_humongous() "
1025               "retries %d times", try_count);
1026     }
1027   }
1028 
1029   ShouldNotReachHere();

1030 }
1031 
1032 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1033                                                            AllocationContext_t context,
1034                                                            bool expect_null_mutator_alloc_region) {
1035   assert_at_safepoint(true /* should_be_vm_thread */);
1036   assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1037                                              !expect_null_mutator_alloc_region,
1038          "the current alloc region was unexpectedly found to be non-NULL");
1039 
1040   if (!is_humongous(word_size)) {
1041     return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
1042                                                       false /* bot_updates */);
1043   } else {
1044     HeapWord* result = humongous_obj_allocate(word_size, context);
1045     if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1046       g1_policy()->set_initiate_conc_mark_if_possible();
1047     }
1048     return result;
1049   }

