
src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp

rev 8024 : imported patch event1
* * *
imported patch event2

*** 754,787 ****
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "we do not allow humongous TLABs");

    uint dummy_gc_count_before;
    uint dummy_gclocker_retry_count = 0;
!   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count);
  }

  HeapWord*
  G1CollectedHeap::mem_allocate(size_t word_size,
                                bool*  gc_overhead_limit_was_exceeded) {
    assert_heap_not_locked_and_not_at_safepoint();

    // Loop until the allocation is satisfied, or unsatisfied after GC.
!   for (uint try_count = 1, gclocker_retry_count = 0; /* we'll return */; try_count += 1) {
      uint gc_count_before;

      HeapWord* result = NULL;
      if (!is_humongous(word_size)) {
!       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count);
      } else {
!       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count);
      }
      if (result != NULL) {
        return result;
      }

      // Create the garbage collection operation...
!     VM_G1CollectForAllocation op(gc_count_before, word_size);
      op.set_allocation_context(AllocationContext::current());

      // ...and get the VM thread to execute it.
      VMThread::execute(&op);
--- 754,788 ----
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "we do not allow humongous TLABs");

    uint dummy_gc_count_before;
    uint dummy_gclocker_retry_count = 0;
!   uint gc_attempts = 1;
!   return attempt_allocation(word_size, &dummy_gc_count_before, &dummy_gclocker_retry_count, &gc_attempts);
  }

  HeapWord*
  G1CollectedHeap::mem_allocate(size_t word_size,
                                bool*  gc_overhead_limit_was_exceeded) {
    assert_heap_not_locked_and_not_at_safepoint();

    // Loop until the allocation is satisfied, or unsatisfied after GC.
!   for (uint try_count = 1, gclocker_retry_count = 0, gc_attempt = 1; /* we'll return */; try_count += 1) {
      uint gc_count_before;

      HeapWord* result = NULL;
      if (!is_humongous(word_size)) {
!       result = attempt_allocation(word_size, &gc_count_before, &gclocker_retry_count, &gc_attempt);
      } else {
!       result = attempt_allocation_humongous(word_size, &gc_count_before, &gclocker_retry_count, &gc_attempt);
      }
      if (result != NULL) {
        return result;
      }

      // Create the garbage collection operation...
!     VM_G1CollectForAllocation op(gc_count_before, word_size, gc_attempt++);
      op.set_allocation_context(AllocationContext::current());

      // ...and get the VM thread to execute it.
      VMThread::execute(&op);
*** 817,827 ****
  }

  HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                     AllocationContext_t context,
                                                     uint* gc_count_before_ret,
!                                                    uint* gclocker_retry_count_ret) {
    // Make sure you read the note in attempt_allocation_humongous().
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
           "be called for humongous allocation requests");
--- 818,829 ----
  }

  HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size,
                                                     AllocationContext_t context,
                                                     uint* gc_count_before_ret,
!                                                    uint* gclocker_retry_count_ret,
!                                                    uint* gc_attempt) {
    // Make sure you read the note in attempt_allocation_humongous().
    assert_heap_not_locked_and_not_at_safepoint();
    assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
           "be called for humongous allocation requests");
*** 878,888 ****
    }

    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
!                                  GCCause::_g1_inc_collection_pause);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }
--- 880,891 ----
    }

    if (should_try_gc) {
      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
!                                  GCCause::_g1_inc_collection_pause, *gc_attempt);
!     *gc_attempt += 1;
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }
*** 933,943 ****
    return NULL;
  }

  HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                          uint* gc_count_before_ret,
!                                                         uint* gclocker_retry_count_ret) {
    // The structure of this method has a lot of similarities to
    // attempt_allocation_slow(). The reason these two were not merged
    // into a single one is that such a method would require several "if
    // allocation is not humongous do this, otherwise do that"
    // conditional paths which would obscure its flow. In fact, an early
--- 936,947 ----
    return NULL;
  }

  HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                          uint* gc_count_before_ret,
!                                                         uint* gclocker_retry_count_ret,
!                                                         uint* gc_attempt) {
    // The structure of this method has a lot of similarities to
    // attempt_allocation_slow(). The reason these two were not merged
    // into a single one is that such a method would require several "if
    // allocation is not humongous do this, otherwise do that"
    // conditional paths which would obscure its flow. In fact, an early
*** 1004,1014 ****
      // do a collection pause (if we're allowed) in case it reclaims
      // enough space for the allocation to succeed after the pause.

      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
!                                  GCCause::_g1_humongous_allocation);
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }
--- 1008,1019 ----
      // do a collection pause (if we're allowed) in case it reclaims
      // enough space for the allocation to succeed after the pause.

      bool succeeded;
      result = do_collection_pause(word_size, gc_count_before, &succeeded,
!                                  GCCause::_g1_humongous_allocation, *gc_attempt);
!     *gc_attempt += 1;
      if (result != NULL) {
        assert(succeeded, "only way to get back a non-NULL result");
        return result;
      }
*** 3399,3416 ****
  }

  HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                 uint gc_count_before,
                                                 bool* succeeded,
!                                                GCCause::Cause gc_cause) {
    assert_heap_not_locked_and_not_at_safepoint();
    g1_policy()->record_stop_world_start();

    VM_G1IncCollectionPause op(gc_count_before,
                               word_size,
                               false, /* should_initiate_conc_mark */
                               g1_policy()->max_pause_time_ms(),
!                              gc_cause);
    op.set_allocation_context(AllocationContext::current());
    VMThread::execute(&op);

    HeapWord* result = op.result();
--- 3404,3423 ----
  }

  HeapWord* G1CollectedHeap::do_collection_pause(size_t word_size,
                                                 uint gc_count_before,
                                                 bool* succeeded,
!                                                GCCause::Cause gc_cause,
!                                                uint gc_attempt) {
    assert_heap_not_locked_and_not_at_safepoint();
    g1_policy()->record_stop_world_start();

    VM_G1IncCollectionPause op(gc_count_before,
                               word_size,
                               false, /* should_initiate_conc_mark */
                               g1_policy()->max_pause_time_ms(),
!                              gc_cause,
!                              gc_attempt);
    op.set_allocation_context(AllocationContext::current());
    VMThread::execute(&op);

    HeapWord* result = op.result();
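The common thread of the hunks above is a per-allocation-request attempt counter: each allocation entry point seeds a counter at 1, passes a pointer to it down the slow and humongous paths, and every collection pause issued on behalf of that request is tagged with the current value before the counter is bumped. The standalone sketch below illustrates only that pattern; PauseOp, the simplified allocation signature, and the printf output are stand-ins for illustration, not the HotSpot code in this changeset.

  #include <cstdio>
  #include <cstddef>

  // Stand-in for the VM operations (VM_G1IncCollectionPause /
  // VM_G1CollectForAllocation) that receive the attempt number in the patch.
  struct PauseOp {
    unsigned gc_attempt;
    explicit PauseOp(unsigned attempt) : gc_attempt(attempt) {}
    void execute() { std::printf("pause requested, attempt %u\n", gc_attempt); }
  };

  // Hypothetical slow path: *gc_attempt is owned by the caller and is
  // incremented once per pause actually requested, so successive pauses
  // for the same allocation request carry a growing attempt number.
  void* attempt_allocation_slow(size_t word_size, unsigned* gc_attempt) {
    for (unsigned try_count = 1; try_count <= 3; ++try_count) {
      void* result = nullptr;        // pretend the allocation attempt failed
      if (result != nullptr) {
        return result;
      }
      PauseOp op(*gc_attempt);       // tag the pause with the current attempt
      *gc_attempt += 1;              // the next pause for this request counts up
      op.execute();
    }
    return nullptr;
  }

  int main() {
    unsigned gc_attempt = 1;         // seeded at 1 per allocation request
    attempt_allocation_slow(128, &gc_attempt);
    return 0;
  }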