
src/hotspot/share/gc/g1/g1CollectedHeap.cpp

rev 49851 : imported patch 8191471-g1-varying-tlab-allocation

*** 382,407 ****
    _verifier->verify_region_sets_optional();
  
    return result;
  }
  
! HeapWord* G1CollectedHeap::allocate_new_tlab(size_t word_size) {
    assert_heap_not_locked_and_not_at_safepoint();
!   assert(!is_humongous(word_size), "we do not allow humongous TLABs");
  
!   return attempt_allocation(word_size);
  }
  
  HeapWord* G1CollectedHeap::mem_allocate(size_t word_size,
                                          bool* gc_overhead_limit_was_exceeded) {
    assert_heap_not_locked_and_not_at_safepoint();
  
    if (is_humongous(word_size)) {
      return attempt_allocation_humongous(word_size);
    }
!   return attempt_allocation(word_size);
  }
  
  HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
    ResourceMark rm; // For retrieving the thread names in log messages.
--- 382,410 ----
    _verifier->verify_region_sets_optional();
  
    return result;
  }
  
! HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_word_size,
!                                              size_t desired_word_size,
!                                              size_t* actual_word_size) {
    assert_heap_not_locked_and_not_at_safepoint();
!   assert(!is_humongous(desired_word_size), "we do not allow humongous TLABs");
  
!   return attempt_allocation(min_word_size, desired_word_size, actual_word_size);
  }
  
  HeapWord* G1CollectedHeap::mem_allocate(size_t word_size,
                                          bool* gc_overhead_limit_was_exceeded) {
    assert_heap_not_locked_and_not_at_safepoint();
  
    if (is_humongous(word_size)) {
      return attempt_allocation_humongous(word_size);
    }
!   size_t dummy = 0;
!   return attempt_allocation(word_size, word_size, &dummy);
  }
  
  HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
    ResourceMark rm; // For retrieving the thread names in log messages.
*** 490,501 ****
      // allocation attempt in case another thread successfully
      // performed a collection and reclaimed enough space. We do the
      // first attempt (without holding the Heap_lock) here and the
      // follow-on attempt will be at the start of the next loop
      // iteration (after taking the Heap_lock).
! 
!     result = _allocator->attempt_allocation(word_size);
      if (result != NULL) {
        return result;
      }
  
      // Give a warning if we seem to be looping forever.
--- 493,504 ----
      // allocation attempt in case another thread successfully
      // performed a collection and reclaimed enough space. We do the
      // first attempt (without holding the Heap_lock) here and the
      // follow-on attempt will be at the start of the next loop
      // iteration (after taking the Heap_lock).
!     size_t dummy = 0;
!     result = _allocator->attempt_allocation(word_size, word_size, &dummy);
      if (result != NULL) {
        return result;
      }
  
      // Give a warning if we seem to be looping forever.
*** 720,742 ****
        increase_used(fill_size * HeapWordSize);
      }
    }
  }
  
! inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size) {
    assert_heap_not_locked_and_not_at_safepoint();
!   assert(!is_humongous(word_size), "attempt_allocation() should not "
           "be called for humongous allocation requests");
  
!   HeapWord* result = _allocator->attempt_allocation(word_size);
  
    if (result == NULL) {
!     result = attempt_allocation_slow(word_size);
    }
    assert_heap_not_locked();
    if (result != NULL) {
!     dirty_young_block(result, word_size);
    }
    return result;
  }
  
  void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
--- 723,750 ----
        increase_used(fill_size * HeapWordSize);
      }
    }
  }
  
! inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
!                                                      size_t desired_word_size,
!                                                      size_t* actual_word_size) {
    assert_heap_not_locked_and_not_at_safepoint();
!   assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
           "be called for humongous allocation requests");
  
!   HeapWord* result = _allocator->attempt_allocation(min_word_size, desired_word_size, actual_word_size);
  
    if (result == NULL) {
!     *actual_word_size = desired_word_size;
!     result = attempt_allocation_slow(desired_word_size);
    }
+ 
    assert_heap_not_locked();
    if (result != NULL) {
!     assert(*actual_word_size != 0, "Actual size must have been set here");
!     dirty_young_block(result, *actual_word_size);
    }
    return result;
  }
  
  void G1CollectedHeap::dealloc_archive_regions(MemRegion* ranges, size_t count) {
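For context: this patch changes the TLAB allocation contract so that attempt_allocation() takes both a minimum and a desired size in words and reports the size actually handed out through actual_word_size, allowing a request to be satisfied with less than the desired amount instead of failing when a region cannot fit the full desired size. The sketch below is a hypothetical, self-contained illustration of that min/desired/actual pattern; DemoAllocator and everything inside it are invented for the example and appear nowhere in the patch.

// Hypothetical, self-contained sketch of the min/desired/actual allocation
// contract this patch introduces. DemoAllocator is invented for illustration
// and is not part of the patch; only the parameter pattern
// (min_word_size, desired_word_size, *actual_word_size) mirrors it.
#include <cstddef>
#include <cstdio>

typedef size_t* HeapWord;   // stand-in for HotSpot's HeapWord*

struct DemoAllocator {
  HeapWord _top;   // next free word in the current region
  HeapWord _end;   // end of the current region

  // Hand out between min_word_size and desired_word_size words, reporting
  // the granted amount via *actual_word_size. Returns NULL only if even
  // min_word_size does not fit in the remaining space.
  HeapWord attempt_allocation(size_t min_word_size,
                              size_t desired_word_size,
                              size_t* actual_word_size) {
    size_t available = (size_t)(_end - _top);
    if (available < min_word_size) {
      *actual_word_size = 0;   // caller must not use the value on failure
      return NULL;
    }
    // Shrink the request to what fits instead of failing outright.
    size_t granted = available < desired_word_size ? available
                                                   : desired_word_size;
    HeapWord result = _top;
    _top += granted;
    *actual_word_size = granted;
    return result;
  }
};

int main() {
  size_t region[64];
  DemoAllocator alloc = { region, region + 64 };

  size_t actual = 0;
  // Ask for up to 100 words but accept as few as 16: the region only holds
  // 64 words, so the allocation succeeds with actual == 64.
  HeapWord p = alloc.attempt_allocation(16, 100, &actual);
  std::printf("result = %p, actual = %zu words\n", (void*)p, actual);
  return 0;
}

Note how the patch itself uses the same trick in reverse: mem_allocate() and attempt_allocation_slow() pass word_size for both bounds with a dummy output, preserving the old fixed-size behavior, while TLAB allocation can exploit the slack between the two bounds.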