< prev index next >

src/share/vm/gc/parallel/psOldGen.cpp

Print this page

        

*** 227,238 **** if (bytes == 0) { return; } MutexLocker x(ExpandHeap_lock); const size_t alignment = virtual_space()->alignment(); ! size_t aligned_bytes = align_size_up(bytes, alignment); ! size_t aligned_expand_bytes = align_size_up(MinHeapDeltaBytes, alignment); if (UseNUMA) { // With NUMA we use round-robin page allocation for the old gen. Expand by at least // providing a page per lgroup. Alignment is larger or equal to the page size. aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num()); --- 227,238 ---- if (bytes == 0) { return; } MutexLocker x(ExpandHeap_lock); const size_t alignment = virtual_space()->alignment(); ! size_t aligned_bytes = align_up(bytes, alignment); ! size_t aligned_expand_bytes = align_up(MinHeapDeltaBytes, alignment); if (UseNUMA) { // With NUMA we use round-robin page allocation for the old gen. Expand by at least // providing a page per lgroup. Alignment is larger or equal to the page size. aligned_expand_bytes = MAX2(aligned_expand_bytes, alignment * os::numa_get_groups_num());
*** 242,252 **** // return true with the implication that an expansion was done when it // was not. A call to expand implies a best effort to expand by "bytes" // but not a guarantee. Align down to give a best effort. This is likely // the most that the generation can expand since it has some capacity to // start with. ! aligned_bytes = align_size_down(bytes, alignment); } bool success = false; if (aligned_expand_bytes > aligned_bytes) { success = expand_by(aligned_expand_bytes); --- 242,252 ---- // return true with the implication that an expansion was done when it // was not. A call to expand implies a best effort to expand by "bytes" // but not a guarantee. Align down to give a best effort. This is likely // the most that the generation can expand since it has some capacity to // start with. ! aligned_bytes = align_down(bytes, alignment); } bool success = false; if (aligned_expand_bytes > aligned_bytes) { success = expand_by(aligned_expand_bytes);
*** 316,326 **** void PSOldGen::shrink(size_t bytes) { assert_lock_strong(ExpandHeap_lock); assert_locked_or_safepoint(Heap_lock); ! size_t size = align_size_down(bytes, virtual_space()->alignment()); if (size > 0) { assert_lock_strong(ExpandHeap_lock); virtual_space()->shrink_by(bytes); post_resize(); --- 316,326 ---- void PSOldGen::shrink(size_t bytes) { assert_lock_strong(ExpandHeap_lock); assert_locked_or_safepoint(Heap_lock); ! size_t size = align_down(bytes, virtual_space()->alignment()); if (size > 0) { assert_lock_strong(ExpandHeap_lock); virtual_space()->shrink_by(bytes); post_resize();
*** 341,351 **** } // Adjust according to our min and max new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size()); assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?"); ! new_size = align_size_up(new_size, alignment); const size_t current_size = capacity_in_bytes(); log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: " "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT --- 341,351 ---- } // Adjust according to our min and max new_size = MAX2(MIN2(new_size, gen_size_limit()), min_gen_size()); assert(gen_size_limit() >= reserved().byte_size(), "max new size problem?"); ! new_size = align_up(new_size, alignment); const size_t current_size = capacity_in_bytes(); log_trace(gc, ergo)("AdaptiveSizePolicy::old generation size: " "desired free: " SIZE_FORMAT " used: " SIZE_FORMAT
< prev index next >