
src/share/vm/gc/parallel/asPSOldGen.cpp

*** 89,99 ****
    assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
    assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");

    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    size_t result = gen_size_limit() - virtual_space()->committed_size();
!   size_t result_aligned = align_size_down(result, heap->generation_alignment());
    return result_aligned;
  }

  size_t ASPSOldGen::available_for_contraction() {
    size_t uncommitted_bytes = virtual_space()->uncommitted_size();
--- 89,99 ----
    assert(virtual_space()->is_aligned(gen_size_limit()), "not aligned");
    assert(gen_size_limit() >= virtual_space()->committed_size(), "bad gen size");

    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    size_t result = gen_size_limit() - virtual_space()->committed_size();
!   size_t result_aligned = align_down(result, heap->generation_alignment());
    return result_aligned;
  }

  size_t ASPSOldGen::available_for_contraction() {
    size_t uncommitted_bytes = virtual_space()->uncommitted_size();
*** 104,114 ****
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    const size_t gen_alignment = heap->generation_alignment();
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    const size_t working_size =
      used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
!   const size_t working_aligned = align_size_up(working_size, gen_alignment);
    const size_t working_or_min = MAX2(working_aligned, min_gen_size());
    if (working_or_min > reserved().byte_size()) {
      // If the used or minimum gen size (aligned up) is greater
      // than the total reserved size, then the space available
      // for contraction should (after proper alignment) be 0
--- 104,114 ----
    ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
    const size_t gen_alignment = heap->generation_alignment();
    PSAdaptiveSizePolicy* policy = heap->size_policy();
    const size_t working_size =
      used_in_bytes() + (size_t) policy->avg_promoted()->padded_average();
!   const size_t working_aligned = align_up(working_size, gen_alignment);
    const size_t working_or_min = MAX2(working_aligned, min_gen_size());
    if (working_or_min > reserved().byte_size()) {
      // If the used or minimum gen size (aligned up) is greater
      // than the total reserved size, then the space available
      // for contraction should (after proper alignment) be 0
*** 122,132 ****
    // "decrement" fraction is conservative because its intent is to
    // only reduce the footprint.

    size_t result = policy->promo_increment_aligned_down(max_contraction);
    // Also adjust for inter-generational alignment
!   size_t result_aligned = align_size_down(result, gen_alignment);

    Log(gc, ergo) log;
    if (log.is_trace()) {
      size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
      size_t promo_increment = policy->promo_increment(max_contraction);
--- 122,132 ----
    // "decrement" fraction is conservative because its intent is to
    // only reduce the footprint.

    size_t result = policy->promo_increment_aligned_down(max_contraction);
    // Also adjust for inter-generational alignment
!   size_t result_aligned = align_down(result, gen_alignment);

    Log(gc, ergo) log;
    if (log.is_trace()) {
      size_t working_promoted = (size_t) policy->avg_promoted()->padded_average();
      size_t promo_increment = policy->promo_increment(max_contraction);
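
Each hunk above is a rename only: align_size_down/align_size_up become align_down/align_up with the same arguments and the same rounding behavior. As a rough, self-contained sketch of that behavior (not the actual HotSpot definitions; the alignment value below is made up), the helpers round a size down or up to a power-of-two boundary:

  #include <cassert>
  #include <cstddef>
  #include <cstdio>

  // Illustrative stand-ins for the renamed helpers; both assume the
  // alignment argument is a power of two.
  static size_t align_down(size_t size, size_t alignment) {
    assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment expected");
    return size & ~(alignment - 1);                     // clear the low bits
  }

  static size_t align_up(size_t size, size_t alignment) {
    assert((alignment & (alignment - 1)) == 0 && "power-of-two alignment expected");
    return (size + alignment - 1) & ~(alignment - 1);   // round up to the next boundary
  }

  int main() {
    const size_t gen_alignment = 64 * 1024;             // hypothetical alignment value
    printf("%zu\n", align_down(100 * 1024, gen_alignment));  // prints 65536
    printf("%zu\n", align_up(100 * 1024, gen_alignment));    // prints 131072
    return 0;
  }

Under this rounding, aligning down can only shrink the computed expansion or contraction amount, and aligning up can only grow the working size, which matches the conservative intent noted in the comments in the hunks above.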