< prev index next >

src/share/vm/gc/parallel/psParallelCompact.cpp

Print this page




// NOTE(review): webrev fragment — tail of an initialization routine whose
// signature is outside this chunk (presumably
// ParallelCompactData::initialize(MemRegion covered_region) — confirm in the
// full file).  It records the covered heap region, sanity-checks its
// alignment, and allocates the per-region/per-block metadata tables.
 415 {
 416   _region_start = covered_region.start();
 417   const size_t region_size = covered_region.word_size();
      // Debug builds also remember the exclusive end of the covered region.
 418   DEBUG_ONLY(_region_end = _region_start + region_size;)
 419 
      // The covered region must start on a region boundary ...
 420   assert(region_align_down(_region_start) == _region_start,
 421          "region start not aligned");
      // ... and span a whole number of regions (all low offset bits zero).
 422   assert((region_size & RegionSizeOffsetMask) == 0,
 423          "region size not a multiple of RegionSize");
 424 
      // Both tables must allocate; && short-circuits, so block data is not
      // allocated when the region-data allocation already failed.
 425   bool result = initialize_region_data(region_size) && initialize_block_data();
 426   return result;
 427 }
 428 
// Reserve (and fully commit) backing storage for 'count' metadata elements of
// 'element_size' bytes each, wrapped in a PSVirtualSpace.  The number of bytes
// actually reserved (raw size rounded up to page/allocation granularity) is
// recorded in _reserved_byte_size.  Returns NULL (0) on failure.
// NOTE(review): the closing '}' of this function lies just past this chunk.
 429 PSVirtualSpace*
 430 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 431 {
      // NOTE(review): no overflow check on count * element_size — presumably
      // callers guarantee the product fits in size_t; confirm at call sites.
 432   const size_t raw_bytes = count * element_size;
      // Pick a page size suitable for a region of raw_bytes; the literal 10
      // looks like a minimum page count — TODO confirm against the os:: API.
 433   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
 434   const size_t granularity = os::vm_allocation_granularity();
      // Round the reservation up to the coarser of page size and allocation
      // granularity; remembered so callers/NMT can see the true footprint.
 435   _reserved_byte_size = align_size_up(raw_bytes, MAX2(page_sz, granularity));
 436 
      // With the default page size no explicit alignment is requested
      // (rs_align == 0); large pages force an aligned reservation.
 437   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
 438     MAX2(page_sz, granularity);
 439   ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
 440   os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),
 441                        rs.size());
 442 
      // Tag the reservation as GC memory for Native Memory Tracking.
 443   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 444 
 445   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
      // NOTE(review): 'vspace != 0' after 'new' — standard C++ 'new' cannot
      // yield null; HotSpot allocation may differ, so confirm before removing.
 446   if (vspace != 0) {
      // Commit the entire reservation up front.
 447     if (vspace->expand_by(_reserved_byte_size)) {
 448       return vspace;
 449     }
      // Commit failed: tear down the wrapper, then hand the reservation back.
 450     delete vspace;
 451     // Release memory reserved in the space.
 452     rs.release();
 453   }
 454 
 455   return 0;


// NOTE(review): webrev fragment — middle of what appears to be
// PSParallelCompact::absorb_live_data_from_eden(); neither the signature nor
// the end of the function is visible in this chunk.  The visible logic decides
// whether the old gen may grow to absorb eden's live data by moving the
// young/old generation boundary, returning false when any precondition fails.
1967          old_gen->virtual_space()->alignment(), "alignments do not match");
1968 
      // Boundary moving is only legal when both adaptive policies are on.
1969   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
1970     return false;
1971   }
1972 
1973   // Both generations must be completely committed.
1974   if (young_gen->virtual_space()->uncommitted_size() != 0) {
1975     return false;
1976   }
1977   if (old_gen->virtual_space()->uncommitted_size() != 0) {
1978     return false;
1979   }
1980 
1981   // Figure out how much to take from eden.  Include the average amount promoted
1982   // in the total; otherwise the next young gen GC will simply bail out to a
1983   // full GC.
1984   const size_t alignment = old_gen->virtual_space()->alignment();
1985   const size_t eden_used = eden_space->used_in_bytes();
      // Padded average smooths the promotion estimate across recent GCs.
1986   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
      // Round up to the old gen's alignment so the new boundary is legal.
1987   const size_t absorb_size = align_size_up(eden_used + promoted, alignment);
1988   const size_t eden_capacity = eden_space->capacity_in_bytes();
1989 
1990   if (absorb_size >= eden_capacity) {
1991     return false; // Must leave some space in eden.
1992   }
1993 
1994   const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
1995   if (new_young_size < young_gen->min_gen_size()) {
1996     return false; // Respect young gen minimum size.
1997   }
1998 
      // Unified-logging trace of the planned resize (sizes reported in KB).
1999   log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
2000                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
2001                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2002                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2003                         absorb_size / K,
2004                         eden_capacity / K, (eden_capacity - absorb_size) / K,
2005                         young_gen->from_space()->used_in_bytes() / K,
2006                         young_gen->to_space()->used_in_bytes() / K,
2007                         young_gen->capacity_in_bytes() / K, new_young_size / K);




// NOTE(review): webrev fragment (new side of the diff) — tail of an
// initialization routine whose signature is outside this chunk (presumably
// ParallelCompactData::initialize(MemRegion covered_region) — confirm in the
// full file).  Records the covered heap region, sanity-checks its alignment,
// and allocates the per-region/per-block metadata tables.
 415 {
 416   _region_start = covered_region.start();
 417   const size_t region_size = covered_region.word_size();
      // Debug builds also remember the exclusive end of the covered region.
 418   DEBUG_ONLY(_region_end = _region_start + region_size;)
 419 
      // The covered region must start on a region boundary ...
 420   assert(region_align_down(_region_start) == _region_start,
 421          "region start not aligned");
      // ... and span a whole number of regions (all low offset bits zero).
 422   assert((region_size & RegionSizeOffsetMask) == 0,
 423          "region size not a multiple of RegionSize");
 424 
      // Both tables must allocate; && short-circuits, so block data is not
      // allocated when the region-data allocation already failed.
 425   bool result = initialize_region_data(region_size) && initialize_block_data();
 426   return result;
 427 }
 428 
// Reserve (and fully commit) backing storage for 'count' metadata elements of
// 'element_size' bytes each, wrapped in a PSVirtualSpace.  The number of bytes
// actually reserved (raw size rounded up to page/allocation granularity) is
// recorded in _reserved_byte_size.  Returns NULL (0) on failure.
// This is the new side of the diff: align_size_up has been renamed align_up.
// NOTE(review): the closing '}' of this function lies just past this chunk.
 429 PSVirtualSpace*
 430 ParallelCompactData::create_vspace(size_t count, size_t element_size)
 431 {
      // NOTE(review): no overflow check on count * element_size — presumably
      // callers guarantee the product fits in size_t; confirm at call sites.
 432   const size_t raw_bytes = count * element_size;
      // Pick a page size suitable for a region of raw_bytes; the literal 10
      // looks like a minimum page count — TODO confirm against the os:: API.
 433   const size_t page_sz = os::page_size_for_region_aligned(raw_bytes, 10);
 434   const size_t granularity = os::vm_allocation_granularity();
      // Round the reservation up to the coarser of page size and allocation
      // granularity; remembered so callers/NMT can see the true footprint.
 435   _reserved_byte_size = align_up(raw_bytes, MAX2(page_sz, granularity));
 436 
      // With the default page size no explicit alignment is requested
      // (rs_align == 0); large pages force an aligned reservation.
 437   const size_t rs_align = page_sz == (size_t) os::vm_page_size() ? 0 :
 438     MAX2(page_sz, granularity);
 439   ReservedSpace rs(_reserved_byte_size, rs_align, rs_align > 0);
 440   os::trace_page_sizes("Parallel Compact Data", raw_bytes, raw_bytes, page_sz, rs.base(),
 441                        rs.size());
 442 
      // Tag the reservation as GC memory for Native Memory Tracking.
 443   MemTracker::record_virtual_memory_type((address)rs.base(), mtGC);
 444 
 445   PSVirtualSpace* vspace = new PSVirtualSpace(rs, page_sz);
      // NOTE(review): 'vspace != 0' after 'new' — standard C++ 'new' cannot
      // yield null; HotSpot allocation may differ, so confirm before removing.
 446   if (vspace != 0) {
      // Commit the entire reservation up front.
 447     if (vspace->expand_by(_reserved_byte_size)) {
 448       return vspace;
 449     }
      // Commit failed: tear down the wrapper, then hand the reservation back.
 450     delete vspace;
 451     // Release memory reserved in the space.
 452     rs.release();
 453   }
 454 
 455   return 0;


// NOTE(review): webrev fragment (new side of the diff; align_size_up renamed
// align_up) — middle of what appears to be
// PSParallelCompact::absorb_live_data_from_eden(); neither the signature nor
// the end of the function is visible in this chunk.  The visible logic decides
// whether the old gen may grow to absorb eden's live data by moving the
// young/old generation boundary, returning false when any precondition fails.
1967          old_gen->virtual_space()->alignment(), "alignments do not match");
1968 
      // Boundary moving is only legal when both adaptive policies are on.
1969   if (!(UseAdaptiveSizePolicy && UseAdaptiveGCBoundary)) {
1970     return false;
1971   }
1972 
1973   // Both generations must be completely committed.
1974   if (young_gen->virtual_space()->uncommitted_size() != 0) {
1975     return false;
1976   }
1977   if (old_gen->virtual_space()->uncommitted_size() != 0) {
1978     return false;
1979   }
1980 
1981   // Figure out how much to take from eden.  Include the average amount promoted
1982   // in the total; otherwise the next young gen GC will simply bail out to a
1983   // full GC.
1984   const size_t alignment = old_gen->virtual_space()->alignment();
1985   const size_t eden_used = eden_space->used_in_bytes();
      // Padded average smooths the promotion estimate across recent GCs.
1986   const size_t promoted = (size_t)size_policy->avg_promoted()->padded_average();
      // Round up to the old gen's alignment so the new boundary is legal.
1987   const size_t absorb_size = align_up(eden_used + promoted, alignment);
1988   const size_t eden_capacity = eden_space->capacity_in_bytes();
1989 
1990   if (absorb_size >= eden_capacity) {
1991     return false; // Must leave some space in eden.
1992   }
1993 
1994   const size_t new_young_size = young_gen->capacity_in_bytes() - absorb_size;
1995   if (new_young_size < young_gen->min_gen_size()) {
1996     return false; // Respect young gen minimum size.
1997   }
1998 
      // Unified-logging trace of the planned resize (sizes reported in KB).
1999   log_trace(heap, ergo)(" absorbing " SIZE_FORMAT "K:  "
2000                         "eden " SIZE_FORMAT "K->" SIZE_FORMAT "K "
2001                         "from " SIZE_FORMAT "K, to " SIZE_FORMAT "K "
2002                         "young_gen " SIZE_FORMAT "K->" SIZE_FORMAT "K ",
2003                         absorb_size / K,
2004                         eden_capacity / K, (eden_capacity - absorb_size) / K,
2005                         young_gen->from_space()->used_in_bytes() / K,
2006                         young_gen->to_space()->used_in_bytes() / K,
2007                         young_gen->capacity_in_bytes() / K, new_young_size / K);


< prev index next >