src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
Sdiff for 7014874

Old:
  95   const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
  96                                                           pg_max_size, 16),
  97                                  og_page_sz);
  98 
  99   const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
 100   const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
 101   const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);
 102 
 103   // Update sizes to reflect the selected page size(s).
 104   //
 105   // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
 106   // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
 107   // move to the common code.
 108   yg_min_size = align_size_up(yg_min_size, yg_align);
 109   yg_max_size = align_size_up(yg_max_size, yg_align);
 110   size_t yg_cur_size =
 111     align_size_up(_collector_policy->young_gen_size(), yg_align);
 112   yg_cur_size = MAX2(yg_cur_size, yg_min_size);
 113 
 114   og_min_size = align_size_up(og_min_size, og_align);
 115   og_max_size = align_size_up(og_max_size, og_align);
 116   size_t og_cur_size =
 117     align_size_up(_collector_policy->old_gen_size(), og_align);
 118   og_cur_size = MAX2(og_cur_size, og_min_size);
 119 
 120   pg_min_size = align_size_up(pg_min_size, pg_align);
 121   pg_max_size = align_size_up(pg_max_size, pg_align);
 122   size_t pg_cur_size = pg_min_size;
 123 
 124   trace_gen_sizes("ps heap rnd",
 125                   pg_min_size, pg_max_size,
 126                   og_min_size, og_max_size,
 127                   yg_min_size, yg_max_size);
 128 
 129   const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
 130   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 131 
 132   // The main part of the heap (old gen + young gen) can often use a larger page
 133   // size than is needed or wanted for the perm gen.  Use the "compound
 134   // alignment" ReservedSpace ctor to avoid having to use the same page size for
 135   // all gens.

New:
  95   const size_t pg_page_sz = MIN2(os::page_size_for_region(pg_min_size,
  96                                                           pg_max_size, 16),
  97                                  og_page_sz);
  98 
  99   const size_t pg_align = set_alignment(_perm_gen_alignment,  pg_page_sz);
 100   const size_t og_align = set_alignment(_old_gen_alignment,   og_page_sz);
 101   const size_t yg_align = set_alignment(_young_gen_alignment, og_page_sz);
 102 
 103   // Update sizes to reflect the selected page size(s).
 104   //
 105   // NEEDS_CLEANUP.  The default TwoGenerationCollectorPolicy uses NewRatio; it
 106   // should check UseAdaptiveSizePolicy.  Changes from generationSizer could
 107   // move to the common code.
 108   yg_min_size = align_size_up(yg_min_size, yg_align);
 109   yg_max_size = align_size_up(yg_max_size, yg_align);
 110   size_t yg_cur_size =
 111     align_size_up(_collector_policy->young_gen_size(), yg_align);
 112   yg_cur_size = MAX2(yg_cur_size, yg_min_size);
 113 
 114   og_min_size = align_size_up(og_min_size, og_align);
 115   // Align old gen size down to preserve specified heap size.
 116   assert(og_align == yg_align, "sanity");
 117   size_t og_size = align_size_down(og_max_size, og_align);
 118   if (og_size < og_min_size) {
 119     og_max_size = og_min_size;
 120   } else {
 121     og_max_size = og_size;
 122   }
 123   size_t og_cur_size =
 124     align_size_up(_collector_policy->old_gen_size(), og_align);
 125   og_cur_size = MAX2(og_cur_size, og_min_size);
 126 
 127   pg_min_size = align_size_up(pg_min_size, pg_align);
 128   pg_max_size = align_size_up(pg_max_size, pg_align);
 129   size_t pg_cur_size = pg_min_size;
 130 
 131   trace_gen_sizes("ps heap rnd",
 132                   pg_min_size, pg_max_size,
 133                   og_min_size, og_max_size,
 134                   yg_min_size, yg_max_size);
 135 
 136   const size_t total_reserved = pg_max_size + og_max_size + yg_max_size;
 137   char* addr = Universe::preferred_heap_base(total_reserved, Universe::UnscaledNarrowOop);
 138 
 139   // The main part of the heap (old gen + young gen) can often use a larger page
 140   // size than is needed or wanted for the perm gen.  Use the "compound
 141   // alignment" ReservedSpace ctor to avoid having to use the same page size for
 142   // all gens.
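
The comment added at new line 115 ("Align old gen size down to preserve specified heap size") is the substance of the change: rounding og_max_size up to og_align, as old line 115 did, can grow the old gen beyond the size it was given, while rounding down with a floor at og_min_size cannot. Below is a minimal standalone sketch of that arithmetic, not HotSpot code; the align_up/align_down helpers and the example sizes are hypothetical stand-ins for align_size_up/align_size_down.

// Standalone sketch, not HotSpot code: hypothetical align_up/align_down
// helpers stand in for align_size_up/align_size_down ('align' is assumed
// to be a power of two, as the generation alignments above are).
#include <cstddef>
#include <cstdio>

static size_t align_up(size_t size, size_t align)   { return (size + align - 1) & ~(align - 1); }
static size_t align_down(size_t size, size_t align) { return size & ~(align - 1); }

int main() {
  const size_t M = 1024 * 1024;
  const size_t og_align    = 256 * M;   // e.g. a large-page alignment
  const size_t og_min_size = 128 * M;
  const size_t og_max_size = 896 * M;   // not a multiple of og_align

  // Old line 115: round the maximum up -- the old gen grows past 896M.
  size_t rounded_up = align_up(og_max_size, og_align);

  // New lines 115-122: round it down, but never below the minimum.
  size_t rounded_down = align_down(og_max_size, og_align);
  size_t new_og_max   = (rounded_down < og_min_size) ? og_min_size : rounded_down;

  printf("requested %zuM, align up -> %zuM, align down (floored) -> %zuM\n",
         og_max_size / M, rounded_up / M, new_og_max / M);  // 896M, 1024M, 768M
  return 0;
}

With those example numbers the old code would contribute 1024M to total_reserved for the old gen alone, 128M more than requested, while the new code contributes 768M, so the old gen no longer pushes pg_max_size + og_max_size + yg_max_size past what was asked for.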
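The closing comment (old lines 132-135, new lines 139-142) explains why the reservation uses the "compound alignment" ReservedSpace constructor: the perm gen prefix only needs a small page size while old gen + young gen want a larger one, yet all three must sit in one contiguous range for total_reserved. The sketch below illustrates only the layout arithmetic behind that idea; the helpers, sizes, and base address are hypothetical, and this is not the ReservedSpace API itself.

// Standalone sketch (hypothetical helpers, not HotSpot's ReservedSpace):
// one contiguous reservation whose prefix (perm gen) is aligned to a small
// page size and whose suffix (old gen + young gen) starts on a larger one.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static uintptr_t align_up(uintptr_t v, uintptr_t align) { return (v + align - 1) & ~(align - 1); }

int main() {
  const size_t K = 1024, M = K * K;
  const size_t pg_align = 64 * K;    // small-page alignment for the perm gen prefix
  const size_t og_align = 4 * M;     // large-page alignment for old + young suffix
  const size_t pg_size  = 12 * M;    // already a multiple of pg_align
  const size_t og_size  = 64 * M;
  const size_t yg_size  = 32 * M;

  // Pretend the OS handed back this base address for the whole range.
  uintptr_t base = 0x7f0000010000;

  // Place the perm gen at the first pg_align boundary, then start the
  // old+young part at the next og_align boundary after it.
  uintptr_t perm_start = align_up(base, pg_align);
  uintptr_t main_start = align_up(perm_start + pg_size, og_align);
  uintptr_t end        = main_start + og_size + yg_size;

  printf("perm @ %#lx, old+young @ %#lx, slack %zu KB, total %zu MB\n",
         (unsigned long)perm_start, (unsigned long)main_start,
         (size_t)(main_start - (perm_start + pg_size)) / K,
         (size_t)(end - perm_start) / M);
  return 0;
}

In this sketch the gap between the end of the prefix and the next large-page boundary is the cost of mixing page sizes inside one reservation, which is why a single page size for all gens is neither needed nor wanted here.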