/*
 * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Arguments.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/heapRegion.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/shared/cardTableRS.hpp"
#include "gc/shared/gcArguments.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "runtime/globals.hpp"

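// Default fraction of physical memory (or MaxRAM) used to cap the young
// generation on heterogeneous heaps; the computed cap is cached in
// MaxMemoryForYoung for later queries.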
static const double MaxRamFractionForYoung = 0.8;
size_t G1Arguments::MaxMemoryForYoung;

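// The heap alignment must satisfy the card table constraint, the space
// (region) alignment and the OS page size (the large page size when
// -XX:+UseLargePages is in effect), so take the maximum of the three.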
static size_t calculate_heap_alignment(size_t space_alignment) {
  size_t card_table_alignment = CardTableRS::ct_max_alignment_constraint();
  size_t page_size = UseLargePages ? os::large_page_size() : os::vm_page_size();
  return MAX3(card_table_alignment, space_alignment, page_size);
}

void G1Arguments::initialize_alignments() {
  // Set up the region size and associated fields.
  //
  // There is a circular dependency here. We base the region size on the heap
  // size, but the heap size should be aligned with the region size. To get
  // around this we use the unaligned values for the heap.
  HeapRegion::setup_heap_region_size(MaxHeapSize);
  HeapRegionRemSet::setup_remset_size();

  SpaceAlignment = HeapRegion::GrainBytes;
  HeapAlignment = calculate_heap_alignment(SpaceAlignment);
}

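// Queried before the actual region size has been chosen, so report the
// largest possible region size as a conservative upper bound.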
size_t G1Arguments::conservative_max_heap_alignment() {
  return HeapRegion::max_region_size();
}

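// Split the -XX:VerifyGCType option on spaces, commas and newlines and
// enable each verification type that is listed.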
void G1Arguments::initialize_verification_types() {
  if (strlen(VerifyGCType) > 0) {
    const char delimiter[] = " ,\n";
    size_t length = strlen(VerifyGCType);
    char* type_list = NEW_C_HEAP_ARRAY(char, length + 1, mtInternal);
    strncpy(type_list, VerifyGCType, length + 1);
    char* save_ptr;

    char* token = strtok_r(type_list, delimiter, &save_ptr);
    while (token != NULL) {
      parse_verification_type(token);
      token = strtok_r(NULL, delimiter, &save_ptr);
    }
    FREE_C_HEAP_ARRAY(char, type_list);
  }
}

void G1Arguments::parse_verification_type(const char* type) {
  if (strcmp(type, "young-normal") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyYoungNormal);
  } else if (strcmp(type, "concurrent-start") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyConcurrentStart);
  } else if (strcmp(type, "mixed") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyMixed);
  } else if (strcmp(type, "remark") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyRemark);
  } else if (strcmp(type, "cleanup") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyCleanup);
  } else if (strcmp(type, "full") == 0) {
    G1HeapVerifier::enable_verification_type(G1HeapVerifier::G1VerifyFull);
  } else {
    log_warning(gc, verify)("VerifyGCType: '%s' is unknown. Available types are: "
                            "young-normal, concurrent-start, mixed, remark, cleanup and full", type);
  }
}

// Returns the maximum number of workers to be used in a concurrent
// phase based on the number of GC workers being used in a STW
// phase.
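// For example, 8 STW GC workers yield (8 + 2) / 4 = 2 concurrent workers,
// and at least one concurrent worker is always requested.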
static uint scale_concurrent_worker_threads(uint num_gc_workers) {
  return MAX2((num_gc_workers + 2) / 4, 1U);
}

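// Ergonomically size the global marking stack: give each concurrent GC
// thread room for TASKQUEUE_SIZE entries, but never exceed MarkStackSizeMax.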
void G1Arguments::initialize_mark_stack_size() {
  if (FLAG_IS_DEFAULT(MarkStackSize)) {
    size_t mark_stack_size = MIN2(MarkStackSizeMax,
                                  MAX2(MarkStackSize, (size_t)ConcGCThreads * TASKQUEUE_SIZE));
    FLAG_SET_ERGO(MarkStackSize, mark_stack_size);
  }

  log_trace(gc)("MarkStackSize: %uk  MarkStackSizeMax: %uk", (uint)(MarkStackSize / K), (uint)(MarkStackSizeMax / K));
}

void G1Arguments::initialize() {
  GCArguments::initialize();
  assert(UseG1GC, "Error");
  FLAG_SET_DEFAULT(ParallelGCThreads, WorkerPolicy::parallel_worker_threads());
  if (ParallelGCThreads == 0) {
    assert(!FLAG_IS_DEFAULT(ParallelGCThreads), "The default value for ParallelGCThreads should not be 0.");
    vm_exit_during_initialization("The flag -XX:+UseG1GC can not be combined with -XX:ParallelGCThreads=0", NULL);
  }

  // When dumping the CDS archive we want to reduce fragmentation by
  // triggering a full collection. To keep fragmentation as low as
  // possible we use only one worker thread.
  if (DumpSharedSpaces) {
    FLAG_SET_ERGO(ParallelGCThreads, 1);
  }

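  // By default, use as many concurrent refinement threads as there are
  // parallel GC threads.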
  if (FLAG_IS_DEFAULT(G1ConcRefinementThreads)) {
    FLAG_SET_ERGO(G1ConcRefinementThreads, ParallelGCThreads);
  }

  if (FLAG_IS_DEFAULT(ConcGCThreads) || ConcGCThreads == 0) {
    // Calculate the number of concurrent worker threads by scaling
    // the number of parallel GC threads.
    uint marking_thread_num = scale_concurrent_worker_threads(ParallelGCThreads);
    FLAG_SET_ERGO(ConcGCThreads, marking_thread_num);
  }

  if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
    // In G1, we want the default GC overhead goal to be higher than
    // it is for PS, or the heap might be expanded too aggressively.
    // We set it here to ~8%: a GCTimeRatio of 12 corresponds to a GC
    // time goal of 1 / (1 + 12), i.e. about 8% of total time.
    FLAG_SET_DEFAULT(GCTimeRatio, 12);
  }

  // Below, we might need to calculate the pause time interval based on
  // the pause target. When we do so we are going to give G1 maximum
  // flexibility and allow it to do pauses when it needs to. So, we'll
  // arrange for the pause interval to be pause time target + 1 to
  // ensure that a) the pause time target is maximized with respect to
  // the pause interval and b) we maintain the invariant that pause
  // time target < pause interval. If the user does not want this
  // maximum flexibility, they will have to set the pause interval
  // explicitly.

  if (FLAG_IS_DEFAULT(MaxGCPauseMillis)) {
    // The default pause time target in G1 is 200ms
    FLAG_SET_DEFAULT(MaxGCPauseMillis, 200);
  }

  // Then, if the interval parameter was not set, set it according to
  // the pause time target (this will also deal with the case when the
  // pause time target is the default value).
  if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
    FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
  }

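  // With more than one GC worker thread available, enable parallel
  // reference processing by default.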
  if (FLAG_IS_DEFAULT(ParallelRefProcEnabled) && ParallelGCThreads > 1) {
    FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
  }

  // By default do not let the target stack size be more than 1/4 of the task queue entries.
  if (FLAG_IS_DEFAULT(GCDrainStackTargetSize)) {
    FLAG_SET_ERGO(GCDrainStackTargetSize, MIN2(GCDrainStackTargetSize, (uintx)TASKQUEUE_SIZE / 4));
  }

#ifdef COMPILER2
  // Enable loop strip mining to offer better pause time guarantees
  if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
    FLAG_SET_DEFAULT(UseCountedLoopSafepoints, true);
    if (FLAG_IS_DEFAULT(LoopStripMiningIter)) {
      FLAG_SET_DEFAULT(LoopStripMiningIter, 1000);
    }
  }
#endif

  initialize_mark_stack_size();
  initialize_verification_types();
}

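// Compute a cap on the memory used for the young generation: start from
// physical memory (or MaxRAM if set) and scale it by MaxRAMFraction,
// MaxRAMPercentage or the default fraction passed in. calc_str records the
// formula that was used so it can be included in log messages.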
static size_t calculate_reasonable_max_memory_for_young(FormatBuffer<100> &calc_str, double max_ram_fraction_for_young) {
  julong phys_mem;
  // If MaxRAM is specified, we use that as the maximum physical memory available.
  if (FLAG_IS_DEFAULT(MaxRAM)) {
    phys_mem = os::physical_memory();
    calc_str.append("Physical_Memory");
  } else {
    phys_mem = (julong)MaxRAM;
    calc_str.append("MaxRAM");
  }

  julong reasonable_max = phys_mem;

  // If either MaxRAMFraction or MaxRAMPercentage is specified, use it to
  // calculate a reasonable max size for the young generation.
  if (!FLAG_IS_DEFAULT(MaxRAMFraction)) {
    reasonable_max = (julong)(phys_mem / MaxRAMFraction);
    calc_str.append(" / MaxRAMFraction");
  } else if (!FLAG_IS_DEFAULT(MaxRAMPercentage)) {
    reasonable_max = (julong)((phys_mem * MaxRAMPercentage) / 100);
    calc_str.append(" * MaxRAMPercentage / 100");
  } else {
    // Otherwise use our own default fraction to calculate the max size of the young generation.
    reasonable_max = phys_mem * max_ram_fraction_for_young;
    calc_str.append(" * %0.2f", max_ram_fraction_for_young);
  }

  return (size_t)reasonable_max;
}

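// On a heterogeneous heap (old generation backed by the path given via
// AllocateOldGenAt), constrain the young generation sizes before the
// shared heap flag processing runs.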
void G1Arguments::initialize_heap_flags_and_sizes() {
  if (AllocateOldGenAt != NULL) {
    initialize_heterogeneous();
  }

  GCArguments::initialize_heap_flags_and_sizes();
}

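// The young generation stays in DRAM on a heterogeneous heap, so clamp
// NewSize and MaxNewSize to the computed DRAM budget (MaxMemoryForYoung).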
void G1Arguments::initialize_heterogeneous() {
  FormatBuffer<100> calc_str("");

  MaxMemoryForYoung = calculate_reasonable_max_memory_for_young(calc_str, MaxRamFractionForYoung);

  if (MaxNewSize > MaxMemoryForYoung) {
    if (FLAG_IS_CMDLINE(MaxNewSize)) {
      log_warning(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
                            MaxMemoryForYoung, calc_str.buffer());
    } else {
      log_info(gc, ergo)("Setting MaxNewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s)). "
                         "Dram usage can be lowered by setting MaxNewSize to a lower value", MaxMemoryForYoung, calc_str.buffer());
    }
    MaxNewSize = MaxMemoryForYoung;
  }
  if (NewSize > MaxMemoryForYoung) {
    if (FLAG_IS_CMDLINE(NewSize)) {
      log_warning(gc, ergo)("Setting NewSize to " SIZE_FORMAT " based on dram available (calculation = align(%s))",
                            MaxMemoryForYoung, calc_str.buffer());
    }
    NewSize = MaxMemoryForYoung;
  }
}

CollectedHeap* G1Arguments::create_heap() {
  return new G1CollectedHeap();
}

bool G1Arguments::is_heterogeneous_heap() {
  return AllocateOldGenAt != NULL;
}

size_t G1Arguments::reasonable_max_memory_for_young() {
  return MaxMemoryForYoung;
}

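// A heterogeneous heap reserves twice MaxHeapSize of address space so that
// both the DRAM-backed part and the AllocateOldGenAt-backed part can each be
// sized up to the maximum heap size.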
size_t G1Arguments::heap_reserved_size_bytes() {
  return (is_heterogeneous_heap() ? 2 : 1) * MaxHeapSize;
}

size_t G1Arguments::heap_max_size_bytes() {
  return MaxHeapSize;
}