< prev index next >
src/share/vm/runtime/arguments.cpp
Print this page
rev 8961 : [mq]: diff-shenandoah.patch
*** 49,58 ****
--- 49,59 ----
#include "services/memTracker.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/macros.hpp"
#include "utilities/stringUtils.hpp"
#if INCLUDE_ALL_GCS
+ #include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/cms/compactibleFreeListSpace.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#endif // INCLUDE_ALL_GCS
*** 1469,1478 ****
--- 1470,1484 ----
#ifdef _LP64
// MaxHeapSize is not set up properly at this point, but
// the only value that can override MaxHeapSize if we are
// to use UseCompressedOops is InitialHeapSize.
size_t max_heap_size = MAX2(MaxHeapSize, InitialHeapSize);
+ if (UseShenandoahGC && FLAG_IS_DEFAULT(UseCompressedOops)) {
+ warning("Compressed Oops not supported with ShenandoahGC");
+ FLAG_SET_ERGO(bool, UseCompressedOops, false);
+ FLAG_SET_ERGO(bool, UseCompressedClassPointers, false);
+ }
if (max_heap_size <= max_heap_for_compressed_oops()) {
#if !defined(COMPILER1) || defined(TIERED)
if (FLAG_IS_DEFAULT(UseCompressedOops)) {
FLAG_SET_ERGO(bool, UseCompressedOops, true);
*** 1527,1536 ****
--- 1533,1546 ----
#if INCLUDE_ALL_GCS
if (UseParallelGC) {
heap_alignment = ParallelScavengeHeap::conservative_max_heap_alignment();
} else if (UseG1GC) {
heap_alignment = G1CollectedHeap::conservative_max_heap_alignment();
+ } else if (UseShenandoahGC) {
+ // TODO: Replace this hard-wired per-GC dispatch with a clean interface that
+ // queries the selected GC's collector policy for its heap alignment.
+ heap_alignment = ShenandoahHeap::conservative_max_heap_alignment();
}
#endif // INCLUDE_ALL_GCS
_conservative_max_heap_alignment = MAX4(heap_alignment,
(size_t)os::vm_allocation_granularity(),
os::max_page_size(),
*** 1685,1694 ****
--- 1695,1719 ----
(unsigned int) (MarkStackSize / K), (uint) (MarkStackSizeMax / K));
tty->print_cr("ConcGCThreads: %u", ConcGCThreads);
}
}
+ // Ergonomic flag setup for Shenandoah GC: sizes the parallel and concurrent
+ // worker thread pools and turns on parallel reference processing, honoring
+ // any values the user set explicitly on the command line (FLAG_IS_DEFAULT
+ // guards). Called from the GC-selection ergonomics when UseShenandoahGC is
+ // chosen.
+ void Arguments::set_shenandoah_gc_flags() {
+ FLAG_SET_DEFAULT(UseDynamicNumberOfGCThreads, true);
+ // NOTE(review): parallel_worker_threads() presumably derives a platform
+ // default (and respects an explicit -XX:ParallelGCThreads) as it does for
+ // the other collectors — confirm, since FLAG_SET_DEFAULT itself does not
+ // check the flag's origin.
+ FLAG_SET_DEFAULT(ParallelGCThreads,
+ Abstract_VM_Version::parallel_worker_threads());
+
+ if (FLAG_IS_DEFAULT(ConcGCThreads)) {
+ // Default the concurrent marking threads to the parallel worker count,
+ // with a floor of 1 so concurrent phases always have a worker.
+ uint conc_threads = MAX2((uint) 1, ParallelGCThreads);
+ FLAG_SET_DEFAULT(ConcGCThreads, conc_threads);
+ }
+
+ if (FLAG_IS_DEFAULT(ParallelRefProcEnabled)) {
+ // Reference processing is multi-threaded by default under Shenandoah.
+ FLAG_SET_DEFAULT(ParallelRefProcEnabled, true);
+ }
+ }
+
#if !INCLUDE_ALL_GCS
#ifdef ASSERT
static bool verify_serial_gc_flags() {
return (UseSerialGC &&
!(UseParNewGC || (UseConcMarkSweepGC) || UseG1GC ||
*** 1704,1713 ****
--- 1729,1740 ----
set_parallel_gc_flags();
} else if (UseConcMarkSweepGC) {
set_cms_and_parnew_gc_flags();
} else if (UseG1GC) {
set_g1_gc_flags();
+ } else if (UseShenandoahGC) {
+ set_shenandoah_gc_flags();
}
check_deprecated_gc_flags();
if (AssumeMP && !UseSerialGC) {
if (FLAG_IS_DEFAULT(ParallelGCThreads) && ParallelGCThreads == 1) {
warning("If the number of processors is expected to increase from one, then"
< prev index next >