< prev index next >

src/share/vm/memory/universe.cpp

Print this page
rev 12854 : [mq]: gcinterface.patch

@@ -75,17 +75,10 @@
 #include "utilities/events.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/macros.hpp"
 #include "utilities/ostream.hpp"
 #include "utilities/preserveException.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc/cms/cmsCollectorPolicy.hpp"
-#include "gc/g1/g1CollectedHeap.inline.hpp"
-#include "gc/g1/g1CollectorPolicy.hpp"
-#include "gc/parallel/parallelScavengeHeap.hpp"
-#include "gc/shared/adaptiveSizePolicy.hpp"
-#endif // INCLUDE_ALL_GCS
 #if INCLUDE_CDS
 #include "classfile/sharedClassUtil.hpp"
 #endif
 
 // Known objects

@@ -160,12 +153,10 @@
 bool            Universe::_fully_initialized = false;
 
 size_t          Universe::_heap_capacity_at_last_gc;
 size_t          Universe::_heap_used_at_last_gc = 0;
 
-CollectedHeap*  Universe::_collectedHeap = NULL;
-
 NarrowPtrStruct Universe::_narrow_oop = { NULL, 0, true };
 NarrowPtrStruct Universe::_narrow_klass = { NULL, 0, true };
 address Universe::_narrow_ptrs_base;
 
 void Universe::basic_type_classes_do(void f(Klass*)) {

@@ -639,11 +630,17 @@
 
   TraceTime timer("Genesis", TRACETIME_LOG(Info, startuptime));
 
   JavaClasses::compute_hard_coded_offsets();
 
-  jint status = Universe::initialize_heap();
+  assert(GC::is_initialized(), "needs to be initialized here");
+  jint status = GC::gc()->initialize_heap();
+  if (status != JNI_OK) {
+    return status;
+  }
+
+  status = Universe::initialize_heap();
   if (status != JNI_OK) {
     return status;
   }
 
   Metaspace::global_initialize();

@@ -688,35 +685,10 @@
   }
 
   return JNI_OK;
 }
 
-CollectedHeap* Universe::create_heap() {
-  assert(_collectedHeap == NULL, "Heap already created");
-#if !INCLUDE_ALL_GCS
-  if (UseParallelGC) {
-    fatal("UseParallelGC not supported in this VM.");
-  } else if (UseG1GC) {
-    fatal("UseG1GC not supported in this VM.");
-  } else if (UseConcMarkSweepGC) {
-    fatal("UseConcMarkSweepGC not supported in this VM.");
-#else
-  if (UseParallelGC) {
-    return Universe::create_heap_with_policy<ParallelScavengeHeap, GenerationSizer>();
-  } else if (UseG1GC) {
-    return Universe::create_heap_with_policy<G1CollectedHeap, G1CollectorPolicy>();
-  } else if (UseConcMarkSweepGC) {
-    return Universe::create_heap_with_policy<GenCollectedHeap, ConcurrentMarkSweepPolicy>();
-#endif
-  } else if (UseSerialGC) {
-    return Universe::create_heap_with_policy<GenCollectedHeap, MarkSweepPolicy>();
-  }
-
-  ShouldNotReachHere();
-  return NULL;
-}
-
 // Choose the heap base address and oop encoding mode
 // when compressed oops are used:
 // Unscaled  - Use 32-bits oops without encoding when
 //     NarrowOopHeapBaseMin + heap_size < 4Gb
 // ZeroBased - Use zero based compressed oops with encoding when

@@ -724,36 +696,27 @@
 // HeapBased - Use compressed oops with heap base + encoding.
 
 jint Universe::initialize_heap() {
   jint status = JNI_ERR;
 
-  _collectedHeap = create_heap_ext();
-  if (_collectedHeap == NULL) {
-    _collectedHeap = create_heap();
-  }
-
-  status = _collectedHeap->initialize();
-  if (status != JNI_OK) {
-    return status;
-  }
-  log_info(gc)("Using %s", _collectedHeap->name());
-
-  ThreadLocalAllocBuffer::set_max_size(Universe::heap()->max_tlab_size());
+  GC* gc = GC::gc();
+  CollectedHeap* heap = gc->heap();
+  ThreadLocalAllocBuffer::set_max_size(heap->max_tlab_size());
 
 #ifdef _LP64
   if (UseCompressedOops) {
     // Subtract a page because something can get allocated at heap base.
     // This also makes implicit null checking work, because the
     // memory+1 page below heap_base needs to cause a signal.
     // See needs_explicit_null_check.
     // Only set the heap base for compressed oops because it indicates
     // compressed oops for pstack code.
-    if ((uint64_t)Universe::heap()->reserved_region().end() > UnscaledOopHeapMax) {
+    if ((uint64_t) heap->reserved_region().end() > UnscaledOopHeapMax) {
       // Didn't reserve heap below 4Gb.  Must shift.
       Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
     }
-    if ((uint64_t)Universe::heap()->reserved_region().end() <= OopEncodingHeapMax) {
+    if ((uint64_t) heap->reserved_region().end() <= OopEncodingHeapMax) {
       // Did reserve heap below 32Gb. Can use base == 0;
       Universe::set_narrow_oop_base(0);
     }
 
     Universe::set_narrow_ptrs_base(Universe::narrow_oop_base());

@@ -768,31 +731,31 @@
     Arguments::PropertyList_add(new SystemProperty("java.vm.compressedOopsMode",
                                                    narrow_oop_mode_to_string(narrow_oop_mode()),
                                                    false));
   }
   // Universe::narrow_oop_base() is one page below the heap.
-  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(Universe::heap()->base() -
+  assert((intptr_t)Universe::narrow_oop_base() <= (intptr_t)(heap->base() -
          os::vm_page_size()) ||
          Universe::narrow_oop_base() == NULL, "invalid value");
   assert(Universe::narrow_oop_shift() == LogMinObjAlignmentInBytes ||
          Universe::narrow_oop_shift() == 0, "invalid value");
 #endif
 
   // We will never reach the CATCH below since Exceptions::_throw will cause
   // the VM to exit if an exception is thrown during initialization
 
   if (UseTLAB) {
-    assert(Universe::heap()->supports_tlab_allocation(),
+    assert(heap->supports_tlab_allocation(),
            "Should support thread-local allocation buffers");
     ThreadLocalAllocBuffer::startup_initialization();
   }
   return JNI_OK;
 }
 
 void Universe::print_compressed_oops_mode(outputStream* st) {
   st->print("Heap address: " PTR_FORMAT ", size: " SIZE_FORMAT " MB",
-            p2i(Universe::heap()->base()), Universe::heap()->reserved_region().byte_size()/M);
+            p2i(GC::gc()->heap()->base()), GC::gc()->heap()->reserved_region().byte_size()/M);
 
   st->print(", Compressed Oops mode: %s", narrow_oop_mode_to_string(narrow_oop_mode()));
 
   if (Universe::narrow_oop_base() != 0) {
     st->print(": " PTR_FORMAT, p2i(Universe::narrow_oop_base()));

@@ -852,12 +815,12 @@
 
 
 // It's the caller's responsibility to ensure glitch-freedom
 // (if required).
 void Universe::update_heap_info_at_gc() {
   // Snapshot the heap's capacity and used size into Universe statics.
   // Patch note: the heap is now reached through the GC interface
   // singleton (GC::gc()->heap()) instead of Universe::heap(), since
   // Universe::_collectedHeap was removed by this change.
-  _heap_capacity_at_last_gc = heap()->capacity();
-  _heap_used_at_last_gc     = heap()->used();
+  _heap_capacity_at_last_gc = GC::gc()->heap()->capacity();
+  _heap_used_at_last_gc     = GC::gc()->heap()->used();
 }
 
 
 const char* Universe::narrow_oop_mode_to_string(Universe::NARROW_OOP_MODE mode) {
   switch (mode) {

@@ -1057,19 +1020,19 @@
     MutexLocker x(Heap_lock);
     Universe::update_heap_info_at_gc();
   }
 
   // ("weak") refs processing infrastructure initialization
-  Universe::heap()->post_initialize();
+  GC::gc()->heap()->post_initialize();
 
   // Initialize performance counters for metaspaces
   MetaspaceCounters::initialize_performance_counters();
   CompressedClassSpaceCounters::initialize_performance_counters();
 
   MemoryService::add_metaspace_memory_pools();
 
-  MemoryService::set_universe_heap(Universe::heap());
+  MemoryService::set_universe_heap(GC::gc()->heap());
 #if INCLUDE_CDS
   SharedClassUtil::initialize(CHECK_false);
 #endif
   return true;
 }

@@ -1080,11 +1043,11 @@
 }
 
 // Print a heap summary to the given stream, taking Heap_lock if it is
 // not already held.  The heap is obtained via the GC interface
 // singleton rather than the removed Universe::_collectedHeap field.
 void Universe::print_on(outputStream* st) {
   GCMutexLocker hl(Heap_lock); // Heap_lock might be locked by caller thread.
   st->print_cr("Heap");
-  heap()->print_on(st);
+  GC::gc()->heap()->print_on(st);
 }
 
 void Universe::print_heap_at_SIGBREAK() {
   if (PrintHeapAtSIGBREAK) {
     print_on(tty);

@@ -1094,22 +1057,22 @@
 }
 
 // Log a heap summary before a collection.  Emits only when gc+heap
 // logging is enabled at debug level; total_collections() and
 // total_full_collections() are read through the GC interface singleton.
 void Universe::print_heap_before_gc() {
   Log(gc, heap) log;
   if (log.is_debug()) {
-    log.debug("Heap before GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
+    log.debug("Heap before GC invocations=%u (full %u):", GC::gc()->heap()->total_collections(), GC::gc()->heap()->total_full_collections());
     ResourceMark rm;
-    heap()->print_on(log.debug_stream());
+    GC::gc()->heap()->print_on(log.debug_stream());
   }
 }
 
 // Log a heap summary after a collection; mirrors print_heap_before_gc()
 // and is likewise gated on debug-level gc+heap logging.
 void Universe::print_heap_after_gc() {
   Log(gc, heap) log;
   if (log.is_debug()) {
-    log.debug("Heap after GC invocations=%u (full %u):", heap()->total_collections(), heap()->total_full_collections());
+    log.debug("Heap after GC invocations=%u (full %u):", GC::gc()->heap()->total_collections(), GC::gc()->heap()->total_full_collections());
     ResourceMark rm;
-    heap()->print_on(log.debug_stream());
+    GC::gc()->heap()->print_on(log.debug_stream());
   }
 }
 
 void Universe::initialize_verify_flags() {
   verify_flags = 0;

@@ -1179,11 +1142,11 @@
     log_debug(gc, verify)("Threads");
     Threads::verify();
   }
   if (should_verify_subset(Verify_Heap)) {
     log_debug(gc, verify)("Heap");
-    heap()->verify(option);
+    GC::gc()->heap()->verify(option);
   }
   if (should_verify_subset(Verify_SymbolTable)) {
     log_debug(gc, verify)("SymbolTable");
     SymbolTable::verify();
   }

@@ -1259,17 +1222,17 @@
 }
 
 // Oop verification (see MacroAssembler::verify_oop)
 
 // Recompute the oop-verification data from the heap's reserved region
 // (now obtained via the GC interface singleton) and return the mask
 // used by MacroAssembler::verify_oop.
 uintptr_t Universe::verify_oop_mask() {
-  MemRegion m = heap()->reserved_region();
+  MemRegion m = GC::gc()->heap()->reserved_region();
   calculate_verify_data(m.start(), m.end());
   return _verify_oop_mask;
 }
 
 // Companion to verify_oop_mask(): recompute the verification data from
 // the heap's reserved region and return the expected bit pattern.
 uintptr_t Universe::verify_oop_bits() {
-  MemRegion m = heap()->reserved_region();
+  MemRegion m = GC::gc()->heap()->reserved_region();
   calculate_verify_data(m.start(), m.end());
   return _verify_oop_bits;
 }
 
 uintptr_t Universe::verify_mark_mask() {
< prev index next >