< prev index next >

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

Print this page
rev 47957 : 8191564: Refactor GC related serviceability code into GC specific subclasses


  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/concurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1Allocator.inline.hpp"
  34 #include "gc/g1/g1CollectedHeap.inline.hpp"
  35 #include "gc/g1/g1CollectionSet.hpp"
  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"
  38 #include "gc/g1/g1ConcurrentRefine.hpp"
  39 #include "gc/g1/g1ConcurrentRefineThread.hpp"
  40 #include "gc/g1/g1EvacStats.inline.hpp"
  41 #include "gc/g1/g1FullCollector.hpp"
  42 #include "gc/g1/g1FullGCScope.hpp"
  43 #include "gc/g1/g1GCPhaseTimes.hpp"
  44 #include "gc/g1/g1HeapSizingPolicy.hpp"
  45 #include "gc/g1/g1HeapTransition.hpp"
  46 #include "gc/g1/g1HeapVerifier.hpp"
  47 #include "gc/g1/g1HotCardCache.hpp"

  48 #include "gc/g1/g1OopClosures.inline.hpp"
  49 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  50 #include "gc/g1/g1Policy.hpp"
  51 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  52 #include "gc/g1/g1RemSet.hpp"
  53 #include "gc/g1/g1RootClosures.hpp"
  54 #include "gc/g1/g1RootProcessor.hpp"
  55 #include "gc/g1/g1StringDedup.hpp"
  56 #include "gc/g1/g1YCTypes.hpp"
  57 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
  58 #include "gc/g1/heapRegion.inline.hpp"
  59 #include "gc/g1/heapRegionRemSet.hpp"
  60 #include "gc/g1/heapRegionSet.inline.hpp"
  61 #include "gc/g1/vm_operations_g1.hpp"
  62 #include "gc/shared/gcHeapSummary.hpp"
  63 #include "gc/shared/gcId.hpp"
  64 #include "gc/shared/gcLocker.inline.hpp"
  65 #include "gc/shared/gcTimer.hpp"
  66 #include "gc/shared/gcTrace.hpp"
  67 #include "gc/shared/gcTraceTime.inline.hpp"
  68 #include "gc/shared/generationSpec.hpp"
  69 #include "gc/shared/isGCActiveMark.hpp"
  70 #include "gc/shared/preservedMarks.inline.hpp"
  71 #include "gc/shared/suspendibleThreadSet.hpp"
  72 #include "gc/shared/referenceProcessor.inline.hpp"
  73 #include "gc/shared/taskqueue.inline.hpp"
  74 #include "gc/shared/weakProcessor.hpp"
  75 #include "logging/log.hpp"
  76 #include "memory/allocation.hpp"
  77 #include "memory/iterator.hpp"
  78 #include "memory/resourceArea.hpp"
  79 #include "oops/oop.inline.hpp"
  80 #include "prims/resolvedMethodTable.hpp"
  81 #include "runtime/atomic.hpp"
  82 #include "runtime/init.hpp"
  83 #include "runtime/orderAccess.inline.hpp"
  84 #include "runtime/vmThread.hpp"

  85 #include "utilities/align.hpp"
  86 #include "utilities/globalDefinitions.hpp"
  87 #include "utilities/stack.inline.hpp"
  88 
  89 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  90 
  91 // INVARIANTS/NOTES
  92 //
  93 // All allocation activity covered by the G1CollectedHeap interface is
  94 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  95 // and allocate_new_tlab, which are the "entry" points to the
  96 // allocation code from the rest of the JVM.  (Note that this does not
  97 // apply to TLAB allocation, which is not part of this interface: it
  98 // is done by clients of this interface.)
  99 
 100 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 101  private:
 102   size_t _num_dirtied;
 103   G1CollectedHeap* _g1h;
 104   G1SATBCardTableLoggingModRefBS* _g1_bs;


2963 
2964     GCTraceCPUTime tcpu;
2965 
2966     FormatBuffer<> gc_string("Pause ");
2967     if (collector_state()->during_initial_mark_pause()) {
2968       gc_string.append("Initial Mark");
2969     } else if (collector_state()->gcs_are_young()) {
2970       gc_string.append("Young");
2971     } else {
2972       gc_string.append("Mixed");
2973     }
2974     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2975 
2976     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2977                                                                   workers()->active_workers(),
2978                                                                   Threads::number_of_non_daemon_threads());
2979     workers()->update_active_workers(active_workers);
2980     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2981 
2982     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2983     TraceMemoryManagerStats tms(false /* fullGC */, gc_cause());
2984 
2985     // If the secondary_free_list is not empty, append it to the
2986     // free_list. No need to wait for the cleanup operation to finish;
2987     // the region allocation code will check the secondary_free_list
2988     // and wait if necessary. If the G1StressConcRegionFreeing flag is
2989     // set, skip this step so that the region allocation code has to
2990     // get entries from the secondary_free_list.
2991     if (!G1StressConcRegionFreeing) {
2992       append_secondary_free_list_if_not_empty_with_lock();
2993     }
2994 
2995     G1HeapTransition heap_transition(this);
2996     size_t heap_used_bytes_before_gc = used();
2997 
2998     // Don't dynamically change the number of GC threads this early.  A value of
2999     // 0 is used to indicate serial work.  When parallel work is done,
3000     // it will be set.
3001 
3002     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3003       IsGCActiveMark x;


5376 
5377 public:
5378   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5379     _g1h(g1h) {}
5380 
  // Visit one code blob: skip anything that is not an nmethod, and
  // re-register live nmethods with the heap so their embedded oops are
  // tracked again as strong code roots.
  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
    if (nm == NULL) {
      // Not an nmethod (or NULL blob) - nothing to register.
      return;
    }

    if (ScavengeRootsInCode) {
      // Registration is only needed when oops may live in code.
      _g1h->register_nmethod(nm);
    }
  }
5391 };
5392 
// Walk the entire code cache, re-registering every nmethod with the heap
// (via RebuildStrongCodeRootClosure) so that the strong code root
// information is rebuilt from scratch.
void G1CollectedHeap::rebuild_strong_code_roots() {
  RebuildStrongCodeRootClosure blob_cl(this);
  CodeCache::blobs_do(&blob_cl);
}


  28 #include "classfile/symbolTable.hpp"
  29 #include "code/codeCache.hpp"
  30 #include "code/icBuffer.hpp"
  31 #include "gc/g1/bufferingOopClosure.hpp"
  32 #include "gc/g1/concurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1Allocator.inline.hpp"
  34 #include "gc/g1/g1CollectedHeap.inline.hpp"
  35 #include "gc/g1/g1CollectionSet.hpp"
  36 #include "gc/g1/g1CollectorPolicy.hpp"
  37 #include "gc/g1/g1CollectorState.hpp"
  38 #include "gc/g1/g1ConcurrentRefine.hpp"
  39 #include "gc/g1/g1ConcurrentRefineThread.hpp"
  40 #include "gc/g1/g1EvacStats.inline.hpp"
  41 #include "gc/g1/g1FullCollector.hpp"
  42 #include "gc/g1/g1FullGCScope.hpp"
  43 #include "gc/g1/g1GCPhaseTimes.hpp"
  44 #include "gc/g1/g1HeapSizingPolicy.hpp"
  45 #include "gc/g1/g1HeapTransition.hpp"
  46 #include "gc/g1/g1HeapVerifier.hpp"
  47 #include "gc/g1/g1HotCardCache.hpp"
  48 #include "gc/g1/g1MemoryPool.hpp"
  49 #include "gc/g1/g1OopClosures.inline.hpp"
  50 #include "gc/g1/g1ParScanThreadState.inline.hpp"
  51 #include "gc/g1/g1Policy.hpp"
  52 #include "gc/g1/g1RegionToSpaceMapper.hpp"
  53 #include "gc/g1/g1RemSet.hpp"
  54 #include "gc/g1/g1RootClosures.hpp"
  55 #include "gc/g1/g1RootProcessor.hpp"
  56 #include "gc/g1/g1StringDedup.hpp"
  57 #include "gc/g1/g1YCTypes.hpp"
  58 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
  59 #include "gc/g1/heapRegion.inline.hpp"
  60 #include "gc/g1/heapRegionRemSet.hpp"
  61 #include "gc/g1/heapRegionSet.inline.hpp"
  62 #include "gc/g1/vm_operations_g1.hpp"
  63 #include "gc/shared/gcHeapSummary.hpp"
  64 #include "gc/shared/gcId.hpp"
  65 #include "gc/shared/gcLocker.inline.hpp"
  66 #include "gc/shared/gcTimer.hpp"
  67 #include "gc/shared/gcTrace.hpp"
  68 #include "gc/shared/gcTraceTime.inline.hpp"
  69 #include "gc/shared/generationSpec.hpp"
  70 #include "gc/shared/isGCActiveMark.hpp"
  71 #include "gc/shared/preservedMarks.inline.hpp"
  72 #include "gc/shared/suspendibleThreadSet.hpp"
  73 #include "gc/shared/referenceProcessor.inline.hpp"
  74 #include "gc/shared/taskqueue.inline.hpp"
  75 #include "gc/shared/weakProcessor.hpp"
  76 #include "logging/log.hpp"
  77 #include "memory/allocation.hpp"
  78 #include "memory/iterator.hpp"
  79 #include "memory/resourceArea.hpp"
  80 #include "oops/oop.inline.hpp"
  81 #include "prims/resolvedMethodTable.hpp"
  82 #include "runtime/atomic.hpp"
  83 #include "runtime/init.hpp"
  84 #include "runtime/orderAccess.inline.hpp"
  85 #include "runtime/vmThread.hpp"
  86 #include "services/memoryManager.hpp"
  87 #include "utilities/align.hpp"
  88 #include "utilities/globalDefinitions.hpp"
  89 #include "utilities/stack.inline.hpp"
  90 
  91 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
  92 
  93 // INVARIANTS/NOTES
  94 //
  95 // All allocation activity covered by the G1CollectedHeap interface is
  96 // serialized by acquiring the HeapLock.  This happens in mem_allocate
  97 // and allocate_new_tlab, which are the "entry" points to the
  98 // allocation code from the rest of the JVM.  (Note that this does not
  99 // apply to TLAB allocation, which is not part of this interface: it
 100 // is done by clients of this interface.)
 101 
 102 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
 103  private:
 104   size_t _num_dirtied;
 105   G1CollectedHeap* _g1h;
 106   G1SATBCardTableLoggingModRefBS* _g1_bs;


2965 
2966     GCTraceCPUTime tcpu;
2967 
2968     FormatBuffer<> gc_string("Pause ");
2969     if (collector_state()->during_initial_mark_pause()) {
2970       gc_string.append("Initial Mark");
2971     } else if (collector_state()->gcs_are_young()) {
2972       gc_string.append("Young");
2973     } else {
2974       gc_string.append("Mixed");
2975     }
2976     GCTraceTime(Info, gc) tm(gc_string, NULL, gc_cause(), true);
2977 
2978     uint active_workers = AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
2979                                                                   workers()->active_workers(),
2980                                                                   Threads::number_of_non_daemon_threads());
2981     workers()->update_active_workers(active_workers);
2982     log_info(gc,task)("Using %u workers of %u for evacuation", active_workers, workers()->total_workers());
2983 
2984     TraceCollectorStats tcs(g1mm()->incremental_collection_counters());
2985     TraceMemoryManagerStats tms(_minor_mgr, gc_cause());
2986 
2987     // If the secondary_free_list is not empty, append it to the
2988     // free_list. No need to wait for the cleanup operation to finish;
2989     // the region allocation code will check the secondary_free_list
2990     // and wait if necessary. If the G1StressConcRegionFreeing flag is
2991     // set, skip this step so that the region allocation code has to
2992     // get entries from the secondary_free_list.
2993     if (!G1StressConcRegionFreeing) {
2994       append_secondary_free_list_if_not_empty_with_lock();
2995     }
2996 
2997     G1HeapTransition heap_transition(this);
2998     size_t heap_used_bytes_before_gc = used();
2999 
3000     // Don't dynamically change the number of GC threads this early.  A value of
3001     // 0 is used to indicate serial work.  When parallel work is done,
3002     // it will be set.
3003 
3004     { // Call to jvmpi::post_class_unload_events must occur outside of active GC
3005       IsGCActiveMark x;


5378 
5379 public:
5380   RebuildStrongCodeRootClosure(G1CollectedHeap* g1h) :
5381     _g1h(g1h) {}
5382 
  // Visit one code blob: skip anything that is not an nmethod, and
  // re-register live nmethods with the heap so their embedded oops are
  // tracked again as strong code roots.
  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = (cb != NULL) ? cb->as_nmethod_or_null() : NULL;
    if (nm == NULL) {
      // Not an nmethod (or NULL blob) - nothing to register.
      return;
    }

    if (ScavengeRootsInCode) {
      // Registration is only needed when oops may live in code.
      _g1h->register_nmethod(nm);
    }
  }
5393 };
5394 
// Walk the entire code cache, re-registering every nmethod with the heap
// (via RebuildStrongCodeRootClosure) so that the strong code root
// information is rebuilt from scratch.
void G1CollectedHeap::rebuild_strong_code_roots() {
  RebuildStrongCodeRootClosure blob_cl(this);
  CodeCache::blobs_do(&blob_cl);
}
5399 
5400 class G1YoungGenMemoryManager : public GCMemoryManager {
5401 private:
5402 public:
5403   G1YoungGenMemoryManager() : GCMemoryManager() {}
5404 
5405   const char* name() { return "G1 Young Generation"; }
5406   virtual const char* gc_end_message() { return "end of minor GC"; }
5407 };
5408 
5409 class G1OldGenMemoryManager : public GCMemoryManager {
5410 private:
5411 public:
5412   G1OldGenMemoryManager() : GCMemoryManager() {}
5413 
5414   const char* name() { return "G1 Old Generation"; }
5415   virtual const char* gc_end_message() { return "end of major GC"; }
5416 };
5417 
5418 GrowableArray<MemoryManager*> G1CollectedHeap::memory_managers() {
5419   _minor_mgr = new G1YoungGenMemoryManager();
5420   _major_mgr = new G1OldGenMemoryManager();
5421   GrowableArray<MemoryManager*> mem_mgrs;
5422   mem_mgrs.append(_minor_mgr);
5423   mem_mgrs.append(_major_mgr);
5424   return mem_mgrs;
5425 }
5426 
// Returns the young-generation (minor GC) memory manager created in
// memory_managers().
GCMemoryManager* G1CollectedHeap::minor_mgr() {
  return _minor_mgr;
}
5430 
// Returns the old-generation (major GC) memory manager created in
// memory_managers().
GCMemoryManager* G1CollectedHeap::major_mgr() {
  return _major_mgr;
}
5434 
5435 GrowableArray<MemoryPool*> G1CollectedHeap::memory_pools() {
5436   G1EdenPool* eden = new G1EdenPool(this);
5437   G1SurvivorPool* survivor = new G1SurvivorPool(this);
5438   G1OldGenPool* old_gen = new G1OldGenPool(this);
5439 
5440   _major_mgr->add_pool(eden);
5441   _major_mgr->add_pool(survivor);
5442   _major_mgr->add_pool(old_gen);
5443   _minor_mgr->add_pool(eden);
5444   _minor_mgr->add_pool(survivor);
5445 
5446   GrowableArray<MemoryPool*> mem_pools;
5447   mem_pools.append(eden);
5448   mem_pools.append(survivor);
5449   mem_pools.append(old_gen);
5450   return mem_pools;
5451 }
< prev index next >