< prev index next >

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

Print this page
rev 52689 : 8213224: Move code related to GC threads calculation out of AdaptiveSizePolicy
Summary: Consolidate code related to GC threads calculation into a single class


  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorState.hpp"
  31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1Policy.hpp"
  36 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/g1ThreadLocalData.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionRemSet.hpp"
  41 #include "gc/g1/heapRegionSet.inline.hpp"
  42 #include "gc/shared/adaptiveSizePolicy.hpp"
  43 #include "gc/shared/gcId.hpp"
  44 #include "gc/shared/gcTimer.hpp"
  45 #include "gc/shared/gcTrace.hpp"
  46 #include "gc/shared/gcTraceTime.inline.hpp"
  47 #include "gc/shared/genOopClosures.inline.hpp"
  48 #include "gc/shared/referencePolicy.hpp"
  49 #include "gc/shared/strongRootsScope.hpp"
  50 #include "gc/shared/suspendibleThreadSet.hpp"
  51 #include "gc/shared/taskqueue.inline.hpp"
  52 #include "gc/shared/vmGCOperations.hpp"
  53 #include "gc/shared/weakProcessor.inline.hpp"

  54 #include "include/jvm.h"
  55 #include "logging/log.hpp"
  56 #include "memory/allocation.hpp"
  57 #include "memory/resourceArea.hpp"
  58 #include "oops/access.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/handles.inline.hpp"
  62 #include "runtime/java.hpp"
  63 #include "runtime/prefetch.inline.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "utilities/align.hpp"
  66 #include "utilities/growableArray.hpp"
  67 
  68 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  69   assert(addr < _cm->finger(), "invariant");
  70   assert(addr >= _task->finger(), "invariant");
  71 
  72   // We move that task's local finger along.
  73   _task->move_finger_to(addr);


 828     }
 829 
 830     double end_vtime = os::elapsedVTime();
 831     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 832   }
 833 
 834   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
 835       AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 836 
 837   ~G1CMConcurrentMarkingTask() { }
 838 };
 839 
// Compute the number of concurrent marking workers to activate for this cycle.
// Unless dynamic GC thread sizing applies, the full _max_concurrent_workers is
// used; otherwise the count is derived from the current non-daemon Java thread
// count (pre-JDK-8213224: via AdaptiveSizePolicy). Result is always in
// [1, _max_concurrent_workers], enforced by the assert below.
 840 uint G1ConcurrentMark::calc_active_marking_workers() {
 841   uint result = 0;
// Use the maximum when dynamic sizing is off, or when ConcGCThreads was set
// explicitly on the command line and dynamic sizing is not being forced.
 842   if (!UseDynamicNumberOfGCThreads ||
 843       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 844        !ForceDynamicNumberOfGCThreads)) {
 845     result = _max_concurrent_workers;
 846   } else {
// Dynamic path: size the worker count from application (non-daemon) thread
// activity, bounded below by 1 and above by _max_concurrent_workers.
 847     result =
 848       AdaptiveSizePolicy::calc_default_active_workers(_max_concurrent_workers,
 849                                                       1, /* Minimum workers */
 850                                                       _num_concurrent_workers,
 851                                                       Threads::number_of_non_daemon_threads());
 852     // Don't scale the result down by scale_concurrent_workers() because
 853     // that scaling has already gone into "_max_concurrent_workers".
 854   }
 855   assert(result > 0 && result <= _max_concurrent_workers,
 856          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 857          _max_concurrent_workers, result);
 858   return result;
 859 }
 860 
 861 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
 862   // Currently, only survivors can be root regions.
 863   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
 864   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 865 
 866   const uintx interval = PrefetchScanIntervalInBytes;
 867   HeapWord* curr = hr->bottom();
 868   const HeapWord* end = hr->top();




  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "classfile/classLoaderDataGraph.hpp"
  27 #include "code/codeCache.hpp"
  28 #include "gc/g1/g1BarrierSet.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorState.hpp"
  31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1HeapVerifier.hpp"
  34 #include "gc/g1/g1OopClosures.inline.hpp"
  35 #include "gc/g1/g1Policy.hpp"
  36 #include "gc/g1/g1RegionMarkStatsCache.inline.hpp"
  37 #include "gc/g1/g1StringDedup.hpp"
  38 #include "gc/g1/g1ThreadLocalData.hpp"
  39 #include "gc/g1/heapRegion.inline.hpp"
  40 #include "gc/g1/heapRegionRemSet.hpp"
  41 #include "gc/g1/heapRegionSet.inline.hpp"

  42 #include "gc/shared/gcId.hpp"
  43 #include "gc/shared/gcTimer.hpp"
  44 #include "gc/shared/gcTrace.hpp"
  45 #include "gc/shared/gcTraceTime.inline.hpp"
  46 #include "gc/shared/genOopClosures.inline.hpp"
  47 #include "gc/shared/referencePolicy.hpp"
  48 #include "gc/shared/strongRootsScope.hpp"
  49 #include "gc/shared/suspendibleThreadSet.hpp"
  50 #include "gc/shared/taskqueue.inline.hpp"
  51 #include "gc/shared/vmGCOperations.hpp"
  52 #include "gc/shared/weakProcessor.inline.hpp"
  53 #include "gc/shared/workerPolicy.hpp"
  54 #include "include/jvm.h"
  55 #include "logging/log.hpp"
  56 #include "memory/allocation.hpp"
  57 #include "memory/resourceArea.hpp"
  58 #include "oops/access.inline.hpp"
  59 #include "oops/oop.inline.hpp"
  60 #include "runtime/atomic.hpp"
  61 #include "runtime/handles.inline.hpp"
  62 #include "runtime/java.hpp"
  63 #include "runtime/prefetch.inline.hpp"
  64 #include "services/memTracker.hpp"
  65 #include "utilities/align.hpp"
  66 #include "utilities/growableArray.hpp"
  67 
  68 bool G1CMBitMapClosure::do_addr(HeapWord* const addr) {
  69   assert(addr < _cm->finger(), "invariant");
  70   assert(addr >= _task->finger(), "invariant");
  71 
  72   // We move that task's local finger along.
  73   _task->move_finger_to(addr);


 828     }
 829 
 830     double end_vtime = os::elapsedVTime();
 831     _cm->update_accum_task_vtime(worker_id, end_vtime - start_vtime);
 832   }
 833 
 834   G1CMConcurrentMarkingTask(G1ConcurrentMark* cm) :
 835       AbstractGangTask("Concurrent Mark"), _cm(cm) { }
 836 
 837   ~G1CMConcurrentMarkingTask() { }
 838 };
 839 
// Compute the number of concurrent marking workers to activate for this cycle.
// Unless dynamic GC thread sizing applies, the full _max_concurrent_workers is
// used; otherwise the count is derived from the current non-daemon Java thread
// count (post-JDK-8213224: via the consolidated WorkerPolicy class). Result is
// always in [1, _max_concurrent_workers], enforced by the assert below.
 840 uint G1ConcurrentMark::calc_active_marking_workers() {
 841   uint result = 0;
// Use the maximum when dynamic sizing is off, or when ConcGCThreads was set
// explicitly on the command line and dynamic sizing is not being forced.
 842   if (!UseDynamicNumberOfGCThreads ||
 843       (!FLAG_IS_DEFAULT(ConcGCThreads) &&
 844        !ForceDynamicNumberOfGCThreads)) {
 845     result = _max_concurrent_workers;
 846   } else {
// Dynamic path: size the worker count from application (non-daemon) thread
// activity, bounded below by 1 and above by _max_concurrent_workers.
 847     result =
 848       WorkerPolicy::calc_default_active_workers(_max_concurrent_workers,
 849                                                 1, /* Minimum workers */
 850                                                 _num_concurrent_workers,
 851                                                 Threads::number_of_non_daemon_threads());
 852     // Don't scale the result down by scale_concurrent_workers() because
 853     // that scaling has already gone into "_max_concurrent_workers".
 854   }
 855   assert(result > 0 && result <= _max_concurrent_workers,
 856          "Calculated number of marking workers must be larger than zero and at most the maximum %u, but is %u",
 857          _max_concurrent_workers, result);
 858   return result;
 859 }
 860 
 861 void G1ConcurrentMark::scan_root_region(HeapRegion* hr, uint worker_id) {
 862   // Currently, only survivors can be root regions.
 863   assert(hr->next_top_at_mark_start() == hr->bottom(), "invariant");
 864   G1RootRegionScanClosure cl(_g1h, this, worker_id);
 865 
 866   const uintx interval = PrefetchScanIntervalInBytes;
 867   HeapWord* curr = hr->bottom();
 868   const HeapWord* end = hr->top();


< prev index next >