src/share/vm/memory/sharedHeap.hpp

rev 5732 : [mq]: comments2


//                       &buf_scan_non_heap_roots,
//                       &eager_scan_code_roots);
//  which delegates to SharedHeap::process_strong_roots() and uses
//  SubTasksDone* _process_strong_tasks to claim tasks.
//  process_strong_roots() calls
//      rem_set()->younger_refs_iterate()
//  to scan the card table, which eventually calls down into
//  CardTableModRefBS::par_non_clean_card_iterate_work().  This method
//  uses SequentialSubTasksDone* _pst to claim tasks.
//  Both SubTasksDone and SequentialSubTasksDone call their method
//  all_tasks_completed() to count the number of GC workers that have
//  finished their work.  That logic is "when all the workers are
//  finished, the tasks are finished".
//
//  The pattern that appears in the code is to set _n_threads
//  to a value > 1 before a task that you would like executed in parallel
//  and then to set it to 0 after that task has completed.  A value of
//  0 is a "special" value in set_n_threads() which translates to
//  setting _n_threads to 1.
//
//  Some code uses _n_termination to decide if work should be done in
//  parallel.  The notorious possibly_parallel_oops_do() in threads.cpp
//  is an example of such code.  Look for variable "is_par" for other
//  examples.
//
//  active_workers is not reset to 0 after a parallel phase.  Its
//  value may be used in later phases, and in at least one instance
//  (the parallel remark) it has to be used (the parallel remark depends
//  on the partitioning done in the previous parallel scavenge).

class SharedHeap : public CollectedHeap {
  friend class VMStructs;

  friend class VM_GC_Operation;
  friend class VM_CGC_Operation;

private:
  // For claiming strong_roots tasks.
  SubTasksDone* _process_strong_tasks;

protected:
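
As a rough illustration of the claim/complete protocol described in the
comment above, here is a minimal, self-contained C++ sketch.  It is not the
HotSpot SubTasksDone implementation; the class name, the fixed task limit, and
the use of std::atomic are assumptions made purely for illustration.

#include <atomic>
#include <cassert>

// Simplified model of a SubTasksDone-style claimer: each subtask is claimed
// by exactly one worker, and all_tasks_completed() is called once per worker;
// the last caller to check in resets the state.
class ToySubTasksDone {                    // hypothetical name, not HotSpot's
  enum { max_tasks = 32 };
  std::atomic<unsigned> _tasks[max_tasks]; // 0 = unclaimed, 1 = claimed
  std::atomic<unsigned> _threads_completed;
  unsigned _n_threads;                     // number of workers expected

public:
  explicit ToySubTasksDone(unsigned n_threads) : _threads_completed(0) {
    set_n_threads(n_threads);
    for (unsigned i = 0; i < max_tasks; i++) _tasks[i] = 0;
  }

  // Mirrors the "0 is special" convention from the comment: asking for 0
  // threads means "serial", which is recorded as a single worker.
  void set_n_threads(unsigned t) { _n_threads = (t == 0) ? 1 : t; }

  // Returns true if the calling worker claimed task t; at most one worker
  // gets true for a given t.
  bool try_claim_task(unsigned t) {
    assert(t < max_tasks);
    unsigned expected = 0;
    return _tasks[t].compare_exchange_strong(expected, 1);
  }

  // Each worker calls this when it has no more work.  Only when the last of
  // the _n_threads workers has checked in are "all tasks completed", at which
  // point the claim flags are reset for the next cycle.
  void all_tasks_completed() {
    if (++_threads_completed == _n_threads) {
      for (unsigned i = 0; i < max_tasks; i++) _tasks[i] = 0;
      _threads_completed = 0;
    }
  }
};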
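
The _n_threads pattern from the comment (set a value greater than 1 before the
phase you want parallelized, set it back to 0 afterwards) can then be sketched
as follows.  The driver and worker functions, the task count of 4, and the
loop structure are all invented for illustration; in HotSpot the task ids
would be the SH_PS_* style subtask ids used with _process_strong_tasks.

// Hypothetical driver: bump the expected worker count before the parallel
// phase, then drop it back to 0 (i.e. serial, stored as 1) when it is done.
void example_parallel_phase(ToySubTasksDone& tasks, unsigned active_workers) {
  tasks.set_n_threads(active_workers);   // value > 1 before the parallel task

  // ... each of the active_workers GC threads runs worker_body(tasks) ...

  tasks.set_n_threads(0);                // "special" 0 after the task completes
}

// Body executed by each worker thread.
void worker_body(ToySubTasksDone& tasks) {
  for (unsigned t = 0; t < 4; t++) {
    if (tasks.try_claim_task(t)) {
      // do the work for subtask t; the claim guarantees it happens only once
    }
  }
  tasks.all_tasks_completed();           // last worker in resets the state
}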
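
Finally, the "is_par" idiom mentioned for possibly_parallel_oops_do() boils
down to choosing between the claim-based parallel path and a plain serial walk
based on the recorded thread count.  The function below is illustrative only
and is not the HotSpot code; the exact condition HotSpot tests differs by
call site.

// Illustrative only: decide between the parallel (claim-based) path and the
// serial path from the number of threads recorded for the phase.
void possibly_parallel_work(ToySubTasksDone& tasks, unsigned n_threads) {
  bool is_par = n_threads > 1;
  if (is_par) {
    // Parallel case: this thread claims its share of the subtasks.
    worker_body(tasks);
  } else {
    // Serial case: the single thread simply does all the work itself;
    // there is nothing to claim and no one to hand work off to.
  }
}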



