< prev index next >

src/hotspot/share/gc/parallel/psParallelCompact.cpp

Print this page
rev 57840 : imported patch 8215297-remove-ptt
rev 57842 : [mq]: 8238220-rename-owsttaskterminator


  32 #include "code/codeCache.hpp"
  33 #include "gc/parallel/parallelArguments.hpp"
  34 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  35 #include "gc/parallel/parMarkBitMap.inline.hpp"
  36 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  37 #include "gc/parallel/psCompactionManager.inline.hpp"
  38 #include "gc/parallel/psOldGen.hpp"
  39 #include "gc/parallel/psParallelCompact.inline.hpp"
  40 #include "gc/parallel/psPromotionManager.inline.hpp"
  41 #include "gc/parallel/psRootType.hpp"
  42 #include "gc/parallel/psScavenge.hpp"
  43 #include "gc/parallel/psYoungGen.hpp"
  44 #include "gc/shared/gcCause.hpp"
  45 #include "gc/shared/gcHeapSummary.hpp"
  46 #include "gc/shared/gcId.hpp"
  47 #include "gc/shared/gcLocker.hpp"
  48 #include "gc/shared/gcTimer.hpp"
  49 #include "gc/shared/gcTrace.hpp"
  50 #include "gc/shared/gcTraceTime.inline.hpp"
  51 #include "gc/shared/isGCActiveMark.hpp"
  52 #include "gc/shared/owstTaskTerminator.hpp"
  53 #include "gc/shared/referencePolicy.hpp"
  54 #include "gc/shared/referenceProcessor.hpp"
  55 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  56 #include "gc/shared/spaceDecorator.inline.hpp"

  57 #include "gc/shared/weakProcessor.hpp"
  58 #include "gc/shared/workerPolicy.hpp"
  59 #include "gc/shared/workgroup.hpp"
  60 #include "logging/log.hpp"
  61 #include "memory/iterator.inline.hpp"
  62 #include "memory/resourceArea.hpp"
  63 #include "memory/universe.hpp"
  64 #include "oops/access.inline.hpp"
  65 #include "oops/instanceClassLoaderKlass.inline.hpp"
  66 #include "oops/instanceKlass.inline.hpp"
  67 #include "oops/instanceMirrorKlass.inline.hpp"
  68 #include "oops/methodData.hpp"
  69 #include "oops/objArrayKlass.inline.hpp"
  70 #include "oops/oop.inline.hpp"
  71 #include "runtime/atomic.hpp"
  72 #include "runtime/handles.inline.hpp"
  73 #include "runtime/safepoint.hpp"
  74 #include "runtime/vmThread.hpp"
  75 #include "services/management.hpp"
  76 #include "services/memTracker.hpp"


1953       VerifyAfterGC) {
1954     old_gen->verify_object_start_array();
1955   }
1956 
1957   if (ZapUnusedHeapArea) {
1958     old_gen->object_space()->check_mangled_unused_area_complete();
1959   }
1960 
1961   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1962 
1963   collection_exit.update();
1964 
1965   heap->print_heap_after_gc();
1966   heap->trace_heap_after_gc(&_gc_tracer);
1967 
1968   log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
1969                          marking_start.ticks(), compaction_start.ticks(),
1970                          collection_exit.ticks());
1971 
1972 #ifdef TRACESPINNING
1973   OWSTTaskTerminator::print_termination_counts();
1974 #endif
1975 
1976   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1977 
1978   _gc_timer.register_gc_end();
1979 
1980   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1981   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1982 
1983   return true;
1984 }
1985 
1986 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1987                                              PSYoungGen* young_gen,
1988                                              PSOldGen* old_gen) {
1989   MutableSpace* const eden_space = young_gen->eden_space();
1990   assert(!eden_space->is_empty(), "eden must be non-empty");
1991   assert(young_gen->virtual_space()->alignment() ==
1992          old_gen->virtual_space()->alignment(), "alignments do not match");
1993 


2133         ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
2134       }
2135       break;
2136 
2137     case ParallelRootType::code_cache:
2138       // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
2139       //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
2140       AOTLoader::oops_do(&mark_and_push_closure);
2141       break;
2142 
2143     case ParallelRootType::sentinel:
2144     DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
2145       fatal("Bad enumeration value: %u", root_type);
2146       break;
2147   }
2148 
2149   // Do the real work
2150   cm->follow_marking_stacks();
2151 }
2152 
// Work-stealing loop for the parallel marking phase.  A worker calls this
// after exhausting its own work: it repeatedly steals object-array chunks
// and individual oops from other workers' queues, draining its own marking
// stacks after every successful steal, and exits only once the terminator
// agrees that all workers are out of work.
2153 static void steal_marking_work(OWSTTaskTerminator& terminator, uint worker_id) {
2154   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2155 
2156   ParCompactionManager* cm =
2157     ParCompactionManager::gc_thread_compaction_manager(worker_id);
2158 
2159   oop obj = NULL;
2160   ObjArrayTask task;
2161   do {
2162     while (ParCompactionManager::steal_objarray(worker_id,  task)) {
2163       cm->follow_array((objArrayOop)task.obj(), task.index());
2164       cm->follow_marking_stacks();
2165     }
2166     while (ParCompactionManager::steal(worker_id, obj)) {
2167       cm->follow_contents(obj);
2168       cm->follow_marking_stacks();
2169     }
2170   } while (!terminator.offer_termination()); // retry until global termination
2171 }
2172 
// Gang task for the initial marking phase: each worker claims pending
// root-type subtasks, then scans its share of the Java threads, and
// finally (when more than one worker is active) enters the work-stealing
// termination loop.
2173 class MarkFromRootsTask : public AbstractGangTask {
2174   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2175   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
2176   SequentialSubTasksDone _subtasks;     // hands out one subtask per ParallelRootType value
2177   OWSTTaskTerminator _terminator;
2178   uint _active_workers;
2179 
2180 public:
2181   MarkFromRootsTask(uint active_workers) :
2182       AbstractGangTask("MarkFromRootsTask"),
2183       _strong_roots_scope(active_workers),
2184       _subtasks(),
2185       _terminator(active_workers, ParCompactionManager::stack_array()),
2186       _active_workers(active_workers) {
2187     _subtasks.set_n_threads(active_workers);
2188     _subtasks.set_n_tasks(ParallelRootType::sentinel); // sentinel == number of root types
2189   }
2190 
2191   virtual void work(uint worker_id) {
2192     for (uint task = 0; _subtasks.try_claim_task(task); /*empty*/ ) {
2193       mark_from_roots_work(static_cast<ParallelRootType::Value>(task), worker_id);
2194     }
2195     _subtasks.all_tasks_completed();
2196 
2197     PCAddThreadRootsMarkingTaskClosure closure(worker_id);
2198     Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
2199 
2200     if (_active_workers > 1) {
2201       steal_marking_work(_terminator, worker_id); // a lone worker has no one to steal from
2202     }
2203   }
2204 };
2205 
2206 class PCRefProcTask : public AbstractGangTask {
2207   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2208   ProcessTask& _task;
2209   uint _ergo_workers;
2210   OWSTTaskTerminator _terminator;
2211 
2212 public:
2213   PCRefProcTask(ProcessTask& task, uint ergo_workers) :
2214       AbstractGangTask("PCRefProcTask"),
2215       _task(task),
2216       _ergo_workers(ergo_workers),
2217       _terminator(_ergo_workers, ParCompactionManager::stack_array()) {
2218   }
2219 
2220   virtual void work(uint worker_id) {
2221     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2222     assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2223 
2224     ParCompactionManager* cm =
2225       ParCompactionManager::gc_thread_compaction_manager(worker_id);
2226     PCMarkAndPushClosure mark_and_push_closure(cm);
2227     ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2228     _task.work(worker_id, *PSParallelCompact::is_alive_closure(),
2229                mark_and_push_closure, follow_stack_closure);
2230 


2570       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2571 
2572       size_t histo[5] = { 0, 0, 0, 0, 0 };
2573       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2574       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2575 
2576       for (const rd_t* cur = beg; cur < end; ++cur) {
2577         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2578       }
2579       out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2580       for (size_t i = 0; i < histo_len; ++i) {
2581         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2582                    histo[i], 100.0 * histo[i] / region_cnt);
2583       }
2584       out->cr();
2585     }
2586   }
2587 }
2588 #endif // #ifdef ASSERT
2589 
// Work-stealing loop for the compaction phase.  After draining the regions
// preloaded on its own stack, a worker alternates between stealing region
// indices from other workers and claiming unavailable regions (filled with
// the help of shadow regions), until the terminator signals completion.
2590 static void compaction_with_stealing_work(OWSTTaskTerminator* terminator, uint worker_id) {
2591   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2592 
2593   ParCompactionManager* cm =
2594     ParCompactionManager::gc_thread_compaction_manager(worker_id);
2595 
2596   // Drain the stacks that have been preloaded with regions
2597   // that are ready to fill.
2598 
2599   cm->drain_region_stacks();
2600 
2601   guarantee(cm->region_stack()->is_empty(), "Not empty");
2602 
2603   size_t region_index = 0;
2604 
2605   while (true) {
2606     if (ParCompactionManager::steal(worker_id, region_index)) {
2607       PSParallelCompact::fill_and_update_region(cm, region_index);
2608       cm->drain_region_stacks();
2609     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
2610       // Fill and update an unavailable region with the help of a shadow region
2611       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
2612       cm->drain_region_stacks();
2613     } else {
2614       if (terminator->offer_termination()) {
2615         break;
2616       }
2617       // Go around again.
2618     }
2619   }
2620   return;
2621 }
2622 
2623 class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
2624   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2625   TaskQueue& _tq;
2626   OWSTTaskTerminator _terminator;
2627   uint _active_workers;
2628 
2629 public:
2630   UpdateDensePrefixAndCompactionTask(TaskQueue& tq, uint active_workers) :
2631       AbstractGangTask("UpdateDensePrefixAndCompactionTask"),
2632       _tq(tq),
2633       _terminator(active_workers, ParCompactionManager::region_array()),
2634       _active_workers(active_workers) {
2635   }
2636   virtual void work(uint worker_id) {
2637     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2638 
2639     for (PSParallelCompact::UpdateDensePrefixTask task; _tq.try_claim(task); /* empty */) {
2640       PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
2641                                                              task._space_id,
2642                                                              task._region_index_start,
2643                                                              task._region_index_end);
2644     }
2645 
2646     // Once a thread has drained its stack, it should try to steal regions from




  32 #include "code/codeCache.hpp"
  33 #include "gc/parallel/parallelArguments.hpp"
  34 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  35 #include "gc/parallel/parMarkBitMap.inline.hpp"
  36 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  37 #include "gc/parallel/psCompactionManager.inline.hpp"
  38 #include "gc/parallel/psOldGen.hpp"
  39 #include "gc/parallel/psParallelCompact.inline.hpp"
  40 #include "gc/parallel/psPromotionManager.inline.hpp"
  41 #include "gc/parallel/psRootType.hpp"
  42 #include "gc/parallel/psScavenge.hpp"
  43 #include "gc/parallel/psYoungGen.hpp"
  44 #include "gc/shared/gcCause.hpp"
  45 #include "gc/shared/gcHeapSummary.hpp"
  46 #include "gc/shared/gcId.hpp"
  47 #include "gc/shared/gcLocker.hpp"
  48 #include "gc/shared/gcTimer.hpp"
  49 #include "gc/shared/gcTrace.hpp"
  50 #include "gc/shared/gcTraceTime.inline.hpp"
  51 #include "gc/shared/isGCActiveMark.hpp"

  52 #include "gc/shared/referencePolicy.hpp"
  53 #include "gc/shared/referenceProcessor.hpp"
  54 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  55 #include "gc/shared/spaceDecorator.inline.hpp"
  56 #include "gc/shared/taskTerminator.hpp"
  57 #include "gc/shared/weakProcessor.hpp"
  58 #include "gc/shared/workerPolicy.hpp"
  59 #include "gc/shared/workgroup.hpp"
  60 #include "logging/log.hpp"
  61 #include "memory/iterator.inline.hpp"
  62 #include "memory/resourceArea.hpp"
  63 #include "memory/universe.hpp"
  64 #include "oops/access.inline.hpp"
  65 #include "oops/instanceClassLoaderKlass.inline.hpp"
  66 #include "oops/instanceKlass.inline.hpp"
  67 #include "oops/instanceMirrorKlass.inline.hpp"
  68 #include "oops/methodData.hpp"
  69 #include "oops/objArrayKlass.inline.hpp"
  70 #include "oops/oop.inline.hpp"
  71 #include "runtime/atomic.hpp"
  72 #include "runtime/handles.inline.hpp"
  73 #include "runtime/safepoint.hpp"
  74 #include "runtime/vmThread.hpp"
  75 #include "services/management.hpp"
  76 #include "services/memTracker.hpp"


1953       VerifyAfterGC) {
1954     old_gen->verify_object_start_array();
1955   }
1956 
1957   if (ZapUnusedHeapArea) {
1958     old_gen->object_space()->check_mangled_unused_area_complete();
1959   }
1960 
1961   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1962 
1963   collection_exit.update();
1964 
1965   heap->print_heap_after_gc();
1966   heap->trace_heap_after_gc(&_gc_tracer);
1967 
1968   log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
1969                          marking_start.ticks(), compaction_start.ticks(),
1970                          collection_exit.ticks());
1971 
1972 #ifdef TRACESPINNING
1973   TaskTerminator::print_termination_counts();
1974 #endif
1975 
1976   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1977 
1978   _gc_timer.register_gc_end();
1979 
1980   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1981   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1982 
1983   return true;
1984 }
1985 
1986 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1987                                              PSYoungGen* young_gen,
1988                                              PSOldGen* old_gen) {
1989   MutableSpace* const eden_space = young_gen->eden_space();
1990   assert(!eden_space->is_empty(), "eden must be non-empty");
1991   assert(young_gen->virtual_space()->alignment() ==
1992          old_gen->virtual_space()->alignment(), "alignments do not match");
1993 


2133         ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
2134       }
2135       break;
2136 
2137     case ParallelRootType::code_cache:
2138       // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
2139       //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
2140       AOTLoader::oops_do(&mark_and_push_closure);
2141       break;
2142 
2143     case ParallelRootType::sentinel:
2144     DEBUG_ONLY(default:) // DEBUG_ONLY hack will create compile error on release builds (-Wswitch) and runtime check on debug builds
2145       fatal("Bad enumeration value: %u", root_type);
2146       break;
2147   }
2148 
2149   // Do the real work
2150   cm->follow_marking_stacks();
2151 }
2152 
// Work-stealing loop for the parallel marking phase.  A worker calls this
// after exhausting its own work: it repeatedly steals object-array chunks
// and individual oops from other workers' queues, draining its own marking
// stacks after every successful steal, and exits only once the terminator
// agrees that all workers are out of work.
2153 static void steal_marking_work(TaskTerminator& terminator, uint worker_id) {
2154   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2155 
2156   ParCompactionManager* cm =
2157     ParCompactionManager::gc_thread_compaction_manager(worker_id);
2158 
2159   oop obj = NULL;
2160   ObjArrayTask task;
2161   do {
2162     while (ParCompactionManager::steal_objarray(worker_id,  task)) {
2163       cm->follow_array((objArrayOop)task.obj(), task.index());
2164       cm->follow_marking_stacks();
2165     }
2166     while (ParCompactionManager::steal(worker_id, obj)) {
2167       cm->follow_contents(obj);
2168       cm->follow_marking_stacks();
2169     }
2170   } while (!terminator.offer_termination()); // retry until global termination
2171 }
2172 
// Gang task for the initial marking phase: each worker claims pending
// root-type subtasks, then scans its share of the Java threads, and
// finally (when more than one worker is active) enters the work-stealing
// termination loop.
2173 class MarkFromRootsTask : public AbstractGangTask {
2174   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2175   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
2176   SequentialSubTasksDone _subtasks;     // hands out one subtask per ParallelRootType value
2177   TaskTerminator _terminator;
2178   uint _active_workers;
2179 
2180 public:
2181   MarkFromRootsTask(uint active_workers) :
2182       AbstractGangTask("MarkFromRootsTask"),
2183       _strong_roots_scope(active_workers),
2184       _subtasks(),
2185       _terminator(active_workers, ParCompactionManager::stack_array()),
2186       _active_workers(active_workers) {
2187     _subtasks.set_n_threads(active_workers);
2188     _subtasks.set_n_tasks(ParallelRootType::sentinel); // sentinel == number of root types
2189   }
2190 
2191   virtual void work(uint worker_id) {
2192     for (uint task = 0; _subtasks.try_claim_task(task); /*empty*/ ) {
2193       mark_from_roots_work(static_cast<ParallelRootType::Value>(task), worker_id);
2194     }
2195     _subtasks.all_tasks_completed();
2196 
2197     PCAddThreadRootsMarkingTaskClosure closure(worker_id);
2198     Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
2199 
2200     if (_active_workers > 1) {
2201       steal_marking_work(_terminator, worker_id); // a lone worker has no one to steal from
2202     }
2203   }
2204 };
2205 
2206 class PCRefProcTask : public AbstractGangTask {
2207   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2208   ProcessTask& _task;
2209   uint _ergo_workers;
2210   TaskTerminator _terminator;
2211 
2212 public:
2213   PCRefProcTask(ProcessTask& task, uint ergo_workers) :
2214       AbstractGangTask("PCRefProcTask"),
2215       _task(task),
2216       _ergo_workers(ergo_workers),
2217       _terminator(_ergo_workers, ParCompactionManager::stack_array()) {
2218   }
2219 
2220   virtual void work(uint worker_id) {
2221     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2222     assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2223 
2224     ParCompactionManager* cm =
2225       ParCompactionManager::gc_thread_compaction_manager(worker_id);
2226     PCMarkAndPushClosure mark_and_push_closure(cm);
2227     ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2228     _task.work(worker_id, *PSParallelCompact::is_alive_closure(),
2229                mark_and_push_closure, follow_stack_closure);
2230 


2570       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2571 
2572       size_t histo[5] = { 0, 0, 0, 0, 0 };
2573       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2574       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2575 
2576       for (const rd_t* cur = beg; cur < end; ++cur) {
2577         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2578       }
2579       out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2580       for (size_t i = 0; i < histo_len; ++i) {
2581         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2582                    histo[i], 100.0 * histo[i] / region_cnt);
2583       }
2584       out->cr();
2585     }
2586   }
2587 }
2588 #endif // #ifdef ASSERT
2589 
// Work-stealing loop for the compaction phase.  After draining the regions
// preloaded on its own stack, a worker alternates between stealing region
// indices from other workers and claiming unavailable regions (filled with
// the help of shadow regions), until the terminator signals completion.
2590 static void compaction_with_stealing_work(TaskTerminator* terminator, uint worker_id) {
2591   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2592 
2593   ParCompactionManager* cm =
2594     ParCompactionManager::gc_thread_compaction_manager(worker_id);
2595 
2596   // Drain the stacks that have been preloaded with regions
2597   // that are ready to fill.
2598 
2599   cm->drain_region_stacks();
2600 
2601   guarantee(cm->region_stack()->is_empty(), "Not empty");
2602 
2603   size_t region_index = 0;
2604 
2605   while (true) {
2606     if (ParCompactionManager::steal(worker_id, region_index)) {
2607       PSParallelCompact::fill_and_update_region(cm, region_index);
2608       cm->drain_region_stacks();
2609     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
2610       // Fill and update an unavailable region with the help of a shadow region
2611       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
2612       cm->drain_region_stacks();
2613     } else {
2614       if (terminator->offer_termination()) {
2615         break;
2616       }
2617       // Go around again.
2618     }
2619   }
2620   return;
2621 }
2622 
2623 class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
2624   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2625   TaskQueue& _tq;
2626   TaskTerminator _terminator;
2627   uint _active_workers;
2628 
2629 public:
2630   UpdateDensePrefixAndCompactionTask(TaskQueue& tq, uint active_workers) :
2631       AbstractGangTask("UpdateDensePrefixAndCompactionTask"),
2632       _tq(tq),
2633       _terminator(active_workers, ParCompactionManager::region_array()),
2634       _active_workers(active_workers) {
2635   }
2636   virtual void work(uint worker_id) {
2637     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2638 
2639     for (PSParallelCompact::UpdateDensePrefixTask task; _tq.try_claim(task); /* empty */) {
2640       PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
2641                                                              task._space_id,
2642                                                              task._region_index_start,
2643                                                              task._region_index_end);
2644     }
2645 
2646     // Once a thread has drained its stack, it should try to steal regions from


< prev index next >