src/hotspot/share/gc/parallel/psParallelCompact.cpp

rev 57895 : [mq]: 8215297-remove-ptt
   1 /*
   2  * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  32 #include "code/codeCache.hpp"
  33 #include "gc/parallel/parallelArguments.hpp"
  34 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  35 #include "gc/parallel/parMarkBitMap.inline.hpp"
  36 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  37 #include "gc/parallel/psCompactionManager.inline.hpp"
  38 #include "gc/parallel/psOldGen.hpp"
  39 #include "gc/parallel/psParallelCompact.inline.hpp"
  40 #include "gc/parallel/psPromotionManager.inline.hpp"
  41 #include "gc/parallel/psRootType.hpp"
  42 #include "gc/parallel/psScavenge.hpp"
  43 #include "gc/parallel/psYoungGen.hpp"
  44 #include "gc/shared/gcCause.hpp"
  45 #include "gc/shared/gcHeapSummary.hpp"
  46 #include "gc/shared/gcId.hpp"
  47 #include "gc/shared/gcLocker.hpp"
  48 #include "gc/shared/gcTimer.hpp"
  49 #include "gc/shared/gcTrace.hpp"
  50 #include "gc/shared/gcTraceTime.inline.hpp"
  51 #include "gc/shared/isGCActiveMark.hpp"

  52 #include "gc/shared/referencePolicy.hpp"
  53 #include "gc/shared/referenceProcessor.hpp"
  54 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  55 #include "gc/shared/spaceDecorator.inline.hpp"
  56 #include "gc/shared/weakProcessor.hpp"
  57 #include "gc/shared/workerPolicy.hpp"
  58 #include "gc/shared/workgroup.hpp"
  59 #include "logging/log.hpp"
  60 #include "memory/iterator.inline.hpp"
  61 #include "memory/resourceArea.hpp"
  62 #include "memory/universe.hpp"
  63 #include "oops/access.inline.hpp"
  64 #include "oops/instanceClassLoaderKlass.inline.hpp"
  65 #include "oops/instanceKlass.inline.hpp"
  66 #include "oops/instanceMirrorKlass.inline.hpp"
  67 #include "oops/methodData.hpp"
  68 #include "oops/objArrayKlass.inline.hpp"
  69 #include "oops/oop.inline.hpp"
  70 #include "runtime/atomic.hpp"
  71 #include "runtime/handles.inline.hpp"


1952       VerifyAfterGC) {
1953     old_gen->verify_object_start_array();
1954   }
1955 
1956   if (ZapUnusedHeapArea) {
1957     old_gen->object_space()->check_mangled_unused_area_complete();
1958   }
1959 
1960   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1961 
1962   collection_exit.update();
1963 
1964   heap->print_heap_after_gc();
1965   heap->trace_heap_after_gc(&_gc_tracer);
1966 
1967   log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
1968                          marking_start.ticks(), compaction_start.ticks(),
1969                          collection_exit.ticks());
1970 
1971 #ifdef TRACESPINNING
1972   ParallelTaskTerminator::print_termination_counts();
1973 #endif
1974 
1975   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1976 
1977   _gc_timer.register_gc_end();
1978 
1979   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1980   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1981 
1982   return true;
1983 }
1984 
1985 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1986                                              PSYoungGen* young_gen,
1987                                              PSOldGen* old_gen) {
1988   MutableSpace* const eden_space = young_gen->eden_space();
1989   assert(!eden_space->is_empty(), "eden must be non-empty");
1990   assert(young_gen->virtual_space()->alignment() ==
1991          old_gen->virtual_space()->alignment(), "alignments do not match");
1992 


2132         ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
2133       }
2134       break;
2135 
2136     case ParallelRootType::code_cache:
2137       // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
2138       //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
2139       AOTLoader::oops_do(&mark_and_push_closure);
2140       break;
2141 
2142     case ParallelRootType::sentinel:
2143     DEBUG_ONLY(default:) // DEBUG_ONLY hack will create a compile error on release builds (-Wswitch) and a runtime check on debug builds
2144       fatal("Bad enumeration value: %u", root_type);
2145       break;
2146   }
2147 
2148   // Do the real work
2149   cm->follow_marking_stacks();
2150 }
2151 
2152 static void steal_marking_work(ParallelTaskTerminator& terminator, uint worker_id) {
2153   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2154 
2155   ParCompactionManager* cm =
2156     ParCompactionManager::gc_thread_compaction_manager(worker_id);
2157 
2158   oop obj = NULL;
2159   ObjArrayTask task;
2160   do {
2161     while (ParCompactionManager::steal_objarray(worker_id, task)) {
2162       cm->follow_array((objArrayOop)task.obj(), task.index());
2163       cm->follow_marking_stacks();
2164     }
2165     while (ParCompactionManager::steal(worker_id, obj)) {
2166       cm->follow_contents(obj);
2167       cm->follow_marking_stacks();
2168     }
2169   } while (!terminator.offer_termination());
2170 }
2171 
2172 class MarkFromRootsTask : public AbstractGangTask {
2173   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2174   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
2175   SequentialSubTasksDone _subtasks;
2176   TaskTerminator _terminator;
2177   uint _active_workers;
2178 
2179 public:
2180   MarkFromRootsTask(uint active_workers) :
2181       AbstractGangTask("MarkFromRootsTask"),
2182       _strong_roots_scope(active_workers),
2183       _subtasks(),
2184       _terminator(active_workers, ParCompactionManager::stack_array()),
2185       _active_workers(active_workers) {
2186     _subtasks.set_n_threads(active_workers);
2187     _subtasks.set_n_tasks(ParallelRootType::sentinel);
2188   }
2189 
2190   virtual void work(uint worker_id) {
2191     for (uint task = 0; _subtasks.try_claim_task(task); /*empty*/ ) {
2192       mark_from_roots_work(static_cast<ParallelRootType::Value>(task), worker_id);
2193     }
2194     _subtasks.all_tasks_completed();
2195 
2196     PCAddThreadRootsMarkingTaskClosure closure(worker_id);
2197     Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
2198 
2199     if (_active_workers > 1) {
2200       steal_marking_work(*_terminator.terminator(), worker_id);
2201     }
2202   }
2203 };
2204 
2205 class PCRefProcTask : public AbstractGangTask {
2206   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2207   ProcessTask& _task;
2208   uint _ergo_workers;
2209   TaskTerminator _terminator;
2210 
2211 public:
2212   PCRefProcTask(ProcessTask& task, uint ergo_workers) :
2213       AbstractGangTask("PCRefProcTask"),
2214       _task(task),
2215       _ergo_workers(ergo_workers),
2216       _terminator(_ergo_workers, ParCompactionManager::stack_array()) {
2217   }
2218 
2219   virtual void work(uint worker_id) {
2220     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2221     assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2222 
2223     ParCompactionManager* cm =
2224       ParCompactionManager::gc_thread_compaction_manager(worker_id);
2225     PCMarkAndPushClosure mark_and_push_closure(cm);
2226     ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2227     _task.work(worker_id, *PSParallelCompact::is_alive_closure(),
2228                mark_and_push_closure, follow_stack_closure);
2229 
2230     steal_marking_work(*_terminator.terminator(), worker_id);
2231   }
2232 };
2233 
2234 class RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2235   void execute(ProcessTask& process_task, uint ergo_workers) {
2236     assert(ParallelScavengeHeap::heap()->workers().active_workers() == ergo_workers,
2237            "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
2238            ergo_workers, ParallelScavengeHeap::heap()->workers().active_workers());
2239 
2240     PCRefProcTask task(process_task, ergo_workers);
2241     ParallelScavengeHeap::heap()->workers().run_task(&task);
2242   }
2243 };
2244 
2245 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2246                                       bool maximum_heap_compaction,
2247                                       ParallelOldTracer *gc_tracer) {
2248   // Recursively traverse all live objects and mark them
2249   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
2250 


2569       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2570 
2571       size_t histo[5] = { 0, 0, 0, 0, 0 };
2572       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2573       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2574 
2575       for (const rd_t* cur = beg; cur < end; ++cur) {
2576         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2577       }
2578       out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2579       for (size_t i = 0; i < histo_len; ++i) {
2580         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2581                    histo[i], 100.0 * histo[i] / region_cnt);
2582       }
2583       out->cr();
2584     }
2585   }
2586 }
2587 #endif // #ifdef ASSERT
2588 
2589 static void compaction_with_stealing_work(ParallelTaskTerminator* terminator, uint worker_id) {
2590   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2591 
2592   ParCompactionManager* cm =
2593     ParCompactionManager::gc_thread_compaction_manager(worker_id);
2594 
2595   // Drain the stacks that have been preloaded with regions
2596   // that are ready to fill.
2597 
2598   cm->drain_region_stacks();
2599 
2600   guarantee(cm->region_stack()->is_empty(), "Not empty");
2601 
2602   size_t region_index = 0;
2603 
2604   while (true) {
2605     if (ParCompactionManager::steal(worker_id, region_index)) {
2606       PSParallelCompact::fill_and_update_region(cm, region_index);
2607       cm->drain_region_stacks();
2608     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
2609       // Fill and update an unavailable region with the help of a shadow region
2610       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
2611       cm->drain_region_stacks();
2612     } else {
2613       if (terminator->offer_termination()) {
2614         break;
2615       }
2616       // Go around again.
2617     }
2618   }
2619   return;
2620 }
2621 
2622 class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
2623   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2624   TaskQueue& _tq;
2625   TaskTerminator _terminator;
2626   uint _active_workers;
2627 
2628 public:
2629   UpdateDensePrefixAndCompactionTask(TaskQueue& tq, uint active_workers) :
2630       AbstractGangTask("UpdateDensePrefixAndCompactionTask"),
2631       _tq(tq),
2632       _terminator(active_workers, ParCompactionManager::region_array()),
2633       _active_workers(active_workers) {
2634   }
2635   virtual void work(uint worker_id) {
2636     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2637 
2638     for (PSParallelCompact::UpdateDensePrefixTask task; _tq.try_claim(task); /* empty */) {
2639       PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
2640                                                              task._space_id,
2641                                                              task._region_index_start,
2642                                                              task._region_index_end);
2643     }
2644 
2645   // Once a thread has drained its stack, it should try to steal regions from
2646     // other threads.
2647     compaction_with_stealing_work(_terminator.terminator(), worker_id);
2648   }
2649 };
2650 
2651 void PSParallelCompact::compact() {
2652   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
2653 
2654   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2655   PSOldGen* old_gen = heap->old_gen();
2656   old_gen->start_array()->reset();
2657   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2658 
2659   // for [0..last_space_id)
2660   //     for [0..active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)
2661   //         push
2662   //     push
2663   //
2664   // max push count is thus: last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1)
2665   TaskQueue task_queue(last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1));
2666   initialize_shadow_regions(active_gc_threads);
2667   prepare_region_draining_tasks(active_gc_threads);


   1 /*
   2  * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *


  32 #include "code/codeCache.hpp"
  33 #include "gc/parallel/parallelArguments.hpp"
  34 #include "gc/parallel/parallelScavengeHeap.inline.hpp"
  35 #include "gc/parallel/parMarkBitMap.inline.hpp"
  36 #include "gc/parallel/psAdaptiveSizePolicy.hpp"
  37 #include "gc/parallel/psCompactionManager.inline.hpp"
  38 #include "gc/parallel/psOldGen.hpp"
  39 #include "gc/parallel/psParallelCompact.inline.hpp"
  40 #include "gc/parallel/psPromotionManager.inline.hpp"
  41 #include "gc/parallel/psRootType.hpp"
  42 #include "gc/parallel/psScavenge.hpp"
  43 #include "gc/parallel/psYoungGen.hpp"
  44 #include "gc/shared/gcCause.hpp"
  45 #include "gc/shared/gcHeapSummary.hpp"
  46 #include "gc/shared/gcId.hpp"
  47 #include "gc/shared/gcLocker.hpp"
  48 #include "gc/shared/gcTimer.hpp"
  49 #include "gc/shared/gcTrace.hpp"
  50 #include "gc/shared/gcTraceTime.inline.hpp"
  51 #include "gc/shared/isGCActiveMark.hpp"
  52 #include "gc/shared/owstTaskTerminator.hpp"
  53 #include "gc/shared/referencePolicy.hpp"
  54 #include "gc/shared/referenceProcessor.hpp"
  55 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  56 #include "gc/shared/spaceDecorator.inline.hpp"
  57 #include "gc/shared/weakProcessor.hpp"
  58 #include "gc/shared/workerPolicy.hpp"
  59 #include "gc/shared/workgroup.hpp"
  60 #include "logging/log.hpp"
  61 #include "memory/iterator.inline.hpp"
  62 #include "memory/resourceArea.hpp"
  63 #include "memory/universe.hpp"
  64 #include "oops/access.inline.hpp"
  65 #include "oops/instanceClassLoaderKlass.inline.hpp"
  66 #include "oops/instanceKlass.inline.hpp"
  67 #include "oops/instanceMirrorKlass.inline.hpp"
  68 #include "oops/methodData.hpp"
  69 #include "oops/objArrayKlass.inline.hpp"
  70 #include "oops/oop.inline.hpp"
  71 #include "runtime/atomic.hpp"
  72 #include "runtime/handles.inline.hpp"


1953       VerifyAfterGC) {
1954     old_gen->verify_object_start_array();
1955   }
1956 
1957   if (ZapUnusedHeapArea) {
1958     old_gen->object_space()->check_mangled_unused_area_complete();
1959   }
1960 
1961   NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
1962 
1963   collection_exit.update();
1964 
1965   heap->print_heap_after_gc();
1966   heap->trace_heap_after_gc(&_gc_tracer);
1967 
1968   log_debug(gc, task, time)("VM-Thread " JLONG_FORMAT " " JLONG_FORMAT " " JLONG_FORMAT,
1969                          marking_start.ticks(), compaction_start.ticks(),
1970                          collection_exit.ticks());
1971 
1972 #ifdef TRACESPINNING
1973   OWSTTaskTerminator::print_termination_counts();
1974 #endif
1975 
1976   AdaptiveSizePolicyOutput::print(size_policy, heap->total_collections());
1977 
1978   _gc_timer.register_gc_end();
1979 
1980   _gc_tracer.report_dense_prefix(dense_prefix(old_space_id));
1981   _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());
1982 
1983   return true;
1984 }
1985 
1986 bool PSParallelCompact::absorb_live_data_from_eden(PSAdaptiveSizePolicy* size_policy,
1987                                              PSYoungGen* young_gen,
1988                                              PSOldGen* old_gen) {
1989   MutableSpace* const eden_space = young_gen->eden_space();
1990   assert(!eden_space->is_empty(), "eden must be non-empty");
1991   assert(young_gen->virtual_space()->alignment() ==
1992          old_gen->virtual_space()->alignment(), "alignments do not match");
1993 


2133         ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
2134       }
2135       break;
2136 
2137     case ParallelRootType::code_cache:
2138       // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
2139       //ScavengableNMethods::scavengable_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
2140       AOTLoader::oops_do(&mark_and_push_closure);
2141       break;
2142 
2143     case ParallelRootType::sentinel:
2144     DEBUG_ONLY(default:) // DEBUG_ONLY hack will create a compile error on release builds (-Wswitch) and a runtime check on debug builds
2145       fatal("Bad enumeration value: %u", root_type);
2146       break;
2147   }
2148 
2149   // Do the real work
2150   cm->follow_marking_stacks();
2151 }
2152 
2153 static void steal_marking_work(OWSTTaskTerminator& terminator, uint worker_id) {
2154   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2155 
2156   ParCompactionManager* cm =
2157     ParCompactionManager::gc_thread_compaction_manager(worker_id);
2158 
2159   oop obj = NULL;
2160   ObjArrayTask task;
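       // Keep stealing until offer_termination() succeeds: split objArray
       // chunks first, then whole objects, re-draining the local marking
       // stacks after every successful steal.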
2161   do {
2162     while (ParCompactionManager::steal_objarray(worker_id, task)) {
2163       cm->follow_array((objArrayOop)task.obj(), task.index());
2164       cm->follow_marking_stacks();
2165     }
2166     while (ParCompactionManager::steal(worker_id, obj)) {
2167       cm->follow_contents(obj);
2168       cm->follow_marking_stacks();
2169     }
2170   } while (!terminator.offer_termination());
2171 }
2172 
2173 class MarkFromRootsTask : public AbstractGangTask {
2174   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2175   StrongRootsScope _strong_roots_scope; // needed for Threads::possibly_parallel_threads_do
2176   SequentialSubTasksDone _subtasks;
2177   OWSTTaskTerminator _terminator;
2178   uint _active_workers;
2179 
2180 public:
2181   MarkFromRootsTask(uint active_workers) :
2182       AbstractGangTask("MarkFromRootsTask"),
2183       _strong_roots_scope(active_workers),
2184       _subtasks(),
2185       _terminator(active_workers, ParCompactionManager::stack_array()),
2186       _active_workers(active_workers) {
2187     _subtasks.set_n_threads(active_workers);
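         // ParallelRootType::sentinel is one past the last real root type,
         // so it doubles as the number of root-scanning subtasks claimed
         // one by one in work() below.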
2188     _subtasks.set_n_tasks(ParallelRootType::sentinel);
2189   }
2190 
2191   virtual void work(uint worker_id) {
2192     for (uint task = 0; _subtasks.try_claim_task(task); /*empty*/ ) {
2193       mark_from_roots_work(static_cast<ParallelRootType::Value>(task), worker_id);
2194     }
2195     _subtasks.all_tasks_completed();
2196 
2197     PCAddThreadRootsMarkingTaskClosure closure(worker_id);
2198     Threads::possibly_parallel_threads_do(true /*parallel */, &closure);
2199 
2200     if (_active_workers > 1) {
2201       steal_marking_work(_terminator, worker_id);
2202     }
2203   }
2204 };
2205 
2206 class PCRefProcTask : public AbstractGangTask {
2207   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2208   ProcessTask& _task;
2209   uint _ergo_workers;
2210   OWSTTaskTerminator _terminator;
2211 
2212 public:
2213   PCRefProcTask(ProcessTask& task, uint ergo_workers) :
2214       AbstractGangTask("PCRefProcTask"),
2215       _task(task),
2216       _ergo_workers(ergo_workers),
2217       _terminator(_ergo_workers, ParCompactionManager::stack_array()) {
2218   }
2219 
2220   virtual void work(uint worker_id) {
2221     ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2222     assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2223 
2224     ParCompactionManager* cm =
2225       ParCompactionManager::gc_thread_compaction_manager(worker_id);
2226     PCMarkAndPushClosure mark_and_push_closure(cm);
2227     ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
2228     _task.work(worker_id, *PSParallelCompact::is_alive_closure(),
2229                mark_and_push_closure, follow_stack_closure);
2230 
2231     steal_marking_work(_terminator, worker_id);
2232   }
2233 };
2234 
2235 class RefProcTaskExecutor: public AbstractRefProcTaskExecutor {
2236   void execute(ProcessTask& process_task, uint ergo_workers) {
2237     assert(ParallelScavengeHeap::heap()->workers().active_workers() == ergo_workers,
2238            "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
2239            ergo_workers, ParallelScavengeHeap::heap()->workers().active_workers());
2240 
2241     PCRefProcTask task(process_task, ergo_workers);
2242     ParallelScavengeHeap::heap()->workers().run_task(&task);
2243   }
2244 };
2245 
2246 void PSParallelCompact::marking_phase(ParCompactionManager* cm,
2247                                       bool maximum_heap_compaction,
2248                                       ParallelOldTracer *gc_tracer) {
2249   // Recursively traverse all live objects and mark them
2250   GCTraceTime(Info, gc, phases) tm("Marking Phase", &_gc_timer);
2251 


2570       const rd_t* const end = sd.addr_to_region_ptr(top_aligned_up);
2571 
2572       size_t histo[5] = { 0, 0, 0, 0, 0 };
2573       const size_t histo_len = sizeof(histo) / sizeof(size_t);
2574       const size_t region_cnt = pointer_delta(end, beg, sizeof(rd_t));
2575 
2576       for (const rd_t* cur = beg; cur < end; ++cur) {
2577         ++histo[MIN2(cur->blocks_filled_count(), histo_len - 1)];
2578       }
2579       out->print("Block fill histogram: %u %-4s" SIZE_FORMAT_W(5), id, space_names[id], region_cnt);
2580       for (size_t i = 0; i < histo_len; ++i) {
2581         out->print(" " SIZE_FORMAT_W(5) " %5.1f%%",
2582                    histo[i], 100.0 * histo[i] / region_cnt);
2583       }
2584       out->cr();
2585     }
2586   }
2587 }
2588 #endif // #ifdef ASSERT
2589 
2590 static void compaction_with_stealing_work(OWSTTaskTerminator* terminator, uint worker_id) {
2591   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
2592 
2593   ParCompactionManager* cm =
2594     ParCompactionManager::gc_thread_compaction_manager(worker_id);
2595 
2596   // Drain the stacks that have been preloaded with regions
2597   // that are ready to fill.
2598 
2599   cm->drain_region_stacks();
2600 
2601   guarantee(cm->region_stack()->is_empty(), "Not empty");
2602 
2603   size_t region_index = 0;
2604 
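       // Each iteration: steal a region to fill, or claim a still
       // unavailable region through a shadow region, or, failing both,
       // offer termination and exit once all workers have offered.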
2605   while (true) {
2606     if (ParCompactionManager::steal(worker_id, region_index)) {
2607       PSParallelCompact::fill_and_update_region(cm, region_index);
2608       cm->drain_region_stacks();
2609     } else if (PSParallelCompact::steal_unavailable_region(cm, region_index)) {
2610       // Fill and update an unavailable region with the help of a shadow region
2611       PSParallelCompact::fill_and_update_shadow_region(cm, region_index);
2612       cm->drain_region_stacks();
2613     } else {
2614       if (terminator->offer_termination()) {
2615         break;
2616       }
2617       // Go around again.
2618     }
2619   }
2620   return;
2621 }
2622 
2623 class UpdateDensePrefixAndCompactionTask: public AbstractGangTask {
2624   typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
2625   TaskQueue& _tq;
2626   OWSTTaskTerminator _terminator;
2627   uint _active_workers;
2628 
2629 public:
2630   UpdateDensePrefixAndCompactionTask(TaskQueue& tq, uint active_workers) :
2631       AbstractGangTask("UpdateDensePrefixAndCompactionTask"),
2632       _tq(tq),
2633       _terminator(active_workers, ParCompactionManager::region_array()),
2634       _active_workers(active_workers) {
2635   }
2636   virtual void work(uint worker_id) {
2637     ParCompactionManager* cm = ParCompactionManager::gc_thread_compaction_manager(worker_id);
2638 
2639     for (PSParallelCompact::UpdateDensePrefixTask task; _tq.try_claim(task); /* empty */) {
2640       PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
2641                                                              task._space_id,
2642                                                              task._region_index_start,
2643                                                              task._region_index_end);
2644     }
2645 
2646     // Once a thread has drained its stack, it should try to steal regions from
2647     // other threads.
2648     compaction_with_stealing_work(&_terminator, worker_id);
2649   }
2650 };
2651 
2652 void PSParallelCompact::compact() {
2653   GCTraceTime(Info, gc, phases) tm("Compaction Phase", &_gc_timer);
2654 
2655   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
2656   PSOldGen* old_gen = heap->old_gen();
2657   old_gen->start_array()->reset();
2658   uint active_gc_threads = ParallelScavengeHeap::heap()->workers().active_workers();
2659 
2660   // for [0..last_space_id)
2661   //     for [0..active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING)
2662   //         push
2663   //     push
2664   //
2665   // max push count is thus: last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1)
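       // Illustrative sizing (the concrete values here are assumptions for
       // the example, not taken from this file): with last_space_id == 4,
       // active_gc_threads == 4 and PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING
       // == 4, the queue must hold at most 4 * (4 * 4 + 1) == 68 entries.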
2666   TaskQueue task_queue(last_space_id * (active_gc_threads * PAR_OLD_DENSE_PREFIX_OVER_PARTITIONING + 1));
2667   initialize_shadow_regions(active_gc_threads);
2668   prepare_region_draining_tasks(active_gc_threads);
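
Both frames rely on the same work-stealing termination protocol: a worker
that finds nothing to steal calls offer_termination(), which returns true
only once every worker is offering at the same time; the patch merely swaps
the class providing that protocol (a ParallelTaskTerminator reached through
TaskTerminator::terminator() before, an OWSTTaskTerminator held directly
after). The following self-contained C++ sketch illustrates the protocol
itself. SimpleTerminator, the task pool, and every other name in it are
invented for illustration and are not HotSpot APIs; the real terminators are
considerably more careful about spinning and yielding.

#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// A worker that runs out of work "offers termination"; termination succeeds
// only when all workers are offering at once, and a worker that spots new
// work retracts its offer and resumes stealing.
class SimpleTerminator {
  const unsigned _n_workers;
  std::atomic<unsigned> _offered{0};
public:
  explicit SimpleTerminator(unsigned n) : _n_workers(n) {}

  template <typename HasWork>
  bool offer_termination(HasWork has_work) {
    _offered.fetch_add(1);
    while (true) {
      if (_offered.load() == _n_workers) {
        return true;                  // every worker is idle: terminate
      }
      if (has_work()) {
        _offered.fetch_sub(1);        // retract the offer, go back to work
        return false;
      }
      std::this_thread::yield();
    }
  }
};

// A mutex-protected pool stands in for the marking/region task queues.
std::mutex pool_lock;
std::vector<int> pool;

bool steal(int* task) {
  std::lock_guard<std::mutex> g(pool_lock);
  if (pool.empty()) return false;
  *task = pool.back();
  pool.pop_back();
  return true;
}

void process(int task) {
  if (task > 0) {                     // a task may spawn a child task, much
    std::lock_guard<std::mutex> g(pool_lock);
    pool.push_back(task - 1);         // like marking discovers new objects
  }
}

void worker(SimpleTerminator* t) {
  int task;
  do {
    while (steal(&task)) {            // same shape as steal_marking_work()
      process(task);
    }
  } while (!t->offer_termination([] {
    std::lock_guard<std::mutex> g(pool_lock);
    return !pool.empty();
  }));
}

int main() {
  const unsigned n_workers = 4;
  for (int i = 0; i < 8; i++) pool.push_back(i);
  SimpleTerminator t(n_workers);
  std::vector<std::thread> gang;
  for (unsigned i = 0; i < n_workers; i++) gang.emplace_back(worker, &t);
  for (auto& th : gang) th.join();
  std::printf("terminated; pool empty: %s\n", pool.empty() ? "yes" : "no");
  return 0;
}

This sketch terminates correctly only under the assumption, which also holds
for the loops above, that new work is created solely while processing work
already stolen.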

