/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/pcTasks.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_interface/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/fprofiler.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC

//
// ThreadRootsMarkingTask
//
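// Marks and pushes the oops reachable from a single Java thread (or the
// VM thread): its stack frames, its class loader data and the code blobs
// it is currently executing, then follows the marking stacks.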
void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  ResourceMark rm;

  NOT_PRODUCT(GCTraceTime tm("ThreadRootsMarkingTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
  CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
  MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);

  if (_java_thread != NULL)
    _java_thread->oops_do(
        &mark_and_push_closure,
        &mark_and_push_from_clds,
        &mark_and_push_in_blobs);

  if (_vm_thread != NULL)
    _vm_thread->oops_do(
        &mark_and_push_closure,
        &mark_and_push_from_clds,
        &mark_and_push_in_blobs);

  // Do the real work
  cm->follow_marking_stacks();
}

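//
// MarkFromRootsTask
//
// Marks and pushes the oops reachable from one category of strong roots
// (selected by _root_type), then follows the marking stacks.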
void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(GCTraceTime tm("MarkFromRootsTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
  PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);

  switch (_root_type) {
    case universe:
      Universe::oops_do(&mark_and_push_closure);
      break;

    case jni_handles:
      JNIHandles::oops_do(&mark_and_push_closure);
      break;

    case threads:
    {
      ResourceMark rm;
      MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
      CLDToOopClosure mark_and_push_from_cld(&mark_and_push_closure);
      Threads::oops_do(&mark_and_push_closure, &mark_and_push_from_cld, &each_active_code_blob);
    }
    break;

    case object_synchronizer:
      ObjectSynchronizer::oops_do(&mark_and_push_closure);
      break;

    case flat_profiler:
      FlatProfiler::oops_do(&mark_and_push_closure);
      break;

    case management:
      Management::oops_do(&mark_and_push_closure);
      break;

    case jvmti:
      JvmtiExport::oops_do(&mark_and_push_closure);
      break;

    case system_dictionary:
      SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
      break;

    case class_loader_data:
      ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true);
      break;

    case code_cache:
      // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
      //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
      break;

    default:
      fatal("Unknown root type");
  }

  // Do the real work
  cm->follow_marking_stacks();
}


//
// RefProcTaskProxy
//
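// Runs one worker's share (_work_id) of a reference processing ProcessTask,
// using this worker's compaction manager for marking and stack draining.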
void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(GCTraceTime tm("RefProcTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
  PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
  _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
                mark_and_push_closure, follow_stack_closure);
}

//
// RefProcTaskExecutor
//
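// Runs reference processing tasks across the parallel GC worker threads.
// One proxy task is enqueued per worker; steal tasks are added when the
// task marks oops alive, so the marking work can be load-balanced.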
void RefProcTaskExecutor::execute(ProcessTask& task)
{
  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  RegionTaskQueueSet* qset = ParCompactionManager::region_array();
  ParallelTaskTerminator terminator(active_gc_threads, qset);
  GCTaskQueue* q = GCTaskQueue::create();
  for (uint i = 0; i < parallel_gc_threads; i++) {
    q->enqueue(new RefProcTaskProxy(task, i));
  }
  if (task.marks_oops_alive()) {
    if (parallel_gc_threads > 1) {
      for (uint j = 0; j < active_gc_threads; j++) {
        q->enqueue(new StealMarkingTask(&terminator));
      }
    }
  }
  PSParallelCompact::gc_task_manager()->execute_and_wait(q);
}

void RefProcTaskExecutor::execute(EnqueueTask& task)
{
  ParallelScavengeHeap* heap = PSParallelCompact::gc_heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  GCTaskQueue* q = GCTaskQueue::create();
  for (uint i = 0; i < parallel_gc_threads; i++) {
    q->enqueue(new RefEnqueueTaskProxy(task, i));
  }
  PSParallelCompact::gc_task_manager()->execute_and_wait(q);
}

//
// StealMarkingTask
//
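// Work-stealing task used during marking: repeatedly steals object array
// chunks and individual objects from other workers' marking stacks and
// processes them, until every worker offers termination.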

StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
  _terminator(t) {}

void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(GCTraceTime tm("StealMarkingTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);

  oop obj = NULL;
  ObjArrayTask task;
  int random_seed = 17;
  do {
    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
      cm->follow_contents((objArrayOop)task.obj(), task.index());
      cm->follow_marking_stacks();
    }
    while (ParCompactionManager::steal(which, &random_seed, obj)) {
      cm->follow_contents(obj);
      cm->follow_marking_stacks();
    }
  } while (!terminator()->offer_termination());
}

//
// StealRegionCompactionTask
//
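// Work-stealing task used during compaction: drains this worker's region
// stack, then steals region indices from other workers and fills/updates
// those regions until every worker offers termination.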

StealRegionCompactionTask::StealRegionCompactionTask(ParallelTaskTerminator* t):
  _terminator(t) {}

void StealRegionCompactionTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(GCTraceTime tm("StealRegionCompactionTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  // If not all threads are active, get a draining stack from the list.
  // Otherwise, just use this thread's own draining stack.
  uint which_stack_index;
  bool use_all_workers = manager->all_workers_active();
  if (use_all_workers) {
    which_stack_index = which;
    assert(manager->active_workers() == ParallelGCThreads,
           err_msg("all_workers_active has been incorrectly set: "
                   " active %d  ParallelGCThreads %d", manager->active_workers(),
                   ParallelGCThreads));
  } else {
    which_stack_index = ParCompactionManager::pop_recycled_stack_index();
  }

  cm->set_region_stack_index(which_stack_index);
  cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("StealRegionCompactionTask::do_it "
                           "region_stack_index %d region_stack = " PTR_FORMAT " "
                           " empty (%d) use all workers %d",
                           which_stack_index, ParCompactionManager::region_list(which_stack_index),
                           cm->region_stack()->is_empty(),
                           use_all_workers);
  }
  // Has to drain the region stacks first because regions may have been
  // preloaded onto the stack and this thread may never have been given a
  // draining task.  Are the draining tasks needed?

  cm->drain_region_stacks();

  size_t region_index = 0;
  int random_seed = 17;

  // Steal regions from other workers, draining the region stacks after each
  // stolen region, until every worker offers termination.
  while (true) {
    if (ParCompactionManager::steal(which, &random_seed, region_index)) {
      PSParallelCompact::fill_and_update_region(cm, region_index);
      cm->drain_region_stacks();
    } else {
      if (terminator()->offer_termination()) {
        break;
      }
      // Go around again.
    }
  }
  return;
}

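//
// UpdateDensePrefixTask
//
// Updates the objects in the dense prefix of the given space over the
// region range [_region_index_start, _region_index_end).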
UpdateDensePrefixTask::UpdateDensePrefixTask(
                                   PSParallelCompact::SpaceId space_id,
                                   size_t region_index_start,
                                   size_t region_index_end) :
  _space_id(space_id), _region_index_start(region_index_start),
  _region_index_end(region_index_end) {}

void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {

  NOT_PRODUCT(GCTraceTime tm("UpdateDensePrefixTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
                                                         _space_id,
                                                         _region_index_start,
                                                         _region_index_end);
}

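//
// DrainStacksCompactionTask
//
// Drains the regions preloaded onto one of the per-thread region stacks.
// When not all workers are active, the stack index is handed back for
// recycling once the stack has been emptied.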
void DrainStacksCompactionTask::do_it(GCTaskManager* manager, uint which) {
  assert(Universe::heap()->is_gc_active(), "called outside gc");

  NOT_PRODUCT(GCTraceTime tm("DrainStacksCompactionTask",
    PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  uint which_stack_index;
  bool use_all_workers = manager->all_workers_active();
  if (use_all_workers) {
    which_stack_index = which;
    assert(manager->active_workers() == ParallelGCThreads,
           err_msg("all_workers_active has been incorrectly set: "
                   " active %d  ParallelGCThreads %d", manager->active_workers(),
                   ParallelGCThreads));
  } else {
    which_stack_index = stack_index();
  }

  cm->set_region_stack(ParCompactionManager::region_list(which_stack_index));
  if (TraceDynamicGCThreads) {
    gclog_or_tty->print_cr("DrainStacksCompactionTask::do_it which = %d "
                           "which_stack_index = %d/empty(%d) "
                           "use all workers %d",
                           which, which_stack_index,
                           cm->region_stack()->is_empty(),
                           use_all_workers);
  }

  cm->set_region_stack_index(which_stack_index);

  // Process any regions already in the compaction manager's stacks.
  cm->drain_region_stacks();

  assert(cm->region_stack()->is_empty(), "Not empty");

  if (!use_all_workers) {
    // Always give up the region stack.
    assert(cm->region_stack() ==
           ParCompactionManager::region_list(cm->region_stack_index()),
           "region_stack and region_stack_index are inconsistent");
    ParCompactionManager::push_recycled_stack_index(cm->region_stack_index());

    if (TraceDynamicGCThreads) {
      void* old_region_stack = (void*) cm->region_stack();
      int old_region_stack_index = cm->region_stack_index();
      gclog_or_tty->print_cr("Pushing region stack " PTR_FORMAT "/%d",
        old_region_stack, old_region_stack_index);
    }

    cm->set_region_stack(NULL);
    cm->set_region_stack_index((uint)max_uintx);
  }
}