/*
 * Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "aot/aotLoader.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/pcTasks.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/jniHandles.hpp"
#include "runtime/thread.hpp"
#include "runtime/vmThread.hpp"
#include "services/management.hpp"
#include "utilities/stack.inline.hpp"

//
// ThreadRootsMarkingTask
//

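// Marks the roots of a single thread (either a JavaThread or the VM thread),
// including oops referenced from the code blobs active on its stack, and then
// drains this worker's marking stacks to follow what was pushed.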
void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ResourceMark rm;

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
  MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);

  if (_java_thread != NULL) {
    _java_thread->oops_do(
        &mark_and_push_closure,
        &mark_and_push_in_blobs);
  }

  if (_vm_thread != NULL) {
    _vm_thread->oops_do(
        &mark_and_push_closure,
        &mark_and_push_in_blobs);
  }

  // Transitively mark everything pushed onto the marking stacks above.
  cm->follow_marking_stacks();
}


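//
// MarkFromRootsTask
//

// Marks and pushes the oops reachable from one category of strong roots,
// selected by _root_type, and then drains this worker's marking stacks.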
void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);

  switch (_root_type) {
    case universe:
      Universe::oops_do(&mark_and_push_closure);
      break;

    case jni_handles:
      JNIHandles::oops_do(&mark_and_push_closure);
      break;

    case threads:
    {
      ResourceMark rm;
      MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
      Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
    }
    break;

    case object_synchronizer:
      ObjectSynchronizer::oops_do(&mark_and_push_closure);
      break;

    case management:
      Management::oops_do(&mark_and_push_closure);
      break;

    case jvmti:
      JvmtiExport::oops_do(&mark_and_push_closure);
      break;

    case system_dictionary:
      SystemDictionary::always_strong_oops_do(&mark_and_push_closure);
      break;

    case class_loader_data:
      ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, true);
      break;

    case code_cache:
      // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
      //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
      AOTLoader::oops_do(&mark_and_push_closure);
      break;

    default:
      fatal("Unknown root type");
  }

  // Transitively mark everything pushed onto the marking stacks above.
  cm->follow_marking_stacks();
}


//
// RefProcTaskProxy
//

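// Runs one worker's share (_work_id) of a reference processing task, passing
// it the parallel compactor's is-alive closure plus closures that mark and
// push newly reachable objects and drain this worker's marking stacks.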
void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
{
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
  _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
                mark_and_push_closure, follow_stack_closure);
}

//
// RefProcTaskExecutor
//

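// Fans a reference processing task out to the parallel GC threads: one proxy
// task per worker, plus stealing tasks when the task marks objects alive so
// that idle workers can help finish the marking.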
void RefProcTaskExecutor::execute(ProcessTask& task)
{
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  uint active_gc_threads = heap->gc_task_manager()->active_workers();
  OopTaskQueueSet* qset = ParCompactionManager::stack_array();
  ParallelTaskTerminator terminator(active_gc_threads, qset);
  GCTaskQueue* q = GCTaskQueue::create();
  for (uint i = 0; i < parallel_gc_threads; i++) {
    q->enqueue(new RefProcTaskProxy(task, i));
  }
  if (task.marks_oops_alive()) {
    if (parallel_gc_threads > 1) {
      for (uint j = 0; j < active_gc_threads; j++) {
        q->enqueue(new StealMarkingTask(&terminator));
      }
    }
  }
  PSParallelCompact::gc_task_manager()->execute_and_wait(q);
}

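// Fans a reference enqueueing task out to the parallel GC threads, one proxy
// task per worker.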
void RefProcTaskExecutor::execute(EnqueueTask& task)
{
  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  uint parallel_gc_threads = heap->gc_task_manager()->workers();
  GCTaskQueue* q = GCTaskQueue::create();
  for (uint i = 0; i < parallel_gc_threads; i++) {
    q->enqueue(new RefEnqueueTaskProxy(task, i));
  }
  PSParallelCompact::gc_task_manager()->execute_and_wait(q);
}

//
// StealMarkingTask
//

StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
  _terminator(t) {}

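// Steals marking work (object array chunks and individual objects) from other
// workers' queues and follows the contents of whatever is stolen, looping
// until the termination protocol reports that no marking work remains.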
void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);
  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);

  oop obj = NULL;
  ObjArrayTask task;
  int random_seed = 17;
  do {
    while (ParCompactionManager::steal_objarray(which, &random_seed, task)) {
      cm->follow_contents((objArrayOop)task.obj(), task.index());
      cm->follow_marking_stacks();
    }
    while (ParCompactionManager::steal(which, &random_seed, obj)) {
      cm->follow_contents(obj);
      cm->follow_marking_stacks();
    }
  } while (!terminator()->offer_termination());
}

//
// CompactionWithStealingTask
//

CompactionWithStealingTask::CompactionWithStealingTask(ParallelTaskTerminator* t):
  _terminator(t) {}

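// Fills and updates the compaction regions preloaded on this worker's region
// stack, then steals region indices from other workers and processes those,
// until the termination protocol reports that no regions remain.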
void CompactionWithStealingTask::do_it(GCTaskManager* manager, uint which) {
  assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  // Drain the stacks that have been preloaded with regions
  // that are ready to fill.

  cm->drain_region_stacks();

  guarantee(cm->region_stack()->is_empty(), "Not empty");

  size_t region_index = 0;
  int random_seed = 17;

  while (true) {
    if (ParCompactionManager::steal(which, &random_seed, region_index)) {
      PSParallelCompact::fill_and_update_region(cm, region_index);
      cm->drain_region_stacks();
    } else {
      if (terminator()->offer_termination()) {
        break;
      }
      // Go around again.
    }
  }
  return;
}

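//
// UpdateDensePrefixTask
//

// Updates the interior oops of objects in the regions between
// _region_index_start and _region_index_end in the dense prefix of the given
// space, where objects are left in place rather than compacted.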
UpdateDensePrefixTask::UpdateDensePrefixTask(
                                   PSParallelCompact::SpaceId space_id,
                                   size_t region_index_start,
                                   size_t region_index_end) :
  _space_id(space_id), _region_index_start(region_index_start),
  _region_index_end(region_index_end) {}

void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {

  ParCompactionManager* cm =
    ParCompactionManager::gc_thread_compaction_manager(which);

  PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
                                                         _space_id,
                                                         _region_index_start,
                                                         _region_index_end);
}