1 /*
  2  * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  *
 23  */
 24 
 25 #include "precompiled.hpp"
 26 #include "aot/aotLoader.hpp"
 27 #include "classfile/systemDictionary.hpp"
 28 #include "code/codeCache.hpp"
 29 #include "gc/parallel/parallelScavengeHeap.hpp"
 30 #include "gc/parallel/pcTasks.hpp"
 31 #include "gc/parallel/psCompactionManager.inline.hpp"
 32 #include "gc/parallel/psParallelCompact.inline.hpp"
 33 #include "gc/shared/collectedHeap.hpp"
 34 #include "gc/shared/gcTimer.hpp"
 35 #include "gc/shared/gcTraceTime.inline.hpp"
 36 #include "logging/log.hpp"
 37 #include "memory/resourceArea.hpp"
 38 #include "memory/universe.hpp"
 39 #include "oops/objArrayKlass.inline.hpp"
 40 #include "oops/oop.inline.hpp"
 41 #include "prims/jvmtiExport.hpp"
 42 #include "runtime/jniHandles.hpp"
 43 #include "runtime/thread.hpp"
 44 #include "runtime/vmThread.hpp"
 45 #include "services/management.hpp"
 46 #include "utilities/stack.inline.hpp"
 47 
 48 //
 49 // ThreadRootsMarkingTask
 50 //
 51 
 52 void ThreadRootsMarkingTask::do_it(GCTaskManager* manager, uint which) {
 53   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 54 
 55   ResourceMark rm;
 56 
 57   ParCompactionManager* cm =
 58     ParCompactionManager::gc_thread_compaction_manager(which);
 59 
 60   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
 61   MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
 62 
 63   _thread->oops_do(&mark_and_push_closure, &mark_and_push_in_blobs);
 64 
 65   // Do the real work
 66   cm->follow_marking_stacks();
 67 }
 68 
 69 
 70 void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
 71   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
 72 
 73   ParCompactionManager* cm =
 74     ParCompactionManager::gc_thread_compaction_manager(which);
 75   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
 76 
 77   switch (_root_type) {
 78     case universe:
 79       Universe::oops_do(&mark_and_push_closure);
 80       break;
 81 
 82     case jni_handles:
 83       JNIHandles::oops_do(&mark_and_push_closure);
 84       break;
 85 
 86     case threads:
 87     {
 88       ResourceMark rm;
 89       MarkingCodeBlobClosure each_active_code_blob(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
 90       Threads::oops_do(&mark_and_push_closure, &each_active_code_blob);
 91     }
 92     break;
 93 
 94     case object_synchronizer:
 95       ObjectSynchronizer::oops_do(&mark_and_push_closure);
 96       break;
 97 
 98     case management:
 99       Management::oops_do(&mark_and_push_closure);
100       break;
101 
102     case jvmti:
103       JvmtiExport::oops_do(&mark_and_push_closure);
104       break;
105 
106     case system_dictionary:
107       SystemDictionary::oops_do(&mark_and_push_closure);
108       break;
109 
110     case class_loader_data: {
111         CLDToOopClosure cld_closure(&mark_and_push_closure, ClassLoaderData::_claim_value_strong);
112         ClassLoaderDataGraph::always_strong_cld_do(&cld_closure);
113       }
114       break;
115 
116     case code_cache:
117       // Do not treat nmethods as strong roots for mark/sweep, since we can unload them.
118       //CodeCache::scavenge_root_nmethods_do(CodeBlobToOopClosure(&mark_and_push_closure));
119       AOTLoader::oops_do(&mark_and_push_closure);
120       break;
121 
122     default:
123       fatal("Unknown root type");
124   }
125 
126   // Do the real work
127   cm->follow_marking_stacks();
128 }
129 
130 
131 //
132 // RefProcTaskProxy
133 //
134 
135 void RefProcTaskProxy::do_it(GCTaskManager* manager, uint which)
136 {
137   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
138 
139   ParCompactionManager* cm =
140     ParCompactionManager::gc_thread_compaction_manager(which);
141   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
142   ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
143   _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
144                 mark_and_push_closure, follow_stack_closure);
145 }
146 
147 //
148 // RefProcTaskExecutor
149 //
150 
151 void RefProcTaskExecutor::execute(ProcessTask& task, uint ergo_workers)
152 {
153   ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
154   uint active_gc_threads = heap->gc_task_manager()->active_workers();
155   assert(active_gc_threads == ergo_workers,
156          "Ergonomically chosen workers (%u) must be equal to active workers (%u)",
157          ergo_workers, active_gc_threads);
158   OopTaskQueueSet* qset = ParCompactionManager::stack_array();
159   ParallelTaskTerminator terminator(active_gc_threads, qset);
160   GCTaskQueue* q = GCTaskQueue::create();
161   for(uint i=0; i<active_gc_threads; i++) {
162     q->enqueue(new RefProcTaskProxy(task, i));
163   }
164   if (task.marks_oops_alive() && (active_gc_threads>1)) {
165     for (uint j=0; j<active_gc_threads; j++) {
166       q->enqueue(new StealMarkingTask(&terminator));
167     }
168   }
169   PSParallelCompact::gc_task_manager()->execute_and_wait(q);
170 }
171 
172 //
173 // StealMarkingTask
174 //
175 
// Record the terminator used to coordinate shutdown of the work-stealing
// marking loop across all workers.
StealMarkingTask::StealMarkingTask(ParallelTaskTerminator* t) :
  _terminator(t) {}
178 
179 void StealMarkingTask::do_it(GCTaskManager* manager, uint which) {
180   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
181 
182   ParCompactionManager* cm =
183     ParCompactionManager::gc_thread_compaction_manager(which);
184   ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
185 
186   oop obj = NULL;
187   ObjArrayTask task;
188   do {
189     while (ParCompactionManager::steal_objarray(which,  task)) {
190       cm->follow_contents((objArrayOop)task.obj(), task.index());
191       cm->follow_marking_stacks();
192     }
193     while (ParCompactionManager::steal(which, obj)) {
194       cm->follow_contents(obj);
195       cm->follow_marking_stacks();
196     }
197   } while (!terminator()->offer_termination());
198 }
199 
200 //
201 // CompactionWithStealingTask
202 //
203 
// Record the terminator used to coordinate shutdown of the region-stealing
// compaction loop across all workers.
CompactionWithStealingTask::CompactionWithStealingTask(ParallelTaskTerminator* t):
  _terminator(t) {}
206 
207 void CompactionWithStealingTask::do_it(GCTaskManager* manager, uint which) {
208   assert(ParallelScavengeHeap::heap()->is_gc_active(), "called outside gc");
209 
210   ParCompactionManager* cm =
211     ParCompactionManager::gc_thread_compaction_manager(which);
212 
213   // Drain the stacks that have been preloaded with regions
214   // that are ready to fill.
215 
216   cm->drain_region_stacks();
217 
218   guarantee(cm->region_stack()->is_empty(), "Not empty");
219 
220   size_t region_index = 0;
221 
222   while(true) {
223     if (ParCompactionManager::steal(which, region_index)) {
224       PSParallelCompact::fill_and_update_region(cm, region_index);
225       cm->drain_region_stacks();
226     } else {
227       if (terminator()->offer_termination()) {
228         break;
229       }
230       // Go around again.
231     }
232   }
233   return;
234 }
235 
// Record the space and the region-index span this task is responsible for
// within that space's dense prefix.
UpdateDensePrefixTask::UpdateDensePrefixTask(
                                   PSParallelCompact::SpaceId space_id,
                                   size_t region_index_start,
                                   size_t region_index_end) :
  _space_id(space_id), _region_index_start(region_index_start),
  _region_index_end(region_index_end) {}
242 
243 void UpdateDensePrefixTask::do_it(GCTaskManager* manager, uint which) {
244 
245   ParCompactionManager* cm =
246     ParCompactionManager::gc_thread_compaction_manager(which);
247 
248   PSParallelCompact::update_and_deadwood_in_dense_prefix(cm,
249                                                          _space_id,
250                                                          _region_index_start,
251                                                          _region_index_end);
252 }