/*
 * Copyright (c) 2005, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/parallel/gcTaskManager.hpp"
#include "gc/parallel/objectStartArray.hpp"
#include "gc/parallel/parMarkBitMap.inline.hpp"
#include "gc/parallel/parallelScavengeHeap.hpp"
#include "gc/parallel/psCompactionManager.inline.hpp"
#include "gc/parallel/psOldGen.hpp"
#include "gc/parallel/psParallelCompact.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/log.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"

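// Static state shared by all compaction managers: the task queue sets used
// for work stealing, the old gen and its object start array, and the mark
// bitmap. _manager_array holds one manager per GC worker thread plus one
// extra for the VMThread in the last slot.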
PSOldGen*            ParCompactionManager::_old_gen = NULL;
ParCompactionManager**  ParCompactionManager::_manager_array = NULL;

OopTaskQueueSet*     ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
  ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray*    ParCompactionManager::_start_array = NULL;
ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet*  ParCompactionManager::_region_array = NULL;

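// A ParCompactionManager bundles the per-thread marking stack, objArray
// chunk stack and region stack, along with cached pointers to the old gen
// and its object start array.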
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate) {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
  _region_stack.initialize();

  reset_bitmap_query_cache();
}

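// One-time setup: allocate the manager array and the task queue sets, then
// create one manager per GC worker thread (registered with the queue sets
// for work stealing) plus one for the VMThread, which is not registered and
// therefore cannot be stolen from.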
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
  guarantee(_manager_array != NULL, "Could not allocate manager_array");

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not allocate stack_array");
  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for (uint i = 0; i < parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_array()->register_queue(i, _manager_array[i]->region_stack());
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
    "Not initialized?");
}

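// Clear the cached bitmap query results of every manager, including the
// VMThread's (hence the inclusive loop bound).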
void ParCompactionManager::reset_all_bitmap_query_caches() {
  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();
  for (uint i = 0; i <= parallel_gc_threads; i++) {
    _manager_array[i]->reset_bitmap_query_cache();
  }
}

bool ParCompactionManager::should_update() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Update) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_copy() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Copy) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

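// Return the manager belonging to the given GC worker thread. The VMThread's
// manager sits past the end of this range and is not handed out here.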
ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

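// The oop_pc_follow_contents methods below trace the references held by an
// object during the marking phase, marking each reachable referent and
// pushing it on the manager's marking stack via MarkAndPushClosure.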
void InstanceKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  assert(obj != NULL, "can't follow the content of NULL object");

  cm->follow_klass(this);
  // Only mark the header and let the scan of the meta-data mark
  // everything else.

  ParCompactionManager::MarkAndPushClosure cl(cm);
  InstanceKlass::oop_oop_iterate_oop_maps<true>(obj, &cl);
}

void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  InstanceKlass::oop_pc_follow_contents(obj, cm);

  // Follow the klass field in the mirror.
  Klass* klass = java_lang_Class::as_Klass(obj);
  if (klass != NULL) {
    // An anonymous class doesn't have its own class loader, so the call
    // to follow_klass will mark and push its java mirror instead of the
    // class loader. When handling the java mirror for an anonymous class
    // we need to make sure its class loader data is claimed; this is done
    // by calling follow_class_loader explicitly. For non-anonymous classes
    // the call to follow_class_loader is made when the class loader itself
    // is handled.
    if (klass->is_instance_klass() && InstanceKlass::cast(klass)->is_anonymous()) {
      cm->follow_class_loader(klass->class_loader_data());
    } else {
      cm->follow_klass(klass);
    }
  } else {
    // If klass is NULL then this is a mirror for a primitive type.
    // We don't have to follow them, since they are handled as strong
    // roots in Universe::oops_do.
    assert(java_lang_Class::is_primitive(obj), "Sanity check");
  }

  ParCompactionManager::MarkAndPushClosure cl(cm);
  oop_oop_iterate_statics<true>(obj, &cl);
}

void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  InstanceKlass::oop_pc_follow_contents(obj, cm);

  ClassLoaderData* const loader_data = java_lang_ClassLoader::loader_data(obj);
  if (loader_data != NULL) {
    cm->follow_class_loader(loader_data);
  }
}

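// Trace a java.lang.ref.Reference: if the referent is unmarked and reference
// discovery succeeds, the referent is left for the reference processor to
// handle later; otherwise it is treated as a normal field. The next field is
// always followed as a normal oop; the discovered field is followed only when
// the reference is no longer "active", i.e. when next is non-NULL.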
template <class T>
static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
  T heap_oop = RawAccess<>::oop_load(referent_addr);
  log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
  if (!CompressedOops::is_null(heap_oop)) {
    oop referent = CompressedOops::decode_not_null(heap_oop);
    if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
        PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
      // reference already enqueued, referent will be traversed later
      klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
      log_develop_trace(gc, ref)("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
      return;
    } else {
      // treat referent as normal oop
      log_develop_trace(gc, ref)("       Non NULL normal " PTR_FORMAT, p2i(obj));
      cm->mark_and_push(referent_addr);
    }
  }
  T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
  // Treat discovered as normal oop, if ref is not "active",
  // i.e. if next is non-NULL.
  T next_oop = RawAccess<>::oop_load(next_addr);
  if (!CompressedOops::is_null(next_oop)) { // i.e. ref is not "active"
    T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
    log_develop_trace(gc, ref)("   Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
    cm->mark_and_push(discovered_addr);
  }
  cm->mark_and_push(next_addr);
  klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
}

void InstanceRefKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  if (UseCompressedOops) {
    oop_pc_follow_contents_specialized<narrowOop>(this, obj, cm);
  } else {
    oop_pc_follow_contents_specialized<oop>(this, obj, cm);
  }
}

void ObjArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  cm->follow_klass(this);

  if (UseCompressedOops) {
    oop_pc_follow_contents_specialized<narrowOop>(objArrayOop(obj), 0, cm);
  } else {
    oop_pc_follow_contents_specialized<oop>(objArrayOop(obj), 0, cm);
  }
}

void TypeArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  assert(obj->is_typeArray(), "must be a type array");
  // Performance tweak: We skip iterating over the klass pointer since we
  // know that Universe::TypeArrayKlass never moves.
}

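// Drain the marking stack and the objArray chunk stack until both are
// (simultaneously) empty. ObjArray chunks are processed one per iteration of
// the outer loop, so a large array cannot flood the marking stack with its
// elements all at once.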
void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      follow_contents(obj);
    }
    while (marking_stack()->pop_local(obj)) {
      follow_contents(obj);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
      follow_contents((objArrayOop)task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}

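// Empty the per-thread region stack, filling and updating each popped region.
// Overflow entries are drained first so that entries remaining in the
// fixed-size local queue stay available for other threads to steal.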
void ParCompactionManager::drain_region_stacks() {
  do {
    // Drain overflow stack first so other threads can steal.
    size_t region_index;
    while (region_stack()->pop_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
}