/*
 * Copyright (c) 2005, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/stack.inline.hpp"

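// Static state shared by all compaction managers: the per-thread manager
// array, the task queue sets used for work stealing, and the recycled
// region-stack index list.  All of it is set up in initialize().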
PSOldGen*               ParCompactionManager::_old_gen = NULL;
ParCompactionManager**  ParCompactionManager::_manager_array = NULL;

RegionTaskQueue**       ParCompactionManager::_region_list = NULL;

OopTaskQueueSet*        ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
  ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray*       ParCompactionManager::_start_array = NULL;
ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet*     ParCompactionManager::_region_array = NULL;

uint*                   ParCompactionManager::_recycled_stack_index = NULL;
int                     ParCompactionManager::_recycled_top = -1;
int                     ParCompactionManager::_recycled_bottom = -1;

ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate),
    _region_stack(NULL),
    _region_stack_index((uint)max_uintx) {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
}

ParCompactionManager::~ParCompactionManager() {
  // _recycled_stack_index was allocated with NEW_C_HEAP_ARRAY, so release
  // it with the matching FREE_C_HEAP_ARRAY rather than delete.
  FREE_C_HEAP_ARRAY(uint, _recycled_stack_index);
}

void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
  guarantee(_manager_array != NULL, "Could not allocate manager_array");

  _region_list = NEW_C_HEAP_ARRAY(RegionTaskQueue*,
                                  parallel_gc_threads+1, mtGC);
  guarantee(_region_list != NULL, "Could not allocate region_list");

  _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads, mtGC);

  // parallel_gc_threads + 1 to be consistent with the number of
  // compaction managers.
  for (uint i = 0; i < parallel_gc_threads + 1; i++) {
    _region_list[i] = new RegionTaskQueue();
    region_list(i)->initialize();
  }

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not allocate stack_array");
  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for (uint i = 0; i < parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_array()->register_queue(i, region_list(i));
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
    "Not initialized?");
}

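// The recycled stack index list is a fixed-size array used as a FIFO of
// region-stack indices; entries live at indices _recycled_bottom + 1 up to
// and including _recycled_top.  push_recycled_stack_index() claims a slot
// with an atomic increment of _recycled_top; pop_recycled_stack_index()
// claims an entry by advancing _recycled_bottom with a CAS loop.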
int ParCompactionManager::pop_recycled_stack_index() {
  assert(_recycled_bottom <= _recycled_top, "list is empty");
  // Get the next available index
  if (_recycled_bottom < _recycled_top) {
    uint cur, next, last;
    do {
      cur = _recycled_bottom;
      next = cur + 1;
      last = Atomic::cmpxchg(next, &_recycled_bottom, cur);
    } while (cur != last);
    return _recycled_stack_index[next];
  } else {
    return -1;
  }
}

void ParCompactionManager::push_recycled_stack_index(uint v) {
  // Get the next available index
  int cur = Atomic::add(1, &_recycled_top);
  _recycled_stack_index[cur] = v;
  assert(_recycled_bottom <= _recycled_top, "list top and bottom are wrong");
}

bool ParCompactionManager::should_update() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Update) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_copy() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Copy) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

void ParCompactionManager::region_list_push(uint list_index,
                                            size_t region_index) {
  region_list(list_index)->push(region_index);
}

void ParCompactionManager::verify_region_list_empty(uint list_index) {
  assert(region_list(list_index)->is_empty(), "Not empty");
}

ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

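// The oop_pc_follow_contents() methods below implement the marking phase of
// parallel compaction for each Klass kind: they mark and push the oops
// reachable from an object onto the given compaction manager's stacks.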
void InstanceKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  assert(obj != NULL, "can't follow the contents of a NULL object");

  cm->follow_klass(this);
  // Only mark the header and let the scan of the meta-data mark
  // everything else.

  ParCompactionManager::MarkAndPushClosure cl(cm);
  InstanceKlass::oop_oop_iterate_oop_maps<true>(obj, &cl);
}

void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  InstanceKlass::oop_pc_follow_contents(obj, cm);

  // Follow the klass field in the mirror.
  Klass* klass = java_lang_Class::as_Klass(obj);
  if (klass != NULL) {
    // An anonymous class doesn't have its own class loader, so the call
    // to follow_klass will mark and push its java mirror instead of the
    // class loader. When handling the java mirror for an anonymous class
    // we need to make sure its class loader data is claimed; this is done
    // by calling follow_class_loader explicitly. For non-anonymous classes
    // the call to follow_class_loader is made when the class loader itself
    // is handled.
    if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
      PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
    } else {
      cm->follow_klass(klass);
    }
  } else {
    // If klass is NULL then this is a mirror for a primitive type.
    // We don't have to follow it, since primitive mirrors are handled
    // as strong roots in Universe::oops_do.
    assert(java_lang_Class::is_primitive(obj), "Sanity check");
  }

  ParCompactionManager::MarkAndPushClosure cl(cm);
  oop_oop_iterate_statics<true>(obj, &cl);
}

void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  InstanceKlass::oop_pc_follow_contents(obj, cm);

  ClassLoaderData* const loader_data = java_lang_ClassLoader::loader_data(obj);
  if (loader_data != NULL) {
    PSParallelCompact::follow_class_loader(cm, loader_data);
  }
}

template <class T>
static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
  T heap_oop = oopDesc::load_heap_oop(referent_addr);
  debug_only(
    if (TraceReferenceGC && PrintGCDetails) {
      gclog_or_tty->print_cr("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
    }
  )
  if (!oopDesc::is_null(heap_oop)) {
    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
    if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
        PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
      // Reference has been discovered; the referent will be traversed later.
      klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
      debug_only(
        if (TraceReferenceGC && PrintGCDetails) {
          gclog_or_tty->print_cr("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
        }
      )
      return;
    } else {
      // Treat the referent as a normal oop.
      debug_only(
        if (TraceReferenceGC && PrintGCDetails) {
          gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
        }
      )
      cm->mark_and_push(referent_addr);
    }
  }
  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
    // Treat discovered as normal oop, if ref is not "active",
    // i.e. if next is non-NULL.
    T next_oop = oopDesc::load_heap_oop(next_addr);
    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
      debug_only(
        if (TraceReferenceGC && PrintGCDetails) {
          gclog_or_tty->print_cr("   Process discovered as normal "
                                 PTR_FORMAT, p2i(discovered_addr));
        }
      )
      cm->mark_and_push(discovered_addr);
    }
  } else {
#ifdef ASSERT
    // In the case of older JDKs which do not use the discovered
    // field for the pending list, an inactive ref (next != NULL)
    // must always have a NULL discovered field.
    T next = oopDesc::load_heap_oop(next_addr);
    oop discovered = java_lang_ref_Reference::discovered(obj);
    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
                   p2i(obj)));
#endif
  }
  cm->mark_and_push(next_addr);
  klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
}


void InstanceRefKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  if (UseCompressedOops) {
    oop_pc_follow_contents_specialized<narrowOop>(this, obj, cm);
  } else {
    oop_pc_follow_contents_specialized<oop>(this, obj, cm);
  }
}
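
// The strided walker for object arrays, used by
// ObjArrayKlass::oop_pc_follow_contents() below.  A minimal sketch
// reconstructed from the call sites: it assumes that
// ParCompactionManager::push_objarray() enqueues a continuation task on
// _objarray_stack, and that the ObjArrayMarkingStride flag bounds the number
// of elements scanned per task.
template <class T>
static void oop_pc_follow_contents_specialized(objArrayOop obj, int index, ParCompactionManager* cm) {
  const size_t len = size_t(obj->length());
  const size_t beg_index = size_t(index);
  assert(beg_index < len || len == 0, "index too large");

  // Scan at most ObjArrayMarkingStride elements per task so a huge array
  // is processed as a series of stealable chunks.
  const size_t stride = MIN2(len - beg_index, (size_t)ObjArrayMarkingStride);
  const size_t end_index = beg_index + stride;
  T* const base = (T*)obj->base();
  T* const beg = base + beg_index;
  T* const end = base + end_index;

  // Push the non-NULL elements of the next stride on the marking stack.
  for (T* e = beg; e < end; e++) {
    cm->mark_and_push(e);
  }

  if (end_index < len) {
    cm->push_objarray(obj, end_index); // Push the continuation.
  }
}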

void ObjArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  cm->follow_klass(this);

  if (UseCompressedOops) {
    oop_pc_follow_contents_specialized<narrowOop>(objArrayOop(obj), 0, cm);
  } else {
    oop_pc_follow_contents_specialized<oop>(objArrayOop(obj), 0, cm);
  }
}

void TypeArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  assert(obj->is_typeArray(), "must be a type array");
  // Performance tweak: We skip iterating over the klass pointer since we
  // know that Universe::TypeArrayKlass never moves.
}

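// Drain both marking stacks (ordinary objects and objArray chunks) until
// they are empty at the same time.  ObjArray chunks are processed one at a
// time, interleaved with ordinary objects, so a large array cannot bloat
// the marking stack.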
void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      follow_contents(obj);
    }
    while (marking_stack()->pop_local(obj)) {
      follow_contents(obj);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
      follow_contents((objArrayOop)task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}

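// Drain this manager's region stack, filling and updating each claimed
// region.  As above, the overflow stack is drained first so that other
// threads can steal from the bounded local stack.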
void ParCompactionManager::drain_region_stacks() {
  do {
    // Drain overflow stack first so other threads can steal.
    size_t region_index;
    while (region_stack()->pop_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
}