/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

  25 #include "precompiled.hpp"
  26 #include "classfile/systemDictionary.hpp"
  27 #include "gc/parallel/objectStartArray.hpp"
  28 #include "gc/parallel/parMarkBitMap.inline.hpp"
  29 #include "gc/parallel/parallelScavengeHeap.hpp"
  30 #include "gc/parallel/psCompactionManager.inline.hpp"
  31 #include "gc/parallel/psOldGen.hpp"
  32 #include "gc/parallel/psParallelCompact.inline.hpp"
  33 #include "gc/shared/taskqueue.inline.hpp"
  34 #include "logging/log.hpp"
  35 #include "memory/iterator.inline.hpp"
  36 #include "oops/access.inline.hpp"
  37 #include "oops/compressedOops.inline.hpp"
  38 #include "oops/instanceKlass.inline.hpp"
  39 #include "oops/instanceMirrorKlass.inline.hpp"
  40 #include "oops/objArrayKlass.inline.hpp"
  41 #include "oops/oop.inline.hpp"
  42 #include "runtime/atomic.hpp"
  43 
PSOldGen*            ParCompactionManager::_old_gen = NULL;
ParCompactionManager**  ParCompactionManager::_manager_array = NULL;

OopTaskQueueSet*     ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
  ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray*    ParCompactionManager::_start_array = NULL;
ParMarkBitMap*       ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet*  ParCompactionManager::_region_array = NULL;
GrowableArray<size_t >* ParCompactionManager::_shadow_region_array = NULL;
Monitor*                ParCompactionManager::_shadow_region_monitor = NULL;

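// Each ParCompactionManager holds the per-worker marking, objArray and region
// task queues; initialize() below registers them with the shared queue sets so
// that idle workers can steal from them.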
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate) {

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
  _region_stack.initialize();

  reset_bitmap_query_cache();
}

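// Set up the shared queue sets and create one ParCompactionManager per GC
// worker thread, plus an extra one for the VMThread at index
// parallel_gc_threads (not eligible for work stealing).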
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(ParallelScavengeHeap::heap() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not allocate stack_array");
  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for (uint i = 0; i < parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_array()->register_queue(i, _manager_array[i]->region_stack());
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(ParallelScavengeHeap::heap()->workers().total_workers() != 0,
    "Not initialized?");

  _shadow_region_array = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<size_t >(10, true);
  guarantee(_shadow_region_array != NULL, "Could not allocate shadow_region_array");

  _shadow_region_monitor = new Monitor(Mutex::barrier, "CompactionManager monitor",
                                       Mutex::_allow_vm_block_flag, Monitor::_safepoint_check_never);
  guarantee(_shadow_region_monitor != NULL, "Could not allocate shadow_region_monitor");
}

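// Reset the bitmap query cache of every manager, including the extra one
// reserved for the VMThread, hence the inclusive upper bound.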
void ParCompactionManager::reset_all_bitmap_query_caches() {
  uint parallel_gc_threads = ParallelScavengeHeap::heap()->workers().total_workers();
  for (uint i = 0; i <= parallel_gc_threads; i++) {
    _manager_array[i]->reset_bitmap_query_cache();
  }
}

bool ParCompactionManager::should_update() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Update) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_copy() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Copy) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(uint index) {
  assert(index < ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

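// Transitively follow objects from this manager's marking and objArray stacks
// until both (including their overflow portions) are empty.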
void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      follow_contents(obj);
    }
    while (marking_stack()->pop_local(obj)) {
      follow_contents(obj);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
      follow_array((objArrayOop)task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}

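// Fill and update every region on this manager's region stack.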
void ParCompactionManager::drain_region_stacks() {
  do {
    // Drain overflow stack first so other threads can steal.
    size_t region_index;
    while (region_stack()->pop_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
}

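// Pop a free shadow region index, spinning until either one becomes available
// or the target region has been claimed, in which case 0 is returned and no
// shadow region is needed.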
size_t ParCompactionManager::acquire_shadow_region(PSParallelCompact::RegionData* region_ptr) {
  while (true) {
    MutexLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
    if (_shadow_region_array->is_empty()) {
      // The corresponding heap region may have become available in the
      // meantime; if it has been claimed, no shadow region is needed,
      // and 0 is returned to indicate this case.
      if (region_ptr->claimed()) {
        return 0;
      }
    } else {
      return _shadow_region_array->pop();
    }
  }
}

void ParCompactionManager::release_shadow_region(size_t shadow_region) {
  MutexLocker ml(_shadow_region_monitor, Mutex::_no_safepoint_check_flag);
  _shadow_region_array->append(shadow_region);
}

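// Note: unlike release_shadow_region(), this appends without taking
// _shadow_region_monitor.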
void ParCompactionManager::enqueue_shadow_region(size_t shadow_region) {
  _shadow_region_array->append(shadow_region);
}

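// Despite the name, this drops all remaining shadow regions rather than
// removing a single entry.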
void ParCompactionManager::dequeue_shadow_region() {
  _shadow_region_array->clear();
}