/*
 * Copyright (c) 2005, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/parallelScavenge/gcTaskManager.hpp"
#include "gc_implementation/parallelScavenge/objectStartArray.hpp"
#include "gc_implementation/parallelScavenge/parMarkBitMap.hpp"
#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
#include "gc_implementation/parallelScavenge/psOldGen.hpp"
#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
#include "oops/objArrayKlass.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#include "runtime/atomic.inline.hpp"
#include "utilities/stack.inline.hpp"

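// Static state below is shared by all ParCompactionManager instances.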
PSOldGen*               ParCompactionManager::_old_gen = NULL;
ParCompactionManager**  ParCompactionManager::_manager_array = NULL;

RegionTaskQueue**       ParCompactionManager::_region_list = NULL;

OopTaskQueueSet*        ParCompactionManager::_stack_array = NULL;
ParCompactionManager::ObjArrayTaskQueueSet*
  ParCompactionManager::_objarray_queues = NULL;
ObjectStartArray*       ParCompactionManager::_start_array = NULL;
ParMarkBitMap*          ParCompactionManager::_mark_bitmap = NULL;
RegionTaskQueueSet*     ParCompactionManager::_region_array = NULL;

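// Bookkeeping for recycled region stack indices: _recycled_stack_index holds
// indices that have been returned and can be handed out again.  _recycled_top
// is advanced with Atomic::add when an index is pushed; _recycled_bottom is
// advanced with a cmpxchg loop when an index is popped.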
uint*                   ParCompactionManager::_recycled_stack_index = NULL;
int                     ParCompactionManager::_recycled_top = -1;
int                     ParCompactionManager::_recycled_bottom = -1;

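// Each ParCompactionManager caches the old generation and its start array and
// initializes its own marking stack and objarray stack.  The region stack is
// not set here; _region_stack starts out NULL.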
ParCompactionManager::ParCompactionManager() :
    _action(CopyAndUpdate),
    _region_stack(NULL),
    _region_stack_index((uint)max_uintx) {

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  _old_gen = heap->old_gen();
  _start_array = old_gen()->start_array();

  marking_stack()->initialize();
  _objarray_stack.initialize();
}

ParCompactionManager::~ParCompactionManager() {
  // _recycled_stack_index was allocated with NEW_C_HEAP_ARRAY, so release it
  // with the matching C heap macro rather than delete.
  FREE_C_HEAP_ARRAY(uint, _recycled_stack_index, mtGC);
}

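// One-time setup: allocates parallel_gc_threads + 1 compaction managers (one
// per GC worker thread plus one for the VMThread), their region lists, and
// the task queue sets used for work stealing, and registers each worker's
// queues with those sets.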
void ParCompactionManager::initialize(ParMarkBitMap* mbm) {
  assert(PSParallelCompact::gc_task_manager() != NULL,
    "Needed for initialization");

  _mark_bitmap = mbm;

  uint parallel_gc_threads = PSParallelCompact::gc_task_manager()->workers();

  assert(_manager_array == NULL, "Attempt to initialize twice");
  _manager_array = NEW_C_HEAP_ARRAY(ParCompactionManager*, parallel_gc_threads+1, mtGC);
  guarantee(_manager_array != NULL, "Could not allocate manager_array");

  _region_list = NEW_C_HEAP_ARRAY(RegionTaskQueue*,
                         parallel_gc_threads+1, mtGC);
  guarantee(_region_list != NULL, "Could not allocate region_list");

  _recycled_stack_index = NEW_C_HEAP_ARRAY(uint, parallel_gc_threads, mtGC);

  // parallel_gc_threads + 1 to be consistent with the number of
  // compaction managers.
  for (uint i = 0; i < parallel_gc_threads + 1; i++) {
    _region_list[i] = new RegionTaskQueue();
    region_list(i)->initialize();
  }

  _stack_array = new OopTaskQueueSet(parallel_gc_threads);
  guarantee(_stack_array != NULL, "Could not allocate stack_array");
  _objarray_queues = new ObjArrayTaskQueueSet(parallel_gc_threads);
  guarantee(_objarray_queues != NULL, "Could not allocate objarray_queues");
  _region_array = new RegionTaskQueueSet(parallel_gc_threads);
  guarantee(_region_array != NULL, "Could not allocate region_array");

  // Create and register the ParCompactionManager(s) for the worker threads.
  for (uint i = 0; i < parallel_gc_threads; i++) {
    _manager_array[i] = new ParCompactionManager();
    guarantee(_manager_array[i] != NULL, "Could not create ParCompactionManager");
    stack_array()->register_queue(i, _manager_array[i]->marking_stack());
    _objarray_queues->register_queue(i, &_manager_array[i]->_objarray_stack);
    region_array()->register_queue(i, region_list(i));
  }

  // The VMThread gets its own ParCompactionManager, which is not available
  // for work stealing.
  _manager_array[parallel_gc_threads] = new ParCompactionManager();
  guarantee(_manager_array[parallel_gc_threads] != NULL,
    "Could not create ParCompactionManager");
  assert(PSParallelCompact::gc_task_manager()->workers() != 0,
    "Not initialized?");
}

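// Pop the next recycled region stack index, or return -1 if none is
// available.  Several threads may pop concurrently, so the next slot is
// claimed by advancing _recycled_bottom with a cmpxchg loop; the thread whose
// cmpxchg succeeds owns the index stored just past the old bottom value.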
int ParCompactionManager::pop_recycled_stack_index() {
  assert(_recycled_bottom <= _recycled_top, "list is empty");
  // Get the next available index
  if (_recycled_bottom < _recycled_top) {
    uint cur, next, last;
    do {
      cur = _recycled_bottom;
      next = cur + 1;
      last = Atomic::cmpxchg(next, &_recycled_bottom, cur);
    } while (cur != last);
    return _recycled_stack_index[next];
  } else {
    return -1;
  }
}

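// Return a region stack index to the recycled pool.  The slot is claimed by
// atomically incrementing _recycled_top, then the index is stored into it.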
void ParCompactionManager::push_recycled_stack_index(uint v) {
  // Get the next available index
  int cur = Atomic::add(1, &_recycled_top);
  _recycled_stack_index[cur] = v;
  assert(_recycled_bottom <= _recycled_top, "list top and bottom are wrong");
}

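// Action predicates: a manager updates pointers and/or copies objects
// depending on the action set for the current compaction phase.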
bool ParCompactionManager::should_update() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Update) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

bool ParCompactionManager::should_copy() {
  assert(action() != NotValid, "Action is not set");
  return (action() == ParCompactionManager::Copy) ||
         (action() == ParCompactionManager::CopyAndUpdate) ||
         (action() == ParCompactionManager::UpdateAndCopy);
}

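// Append a region index to the region list identified by list_index (one
// region list exists per compaction manager).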
void ParCompactionManager::region_list_push(uint list_index,
                                            size_t region_index) {
  region_list(list_index)->push(region_index);
}

void ParCompactionManager::verify_region_list_empty(uint list_index) {
  assert(region_list(list_index)->is_empty(), "Not empty");
}

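// Return the ParCompactionManager for the GC worker thread with the given
// index.  The last slot in _manager_array is reserved for the VMThread and is
// not handed out here.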
ParCompactionManager*
ParCompactionManager::gc_thread_compaction_manager(int index) {
  assert(index >= 0 && index < (int)ParallelGCThreads, "index out of range");
  assert(_manager_array != NULL, "Sanity");
  return _manager_array[index];
}

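// Drain this manager's marking work: empty the overflow portion of the
// marking stack first (so other threads can steal from the marking stack),
// then the local portion, then process at most one ObjArrayTask before
// re-checking.  Loop until both the marking stack and the objarray stack are
// empty.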
void ParCompactionManager::follow_marking_stacks() {
  do {
    // Drain the overflow stack first, to allow stealing from the marking stack.
    oop obj;
    while (marking_stack()->pop_overflow(obj)) {
      obj->follow_contents(this);
    }
    while (marking_stack()->pop_local(obj)) {
      obj->follow_contents(this);
    }

    // Process ObjArrays one at a time to avoid marking stack bloat.
    ObjArrayTask task;
    if (_objarray_stack.pop_overflow(task) || _objarray_stack.pop_local(task)) {
      ObjArrayKlass* k = (ObjArrayKlass*)task.obj()->klass();
      k->oop_follow_contents(this, task.obj(), task.index());
    }
  } while (!marking_stacks_empty());

  assert(marking_stacks_empty(), "Sanity");
}

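// Drain this manager's region stack: fill and update each claimed region via
// PSParallelCompact::fill_and_update_region(), taking regions from the
// overflow portion first so other threads can steal, and loop until the
// region stack is empty.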
void ParCompactionManager::drain_region_stacks() {
  do {
    // Drain overflow stack first so other threads can steal.
    size_t region_index;
    while (region_stack()->pop_overflow(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }

    while (region_stack()->pop_local(region_index)) {
      PSParallelCompact::fill_and_update_region(this, region_index);
    }
  } while (!region_stack()->is_empty());
}