1 /*
   2  * Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP
  26 #define SHARE_VM_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP
  27 
  28 #include "gc/shared/taskqueue.hpp"
  29 #include "memory/allocation.hpp"
  30 #include "utilities/stack.hpp"
  31 
  32 class MutableSpace;
  33 class PSOldGen;
  34 class ParCompactionManager;
  35 class ObjectStartArray;
  36 class ParallelCompactData;
  37 class ParMarkBitMap;
  38 
// Per-worker-thread context for PSParallelCompact (the parallel full
// collector).  Each manager owns the thread's marking stacks (one for
// plain oops, one for objArray chunks), a region stack for the
// compaction phase, and a small cache of the last mark-bitmap query.
// The static members hold the shared manager array and the task-queue
// sets that enable work stealing between threads (see the steal()
// entry points).
class ParCompactionManager : public CHeapObj<mtGC> {
  friend class ParallelTaskTerminator;
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class StealRegionCompactionTask;
  friend class UpdateAndFillClosure;
  friend class RefProcTaskExecutor;
  friend class IdleGCTask;

 public:

// ------------------------  Don't putback if not needed
  // Actions that the compaction manager should take.
  enum Action {
    Update,
    Copy,
    UpdateAndCopy,
    CopyAndUpdate,
    NotValid
  };
// ------------------------  End don't putback if not needed

 private:
  // Capacity (in entries) of each objArray task queue.
  // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
  typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
  typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC>      ObjArrayTaskQueueSet;
  #undef QUEUE_SIZE

  // One manager per worker; indexed via manager_array() and
  // gc_thread_compaction_manager().
  static ParCompactionManager** _manager_array;
  // All threads' oop marking queues, grouped for work stealing.
  static OopTaskQueueSet*       _stack_array;
  // All threads' objArray chunk queues, grouped for work stealing.
  static ObjArrayTaskQueueSet*  _objarray_queues;
  static ObjectStartArray*      _start_array;
  // All threads' region queues, grouped for work stealing.
  static RegionTaskQueueSet*    _region_array;
  static PSOldGen*              _old_gen;

private:
  // Objects whose contents remain to be scanned during marking.
  OverflowTaskQueue<oop, mtGC>        _marking_stack;
  // Partially-scanned object arrays, tracked as (array, index) chunks
  // (see push_objarray() and follow_contents(objArrayOop, int)).
  ObjArrayTaskQueue             _objarray_stack;

  // Is there a way to reuse the _marking_stack for the
  // saving empty regions?  For now just create a different
  // type of TaskQueue.
  RegionTaskQueue*             _region_stack;

  static RegionTaskQueue**     _region_list;
  // Index in _region_list for current _region_stack.
  uint _region_stack_index;

  // Indexes of recycled region stacks/overflow stacks
  // Stacks of regions to be compacted are embedded in the tasks doing
  // the compaction.  A thread that executes the task extracts the
  // region stack and drains it.  These threads keep these region
  // stacks for use during compaction task stealing.  If a thread
  // gets a second draining task, it pushes its current region stack
  // index into the array _recycled_stack_index and gets a new
  // region stack from the task.  A thread that is executing a
  // compaction stealing task without ever having executed a
  // draining task, will get a region stack from _recycled_stack_index.
  //
  // Array of indexes into the array of region stacks.
  static uint*                    _recycled_stack_index;
  // The index into _recycled_stack_index of the last region stack index
  // pushed.  If -1, there are no entries in _recycled_stack_index.
  static int                      _recycled_top;
  // The index into _recycled_stack_index of the last region stack index
  // popped.  If -1, there has not been any entry popped.
  static int                      _recycled_bottom;

  static ParMarkBitMap* _mark_bitmap;

  // What this manager should do when processing regions (see Action).
  Action _action;

  // Cache of the most recent mark-bitmap query: the queried range
  // start, the object involved, and the returned size.
  HeapWord* _last_query_beg;
  oop _last_query_obj;
  size_t _last_query_ret;

  static PSOldGen* old_gen()             { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array()  { return _stack_array; }

  // One-time setup of the static state; takes the shared mark bitmap
  // used by all managers.
  static void initialize(ParMarkBitMap* mbm);

 protected:
  // Array of tasks.  Needed by the ParallelTaskTerminator.
  static RegionTaskQueueSet* region_array()      { return _region_array; }
  OverflowTaskQueue<oop, mtGC>*  marking_stack()       { return &_marking_stack; }

  // Pushes onto the marking stack.  If the marking stack is full,
  // pushes onto the overflow stack.
  void stack_push(oop obj);
  // Do not implement an equivalent stack_pop.  Deal with the
  // marking stack and overflow stack directly.

 public:
  // Invalidate the cached bitmap query (see _last_query_* above).
  void reset_bitmap_query_cache() {
    _last_query_beg = NULL;
    _last_query_obj = NULL;
    _last_query_ret = 0;
  }

  Action action() { return _action; }
  void set_action(Action v) { _action = v; }

  // Bitmap query support, cache last query and result
  HeapWord* last_query_begin() { return _last_query_beg; }
  oop last_query_object() { return _last_query_obj; }
  size_t last_query_return() { return _last_query_ret; }

  void set_last_query_begin(HeapWord *new_beg) { _last_query_beg = new_beg; }
  void set_last_query_object(oop new_obj) { _last_query_obj = new_obj; }
  void set_last_query_return(size_t new_ret) { _last_query_ret = new_ret; }

  // Clears the cached bitmap query on every manager (definition in
  // the .cpp; presumably iterates _manager_array -- TODO confirm).
  static void reset_all_bitmap_query_caches();

  RegionTaskQueue* region_stack()                { return _region_stack; }
  void set_region_stack(RegionTaskQueue* v)       { _region_stack = v; }

  inline static ParCompactionManager* manager_array(uint index);

  // NOTE(review): takes int while the related stack indexes are uint.
  inline static RegionTaskQueue* region_list(int index) {
    return _region_list[index];
  }

  uint region_stack_index() { return _region_stack_index; }
  void set_region_stack_index(uint v) { _region_stack_index = v; }

  // Pop and push unique reusable stack index
  static int pop_recycled_stack_index();
  static void push_recycled_stack_index(uint v);
  static void reset_recycled_stack_index() {
    _recycled_bottom = _recycled_top = -1;
  }

  ParCompactionManager();
  ~ParCompactionManager();

  // Pushes onto the region stack at the given index.  If the
  // region stack is full,
  // pushes onto the region overflow stack.
  static void region_list_push(uint stack_index, size_t region_index);
  static void verify_region_list_empty(uint stack_index);
  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

  // void drain_stacks();

  // Predicates derived from _action (definitions in the .cpp).
  bool should_update();
  bool should_copy();

  // Save for later processing.  Must not fail.
  inline void push(oop obj);
  inline void push_objarray(oop objarray, size_t index);
  inline void push_region(size_t index);

  // Check mark and maybe push on marking stack.
  template <typename T> inline void mark_and_push(T* p);

  inline void follow_klass(Klass* klass);

  // NOTE(review): the parameter is a ClassLoaderData, not a Klass;
  // the name 'klass' is misleading.
  void follow_class_loader(ClassLoaderData* klass);

  // Access function for compaction managers
  static ParCompactionManager* gc_thread_compaction_manager(uint index);

  // Attempt to steal a task from another queue in the corresponding
  // queue set on behalf of queue 'queue_num'; on success, returns
  // true and fills the out-parameter.
  static bool steal(int queue_num, int* seed, oop& t);
  static bool steal_objarray(int queue_num, int* seed, ObjArrayTask& t);
  static bool steal(int queue_num, int* seed, size_t& region);

  // Process tasks remaining on any marking stack
  void follow_marking_stacks();
  inline bool marking_stacks_empty() const;

  // Process tasks remaining on any stack
  void drain_region_stacks();

  // Trace through the object's references during marking; the
  // objArray variant processes one chunk starting at 'index'.
  void follow_contents(oop obj);
  void follow_contents(objArrayOop array, int index);

  void update_contents(oop obj);

  // Oop closure bound to a compaction manager; presumably applies
  // mark_and_push() to each visited oop -- implementation is not
  // visible in this header.
  class MarkAndPushClosure: public ExtendedOopClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }

    template <typename T> void do_oop_nv(T* p);
    virtual void do_oop(oop* p);
    virtual void do_oop(narrowOop* p);

    // This closure provides its own oop verification code.
    debug_only(virtual bool should_verify_oops() { return false; })
  };

  // Void closure bound to a compaction manager; presumably drains the
  // marking stacks (see follow_marking_stacks) -- implementation is
  // not visible in this header.
  class FollowStackClosure: public VoidClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };

  // The one and only place to start following the classes.
  // Should only be applied to the ClassLoaderData klasses list.
  class FollowKlassClosure : public KlassClosure {
   private:
    MarkAndPushClosure* _mark_and_push_closure;
   public:
    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
        _mark_and_push_closure(mark_and_push_closure) { }
    void do_klass(Klass* klass);
  };
};
 252 
// Returns the compaction manager for worker 'index'.  Note the bound
// is inclusive (index == ParallelGCThreads is legal), so _manager_array
// presumably holds one slot beyond the GC worker threads -- confirm
// against the allocation site in the .cpp.
inline ParCompactionManager* ParCompactionManager::manager_array(uint index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index <= ParallelGCThreads, "out of range manager_array access");
  return _manager_array[index];
}
 258 
// True when both of this thread's marking task queues (plain oops and
// objArray chunks) report empty.
bool ParCompactionManager::marking_stacks_empty() const {
  return _marking_stack.is_empty() && _objarray_stack.is_empty();
}
 262 
 263 #endif // SHARE_VM_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP