/*
 * Copyright (c) 2005, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP
#define SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP

#include "gc/parallel/psParallelCompact.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/allocation.hpp"
#include "utilities/stack.hpp"

class MutableSpace;
class PSOldGen;
class ParCompactionManager;
class ObjectStartArray;
class ParallelCompactData;
class ParMarkBitMap;

class ParCompactionManager : public CHeapObj<mtGC> {
  friend class ParallelTaskTerminator;
  friend class ParMarkBitMap;
  friend class PSParallelCompact;
  friend class CompactionWithStealingTask;
  friend class UpdateAndFillClosure;
  friend class RefProcTaskExecutor;
  friend class PCRefProcTask;
  friend class MarkFromRootsTask;
  friend class UpdateDensePrefixAndCompactionTask;

 public:

// ------------------------  Don't put back if not needed
  // Actions that the compaction manager should take.
  enum Action {
    Update,
    Copy,
    UpdateAndCopy,
    CopyAndUpdate,
    NotValid
  };
// ------------------------  End don't put back if not needed

 private:
  // 32-bit:  4K * 8 = 32KiB; 64-bit:  8K * 16 = 128KiB
  #define QUEUE_SIZE (1 << NOT_LP64(12) LP64_ONLY(13))
  typedef OverflowTaskQueue<ObjArrayTask, mtGC, QUEUE_SIZE> ObjArrayTaskQueue;
  typedef GenericTaskQueueSet<ObjArrayTaskQueue, mtGC>      ObjArrayTaskQueueSet;
  #undef QUEUE_SIZE
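  // The element counts above correspond to sizeof(ObjArrayTask), which is
  // presumably 8 bytes on 32-bit and 16 bytes on 64-bit (an oop plus an
  // index, padded), giving 4K * 8 = 32KiB and 8K * 16 = 128KiB per queue.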

  static ParCompactionManager** _manager_array;
  static OopTaskQueueSet*       _stack_array;
  static ObjArrayTaskQueueSet*  _objarray_queues;
  static ObjectStartArray*      _start_array;
  static RegionTaskQueueSet*    _region_array;
  static PSOldGen*              _old_gen;

 private:
  OverflowTaskQueue<oop, mtGC>        _marking_stack;
  ObjArrayTaskQueue             _objarray_stack;
  size_t                        _shadow_record;

  // Is there a way to reuse the _marking_stack for saving empty
  // regions?  For now just create a different type of TaskQueue.
  RegionTaskQueue              _region_stack;

  static ParMarkBitMap* _mark_bitmap;

  // The shadow region array is used in a LIFO fashion so that shadow
  // regions can be reused, which improves data locality and utilization.
  static GrowableArray<size_t>* _shadow_region_array;

  // This Monitor provides mutually exclusive access to _shadow_region_array.
  static Monitor*               _shadow_region_monitor;

  Action _action;

  HeapWord* _last_query_beg;
  oop _last_query_obj;
  size_t _last_query_ret;

  static PSOldGen* old_gen()             { return _old_gen; }
  static ObjectStartArray* start_array() { return _start_array; }
  static OopTaskQueueSet* stack_array()  { return _stack_array; }

  static void initialize(ParMarkBitMap* mbm);

 protected:
  // Array of tasks.  Needed by the ParallelTaskTerminator.
  static RegionTaskQueueSet* region_array()      { return _region_array; }
  OverflowTaskQueue<oop, mtGC>*  marking_stack()       { return &_marking_stack; }

  // Pushes onto the marking stack.  If the marking stack is full,
  // pushes onto the overflow stack.
  void stack_push(oop obj);
  // Do not implement an equivalent stack_pop.  Deal with the
  // marking stack and overflow stack directly.
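  //
  // A minimal sketch of what stack_push() might do (an assumption, not
  // necessarily the actual implementation): OverflowTaskQueue::push() already
  // falls back to its unbounded overflow stack once the fixed-size queue is
  // full, so the method could simply delegate to it:
  //
  //   void ParCompactionManager::stack_push(oop obj) {
  //     marking_stack()->push(obj);  // spills to the overflow stack when full
  //   }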

 public:
  static size_t acquire_shadow_region(PSParallelCompact::RegionData* region_ptr);
  static void release_shadow_region(size_t shadow_region);

  static void add_shadow_region(size_t shadow_region);
  static void remove_all_shadow_regions();
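
  // Illustrative usage sketch for the shadow-region API (an assumption about
  // the calling convention, not a definitive protocol): a worker whose
  // destination region is not yet free might acquire a shadow region, fill
  // it instead, and release it after its contents have been copied into the
  // real destination:
  //
  //   size_t shadow = ParCompactionManager::acquire_shadow_region(region_ptr);
  //   // ... fill the shadow region in place of the busy destination ...
  //   ParCompactionManager::release_shadow_region(shadow);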

  inline size_t shadow_record() { return _shadow_record; }
  inline void set_shadow_record(size_t record) { _shadow_record = record; }
  inline size_t next_shadow_record(size_t workers) { _shadow_record += workers; return shadow_record(); }
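  // next_shadow_record() advances the record by the worker count; assuming
  // each worker's record starts at its own index (via set_shadow_record()),
  // the workers probe disjoint, strided sequences: with 4 workers, for
  // example, worker 2 would visit records 2, 6, 10, and so on.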

  void reset_bitmap_query_cache() {
    _last_query_beg = NULL;
    _last_query_obj = NULL;
    _last_query_ret = 0;
  }

  Action action() { return _action; }
  void set_action(Action v) { _action = v; }

  // Bitmap query support; cache the last query and its result.
  HeapWord* last_query_begin() { return _last_query_beg; }
  oop last_query_object() { return _last_query_obj; }
  size_t last_query_return() { return _last_query_ret; }

  void set_last_query_begin(HeapWord *new_beg) { _last_query_beg = new_beg; }
  void set_last_query_object(oop new_obj) { _last_query_obj = new_obj; }
  void set_last_query_return(size_t new_ret) { _last_query_ret = new_ret; }
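
  // Sketch of how a caller might use this cache (an assumption about the
  // query code, not the actual bitmap implementation): a routine that
  // repeatedly measures live data from the same start address can return the
  // cached result instead of rescanning the bitmap:
  //
  //   if (beg == cm->last_query_begin() && obj == cm->last_query_object()) {
  //     return cm->last_query_return();           // cache hit
  //   }
  //   size_t ret = /* walk the mark bitmap */;
  //   cm->set_last_query_begin(beg);
  //   cm->set_last_query_object(obj);
  //   cm->set_last_query_return(ret);
  //   return ret;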

  static void reset_all_bitmap_query_caches();

  RegionTaskQueue* region_stack()                { return &_region_stack; }

  inline static ParCompactionManager* manager_array(uint index);

  ParCompactionManager();

  // Verify that the region stack of the worker at the given index is empty.
  static void verify_region_list_empty(uint stack_index);
  ParMarkBitMap* mark_bitmap() { return _mark_bitmap; }

  // void drain_stacks();

  bool should_update();
  bool should_copy();

  // Save for later processing.  Must not fail.
  inline void push(oop obj);
  inline void push_objarray(oop objarray, size_t index);
  inline void push_region(size_t index);

  // Check mark and maybe push on marking stack.
  template <typename T> inline void mark_and_push(T* p);
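
  // One plausible shape for mark_and_push() (an assumption, not necessarily
  // the real inline definition): decode the possibly compressed reference and
  // push the object only if this thread is the one that marks it:
  //
  //   T heap_oop = RawAccess<>::oop_load(p);
  //   if (!CompressedOops::is_null(heap_oop)) {
  //     oop obj = CompressedOops::decode_not_null(heap_oop);
  //     if (mark_bitmap()->is_unmarked(obj) && PSParallelCompact::mark_obj(obj)) {
  //       push(obj);
  //     }
  //   }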

  inline void follow_klass(Klass* klass);

  void follow_class_loader(ClassLoaderData* cld);

  // Access function for compaction managers
  static ParCompactionManager* gc_thread_compaction_manager(uint index);

  static bool steal(int queue_num, oop& t);
  static bool steal_objarray(int queue_num, ObjArrayTask& t);
  static bool steal(int queue_num, size_t& region);
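
  // Illustrative work-stealing loop built on steal() (an assumption about
  // how the marking tasks use it, not the exact task code): once its own
  // queues are empty, a worker keeps taking oops from other workers' queues
  // and processing them until termination is agreed:
  //
  //   oop obj;
  //   while (ParCompactionManager::steal(queue_num, obj)) {
  //     cm->follow_contents(obj);
  //     cm->follow_marking_stacks();
  //   }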

  // Process tasks remaining on any marking stack
  void follow_marking_stacks();
  inline bool marking_stacks_empty() const;

  // Process tasks remaining on any region stack
  void drain_region_stacks();

  void follow_contents(oop obj);
  void follow_array(objArrayOop array, int index);

  void update_contents(oop obj);

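  // FollowStackClosure presumably drains the marking stacks of the given
  // compaction manager from its do_void() (the definition lives in the
  // corresponding .cpp file).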
  class FollowStackClosure: public VoidClosure {
   private:
    ParCompactionManager* _compaction_manager;
   public:
    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
    virtual void do_void();
  };
};

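// Note on the "<=" below: valid indices run from 0 to ParallelGCThreads
// inclusive, so _manager_array presumably holds one extra slot beyond the
// per-worker managers (historically used by the VM thread).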
inline ParCompactionManager* ParCompactionManager::manager_array(uint index) {
  assert(_manager_array != NULL, "access of NULL manager_array");
  assert(index <= ParallelGCThreads, "out of range manager_array access");
  return _manager_array[index];
}

bool ParCompactionManager::marking_stacks_empty() const {
  return _marking_stack.is_empty() && _objarray_stack.is_empty();
}

#endif // SHARE_GC_PARALLEL_PSCOMPACTIONMANAGER_HPP