/*
 * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP
#define SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP

#include "gc/parallel/psPromotionLAB.hpp"
#include "gc/shared/copyFailedInfo.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/preservedMarks.hpp"
#include "gc/shared/taskqueue.hpp"
#include "memory/padded.hpp"
#include "utilities/globalDefinitions.hpp"

//
// PSPromotionManager is used by a single thread to manage object survival
// during a scavenge. The promotion manager contains thread-local data only.
//
// NOTE! Be careful when allocating the stacks on the C heap. If you are
// going to use a promotion manager in more than one thread, the stacks
// MUST be allocated on the C heap. This can lead to memory leaks, though,
// as they are not automatically deallocated.
//
// FIX ME FIX ME Add a destructor, and don't rely on the user to drain/flush/deallocate!
//
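// A rough lifecycle sketch (illustrative only; PSScavenge drives the real
// sequence, and the calls per phase are simplified here -- obj and gc_tracer
// are placeholders):
//
//   PSPromotionManager::pre_scavenge();
//   PSPromotionManager* pm = PSPromotionManager::gc_thread_promotion_manager(0);
//   pm->push_contents(obj);        // scan an object's fields onto the stack
//   pm->drain_stacks(true);        // totally drain the claimed depth stack
//   pm->flush_labs();              // retire the promotion LABs
//   PSPromotionManager::post_scavenge(gc_tracer);
//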

class MutableSpace;
class PSOldGen;
class ParCompactionManager;

class PSPromotionManager {
  friend class PSScavenge;
  friend class ScavengeRootsTask;
  friend class PSRefProcTaskExecutor;
  friend class PSRefProcTask;

 private:
  static PaddedEnd<PSPromotionManager>* _manager_array;
  static OopStarTaskQueueSet*           _stack_array_depth;
  static PreservedMarksSet*             _preserved_marks_set;
  static PSOldGen*                      _old_gen;
  static MutableSpace*                  _young_space;

#if TASKQUEUE_STATS
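  // Per-manager counters: masked (chunked-array) task pushes and steals,
  // the number of arrays split into chunks, and the chunks processed.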
  size_t                              _masked_pushes;
  size_t                              _masked_steals;
  size_t                              _arrays_chunked;
  size_t                              _array_chunks_processed;

  void print_local_stats(outputStream* const out, uint i) const;
  static void print_taskqueue_stats();

  void reset_stats();
#endif // TASKQUEUE_STATS

  PSYoungPromotionLAB                 _young_lab;
  PSOldPromotionLAB                   _old_lab;
  bool                                _young_gen_is_full;
  bool                                _old_gen_is_full;

  OopStarTaskQueue                    _claimed_stack_depth;
  OverflowTaskQueue<oop, mtGC>        _claimed_stack_breadth;

  bool                                _totally_drain;
  uint                                _target_stack_size;

  uint                                _array_chunk_size;
  uint                                _min_array_size_for_chunking;

  PreservedMarks*                     _preserved_marks;
  PromotionFailedInfo                 _promotion_failed_info;

  // Accessors
  static PSOldGen* old_gen()         { return _old_gen; }
  static MutableSpace* young_space() { return _young_space; }

  inline static PSPromotionManager* manager_array(uint index);
  template <class T> inline void claim_or_forward_internal_depth(T* p);
  // On the task queues we push reference locations as well as
  // partially-scanned arrays (in the latter case, we push an oop to
  // the from-space image of the array, and the length field of the
  // from-space image indicates how many entries in the array we still
  // need to scan; this is basically how ParNew does partial array
  // scanning too). To be able to distinguish between reference
  // locations and partially-scanned array oops we simply mask the
  // latter oops with PS_CHUNKED_ARRAY_OOP_MASK. The next three methods
  // do the masking, unmasking, and checking whether the oop is masked
  // or not (see the illustrative round trip after the last of them).
  // Notice that the signature of the mask and unmask methods looks a
  // bit strange, as they accept and return different types (oop and
  // oop*). This is because of the difference in types between what
  // the task queue holds (oop*) and oops to partially-scanned arrays
  // (oop). We do all the necessary casting in the mask / unmask
  // methods to avoid sprinkling the rest of the code with more casts.

  // Masked oops are pushed onto the task queues, so PS_CHUNKED_ARRAY_OOP_MASK
  // (or any future mask) must not conflict with the COMPRESSED_OOP_MASK that
  // StarTask already uses; hence the value 0x2.
#define PS_CHUNKED_ARRAY_OOP_MASK  0x2

  bool is_oop_masked(StarTask p) {
    // A chunked-array task is always a full-width oop*, never a narrowOop*,
    // so checking the mask bit is sufficient.
    return (((intptr_t)(oop*)p) & PS_CHUNKED_ARRAY_OOP_MASK) ==
                                  PS_CHUNKED_ARRAY_OOP_MASK;
  }

  oop* mask_chunked_array_oop(oop obj) {
    assert(!is_oop_masked((oop*) obj), "invariant");
    oop* ret = (oop*) (cast_from_oop<uintptr_t>(obj) | PS_CHUNKED_ARRAY_OOP_MASK);
    assert(is_oop_masked(ret), "invariant");
    return ret;
  }

  oop unmask_chunked_array_oop(StarTask p) {
    assert(is_oop_masked(p), "invariant");
    assert(!p.is_narrow(), "chunked array oops cannot be narrow");
    oop *chunk = (oop*)p;  // cast p to oop* (uses StarTask's conversion operator)
    oop ret = oop((oop*)((uintptr_t)chunk & ~PS_CHUNKED_ARRAY_OOP_MASK));
    assert(!is_oop_masked((oop*) ret), "invariant");
    return ret;
  }

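  // Illustrative round trip for the helpers above (a sketch only; the real
  // pushes and pops live in psPromotionManager.inline.hpp and the .cpp file,
  // and big_array / queue_num are placeholders):
  //
  //   oop* task = mask_chunked_array_oop(big_array);  // tag the from-space oop
  //   push_depth(task);                               // enqueued like an oop*
  //   ...
  //   StarTask t;
  //   if (steal_depth(queue_num, t) && is_oop_masked(t)) {
  //     oop arr = unmask_chunked_array_oop(t);        // recover the array oop
  //   }
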
  // Helpers for partially-scanned arrays: process_array_chunk() takes the
  // from-space oop of a masked task and scans one chunk of the array;
  // process_array_chunk_work() scans the elements in [start, end).
  template <class T> void  process_array_chunk_work(oop obj,
                                                    int start, int end);
  void process_array_chunk(oop old);

  template <class T> void push_depth(T* p);

  inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
                                    uint age, bool tenured,
                                    const PSPromotionLAB* lab);

 protected:
  static OopStarTaskQueueSet* stack_array_depth()   { return _stack_array_depth; }
 public:
  // Static
  static void initialize();

  static void pre_scavenge();
  static bool post_scavenge(YoungGCTracer& gc_tracer);

  static PSPromotionManager* gc_thread_promotion_manager(uint index);
  static PSPromotionManager* vm_thread_promotion_manager();

  static bool steal_depth(int queue_num, StarTask& t);

  PSPromotionManager();

  // Accessors
  OopStarTaskQueue* claimed_stack_depth() {
    return &_claimed_stack_depth;
  }

  bool young_gen_is_full()             { return _young_gen_is_full; }

  bool old_gen_is_full()               { return _old_gen_is_full; }
  void set_old_gen_is_full(bool state) { _old_gen_is_full = state; }

  // Promotion methods
  // Copy o to a survivor space, or straight to the old generation when
  // promote_immediately is true, and return the new location.
  template<bool promote_immediately> oop copy_to_survivor_space(oop o);
  oop oop_promotion_failed(oop obj, markOop obj_mark);

  void reset();
  void register_preserved_marks(PreservedMarks* preserved_marks);
  static void restore_preserved_marks();

  void flush_labs();
  void drain_stacks(bool totally_drain) {
    drain_stacks_depth(totally_drain);
  }
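  // Drain only once the local stack has grown past the target size; this
  // bounds the local stack while keeping some entries available for stealing.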
  void drain_stacks_cond_depth() {
    if (claimed_stack_depth()->size() > _target_stack_size) {
      drain_stacks_depth(false);
    }
  }
  void drain_stacks_depth(bool totally_drain);

  bool stacks_empty() {
    return claimed_stack_depth()->is_empty();
  }

  inline void process_popped_location_depth(StarTask p);

  static bool should_scavenge(oop* p, bool check_to_space = false);
  static bool should_scavenge(narrowOop* p, bool check_to_space = false);

  template <class T, bool promote_immediately>
  void copy_and_push_safe_barrier(T* p);

  template <class T> inline void claim_or_forward_depth(T* p);

  TASKQUEUE_STATS_ONLY(inline void record_steal(StarTask& p);)

  void push_contents(oop obj);
};

#endif // SHARE_GC_PARALLEL_PSPROMOTIONMANAGER_HPP