/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
#define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP

#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/shared/ageTable.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"

class G1PLABAllocator;
class G1EvacuationRootClosures;
class HeapRegion;
class outputStream;

class G1ParScanThreadState : public CHeapObj<mtGC> {
 private:
  G1CollectedHeap* _g1h;
  RefToScanQueue*  _refs;
  DirtyCardQueue   _dcq;
  G1CardTable*     _ct;
  G1EvacuationRootClosures* _closures;

  G1PLABAllocator*  _plab_allocator;

  AgeTable          _age_table;
  InCSetState       _dest[InCSetState::Num];
  // Local tenuring threshold.
  uint              _tenuring_threshold;
  G1ScanEvacuatedObjClosure  _scanner;

  int  _hash_seed;
  uint _worker_id;

  // Map from young-age-index (0 == not young, 1 == youngest) to the number
  // of surviving words. _surviving_young_words_base is the pointer returned
  // by the malloc call.
  size_t* _surviving_young_words_base;
  // This points into the array, as we use the first few entries for padding.
  size_t* _surviving_young_words;

  // Indicates whether the last generation (old) has no more space available
  // for allocation.
  bool _old_gen_is_full;

#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

  DirtyCardQueue& dirty_card_queue()             { return _dcq;  }
  G1CardTable* ct()                              { return _ct; }

  InCSetState dest(InCSetState original) const {
    assert(original.is_valid(),
           "Original state invalid: " CSETSTATE_FORMAT, original.value());
    assert(_dest[original.value()].is_valid_gen(),
           "Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value());
    return _dest[original.value()];
  }
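
  // Illustrative sketch of the _dest mapping (an assumption for clarity, not
  // a verbatim excerpt): the constructor establishes, per source cset state,
  // the generation that live objects of that state are copied to, e.g.
  //
  //   _dest[InCSetState::Young] = InCSetState::Young;  // copy to survivor
  //   _dest[InCSetState::Old]   = InCSetState::Old;    // keep in old gen
  //
  // so dest(state) answers "which generation does an object in `state` go
  // to", and the mapping may be redirected to Old when survivor space runs out.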
 public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
  virtual ~G1ParScanThreadState();

  void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); }

#ifdef ASSERT
  bool queue_is_empty() const { return _refs->is_empty(); }

  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void do_oop_ext(T* ref);
  template <class T> void push_on_queue(T* ref);

  template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
    assert(!HeapRegion::is_in_same_region(p, o), "Caller should have filtered out same-region references already.");
    // If the field originates from the to-space, we don't need to include it
    // in the remembered set updates.
    if (!from->is_young()) {
      size_t card_index = ct()->index_for(p);
      // If the card hasn't been added to the buffer yet, do it now.
      if (ct()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
      }
    }
  }
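
  // A minimal usage sketch for update_rs() (hypothetical caller; the names
  // `p`, `obj` and `from` are assumptions, not part of this class). Callers
  // are expected to filter out same-region references first, then record the
  // cross-region pointer so the card gets refined later:
  //
  //   oop obj = *p;                            // assuming T == oop here
  //   if (!HeapRegion::is_in_same_region(p, obj)) {
  //     update_rs(from, p, obj);               // defers a dirty-card enqueue
  //   }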

  G1EvacuationRootClosures* closures() { return _closures; }
  uint worker_id() { return _worker_id; }

  // Returns the current amount of waste due to alignment or objects not
  // fitting into the LABs, plus the undo waste.
  virtual void waste(size_t& wasted, size_t& undo_wasted);

  size_t* surviving_young_words() {
    // We add one to hide entry 0, which accumulates surviving words for
    // age -1 regions (i.e. non-young ones).
    return _surviving_young_words + 1;
  }
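
  // Illustrative indexing sketch (hypothetical caller; `young_index` and
  // `survived_word_size` are assumed names). Entry 0 of the underlying array
  // is hidden by the +1 above, so the entry for the young region with 1-based
  // young-index i lands at offset i - 1 of the returned pointer:
  //
  //   size_t* words = surviving_young_words();
  //   words[young_index - 1] += survived_word_size;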

  void flush(size_t* surviving_young_words);

 private:
  #define G1_PARTIAL_ARRAY_MASK 0x2

  inline bool has_partial_array_mask(oop* ref) const {
    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
  }

  // We never encode partial array oops as narrowOop*, so return false immediately.
  // This allows the compiler to create optimized code when popping references from
  // the work queue.
  inline bool has_partial_array_mask(narrowOop* ref) const {
    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
    return false;
  }

  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
  // We always encode partial arrays as regular oops, to allow the
  // specialization of has_partial_array_mask() for narrowOops above.
  // This means that unintentional use of this method with narrowOops is caught
  // by the compiler.
  inline oop* set_partial_array_mask(oop obj) const {
    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
  }

  inline oop clear_partial_array_mask(oop* ref) const {
    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
  }
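
  // Illustrative round trip (not part of this class; `obj` is an assumed
  // name): a reference to a partially scanned object array is tagged before
  // being pushed on the work queue and decoded again when popped:
  //
  //   oop* masked = set_partial_array_mask(obj);       // tag with 0x2
  //   assert(has_partial_array_mask(masked), "must be tagged");
  //   oop restored = clear_partial_array_mask(masked); // strip the tag
  //   assert(restored == obj, "round trip must be lossless");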

  inline void do_oop_partial_array(oop* p);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);

  template <class T> inline void deal_with_reference(T* ref_to_scan);

  inline void dispatch_reference(StarTask ref);

  // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
  // allocate into dest. state is the original (source) cset state of the object
  // being allocated for. previous_plab_refill_failed indicates whether a previous
  // PLAB refill into "state" failed.
  // Returns a non-NULL pointer if successful, and updates dest if required.
  // Also determines whether we should continue to try to allocate into the various
  // generations or just stop trying to allocate.
  HeapWord* allocate_in_next_plab(InCSetState const state,
                                  InCSetState* dest,
                                  size_t word_sz,
                                  AllocationContext_t const context,
                                  bool previous_plab_refill_failed);
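
  // A sketch of the intended call pattern (a hypothetical caller using only
  // members declared in this header; `word_sz` and `context` as above):
  //
  //   InCSetState dest_state = dest(state);   // e.g. Young -> Young
  //   // ... a PLAB allocation into dest_state failed and its refill failed:
  //   bool refill_failed = true;
  //   HeapWord* obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz,
  //                                             context, refill_failed);
  //   if (obj_ptr == NULL) {
  //     // no space left in any generation: evacuation failure handling
  //   }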

  inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);

  void report_promotion_event(InCSetState const dest_state,
                              oop const old, size_t word_sz, uint age,
                              HeapWord * const obj_ptr, const AllocationContext_t context) const;
 public:

  oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);

  void trim_queue();

  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);

  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(oop obj, markOop m);
};

class G1ParScanThreadStateSet : public StackObj {
  G1CollectedHeap* _g1h;
  G1ParScanThreadState** _states;
  size_t* _surviving_young_words_total;
  size_t _young_cset_length;
  uint _n_workers;
  bool _flushed;

 public:
  G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length);
  ~G1ParScanThreadStateSet();

  void flush();

  G1ParScanThreadState* state_for_worker(uint worker_id);

  const size_t* surviving_young_words() const;

 private:
  G1ParScanThreadState* new_par_scan_state(uint worker_id, size_t young_cset_length);
};
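
// Illustrative usage sketch (hypothetical driver code, not part of this
// file): the set is created once per evacuation, each worker thread fetches
// its own per-thread state, and results are flushed after all workers finish:
//
//   G1ParScanThreadStateSet per_thread_states(g1h, n_workers, young_cset_length);
//   G1ParScanThreadState* pss = per_thread_states.state_for_worker(worker_id);
//   // ... pss->trim_queue(), pss->steal_and_trim_queue(queues), etc. ...
//   per_thread_states.flush();   // once, after all workers are done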

#endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP