/*
 * Copyright (c) 2014, 2016, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
#define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP

#include "gc/g1/dirtyCardQueue.hpp"
#include "gc/g1/g1CardTable.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1OopClosures.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/shared/ageTable.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"

class G1PLABAllocator;
class G1EvacuationRootClosures;
class HeapRegion;
class outputStream;

class G1ParScanThreadState : public CHeapObj<mtGC> {
 private:
  G1CollectedHeap* _g1h;
  RefToScanQueue*  _refs;
  DirtyCardQueue   _dcq;
  G1CardTable*     _ct;
  G1EvacuationRootClosures* _closures;

  G1PLABAllocator* _plab_allocator;

  AgeTable         _age_table;
  InCSetState      _dest[InCSetState::Num];
  // Local tenuring threshold.
  uint             _tenuring_threshold;
  G1ParScanClosure _scanner;

  int  _hash_seed;
  uint _worker_id;

  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. _surviving_young_words_base is what we get back
  // from the malloc call.
  size_t* _surviving_young_words_base;
  // This points into the array, as we use the first few entries for padding.
  size_t* _surviving_young_words;
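
  // An illustrative sketch of the layout, inferred from the comments above
  // (the actual allocation happens in the .cpp file):
  //
  //   _surviving_young_words_base -> [ padding ][ e0 ][ e1 ][ e2 ] ...
  //   _surviving_young_words -------------------^
  //
  // where e0 accumulates words surviving from non-young ("age -1") regions
  // and e1, e2, ... correspond to young regions of increasing age index.
  // The padding is a cache line's worth of entries (see PADDING_ELEM_NUM
  // below), presumably so that concurrent updates of these counters by
  // different workers do not false-share a cache line.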
  // Indicates whether in the last generation (old) there is no more space
  // available for allocation.
  bool _old_gen_is_full;

#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

  DirtyCardQueue& dirty_card_queue() { return _dcq; }
  G1CardTable*    ct()               { return _ct; }

  InCSetState dest(InCSetState original) const {
    assert(original.is_valid(),
           "Original state invalid: " CSETSTATE_FORMAT, original.value());
    assert(_dest[original.value()].is_valid_gen(),
           "Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value());
    return _dest[original.value()];
  }

 public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
  virtual ~G1ParScanThreadState();

  void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); }

#ifdef ASSERT
  bool queue_is_empty() const { return _refs->is_empty(); }

  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void do_oop_ext(T* ref);
  template <class T> void push_on_queue(T* ref);

  template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
    // If the new value of the field points into the same region, or if the
    // field itself is located in the to-space, we don't need to include it
    // in the remembered set updates.
    if (!HeapRegion::is_in_same_region(p, o) && !from->is_young()) {
      size_t card_index = ct()->index_for(p);
      // If the card hasn't been added to the buffer yet, do it now.
      if (ct()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
      }
    }
  }

  G1EvacuationRootClosures* closures() { return _closures; }
  uint worker_id() { return _worker_id; }

  // Returns, via the out parameters, the current amount of waste due to
  // alignment or to objects not fitting into the LABs, as well as the
  // undo waste.
  virtual void waste(size_t& wasted, size_t& undo_wasted);

  size_t* surviving_young_words() {
    // We add one to hide entry 0, which accumulates surviving words for
    // age -1 regions (i.e. non-young ones).
    return _surviving_young_words + 1;
  }

  void flush(size_t* surviving_young_words);

 private:
#define G1_PARTIAL_ARRAY_MASK 0x2

  inline bool has_partial_array_mask(oop* ref) const {
    return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
  }

  // We never encode partial array oops as narrowOop*, so return false immediately.
  // This allows the compiler to create optimized code when popping references from
  // the work queue.
  inline bool has_partial_array_mask(narrowOop* ref) const {
    assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK,
           "Partial array oop reference encoded as narrowOop*");
    return false;
  }
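
  // A rough worked example of the tagging scheme used by the partial array
  // helpers here and below (the address is made up for illustration): oops
  // are at least 4-byte aligned, so bit 0x2 of a real oop address is always
  // zero and can carry the "partial array" tag, as the assert in
  // set_partial_array_mask() below checks.
  //
  //   oop  obj    = ...;                              // e.g. 0x....5670
  //   oop* masked = set_partial_array_mask(obj);      // ->   0x....5672
  //   has_partial_array_mask(masked);                 // -> true
  //   oop  orig   = clear_partial_array_mask(masked); // ->   0x....5670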
  // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
  // We always encode partial arrays as regular oops, to allow the narrowOop
  // specialization of has_partial_array_mask() above.
  // This means that unintentional use of this method with narrowOops is caught
  // by the compiler.
  inline oop* set_partial_array_mask(oop obj) const {
    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
  }

  inline oop clear_partial_array_mask(oop* ref) const {
    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
  }

  inline void do_oop_partial_array(oop* p);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);

  template <class T> inline void deal_with_reference(T* ref_to_scan);

  inline void dispatch_reference(StarTask ref);

  // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
  // allocate into dest. state is the original (source) cset state of the object
  // being allocated. previous_plab_refill_failed indicates whether a previous
  // PLAB refill into "state" failed.
  // Returns a non-NULL pointer if successful, and updates dest if required.
  // Also determines whether we should continue to try to allocate into the various
  // generations or stop trying to allocate altogether.
  HeapWord* allocate_in_next_plab(InCSetState const state,
                                  InCSetState* dest,
                                  size_t word_sz,
                                  AllocationContext_t const context,
                                  bool previous_plab_refill_failed);

  inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);

  void report_promotion_event(InCSetState const dest_state,
                              oop const old, size_t word_sz, uint age,
                              HeapWord * const obj_ptr, const AllocationContext_t context) const;
 public:

  oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);

  void trim_queue();

  inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);

  // An attempt to evacuate "obj" has failed; take necessary steps.
  oop handle_evacuation_failure_par(oop obj, markOop m);
};
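
// A rough sketch of the per-worker flow during an evacuation pause, inferred
// from the declarations above (the surrounding worker loop and queue set are
// defined elsewhere; the details here are assumptions, not the actual
// implementation):
//
//   G1ParScanThreadState* pss = per_thread_states.state_for_worker(worker_id);
//   // Root scanning pushes discovered references with push_on_queue();
//   // the worker then drains its own queue and steals from other workers.
//   pss->trim_queue();
//   pss->steal_and_trim_queue(task_queues);
//
// While draining, objects still in the collection set are moved with
// copy_to_survivor_space(), which falls back to allocate_in_next_plab() when
// the destination PLAB cannot be refilled, and to
// handle_evacuation_failure_par() when no space is left at all.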

class G1ParScanThreadStateSet : public StackObj {
  G1CollectedHeap* _g1h;
  G1ParScanThreadState** _states;
  size_t* _surviving_young_words_total;
  size_t* _cards_scanned;
  size_t _total_cards_scanned;
  size_t _young_cset_length;
  uint _n_workers;
  bool _flushed;

 public:
  G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
      _g1h(g1h),
      _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
      _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
      _cards_scanned(NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC)),
      _total_cards_scanned(0),
      _young_cset_length(young_cset_length),
      _n_workers(n_workers),
      _flushed(false) {
    for (uint i = 0; i < n_workers; ++i) {
      _states[i] = NULL;
    }
    memset(_surviving_young_words_total, 0, young_cset_length * sizeof(size_t));
    memset(_cards_scanned, 0, n_workers * sizeof(size_t));
  }

  ~G1ParScanThreadStateSet() {
    assert(_flushed, "thread local state from the per thread states should have been flushed");
    FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
    FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
    FREE_C_HEAP_ARRAY(size_t, _cards_scanned);
  }

  void flush();

  G1ParScanThreadState* state_for_worker(uint worker_id);

  void add_cards_scanned(uint worker_id, size_t cards_scanned);
  size_t total_cards_scanned() const;
  const size_t* surviving_young_words() const;

 private:
  G1ParScanThreadState* new_par_scan_state(uint worker_id, size_t young_cset_length);
};

#endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP