/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
#define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP

#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"

template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  // The reference should never be NULL here, as such references are never
  // pushed to the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, (benign) races in the claim mechanism during RSet scanning can lead
  // to more than one thread claiming the same card. The same card may
  // therefore be processed multiple times, and so we might get references
  // into the old generation here, which is why we need to redo this check.
  const InCSetState in_cset_state = _g1h->in_cset_state(obj);
  // References pushed onto the work stack should never point to a humongous
  // region, as such references are not added to the collection set due to the
  // above precondition.
  assert(!in_cset_state.is_humongous(),
         "Obj " PTR_FORMAT " should not refer to humongous region %u from " PTR_FORMAT,
         p2i(obj), _g1h->addr_to_region((HeapWord*)obj), p2i(p));

  if (!in_cset_state.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }

  markOop m = obj->mark_raw();
  if (m->is_marked()) {
    // The object has already been evacuated; the mark word holds the
    // forwarding pointer.
    obj = (oop) m->decode_pointer();
  } else {
    obj = copy_to_survivor_space(in_cset_state, obj, m);
  }
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  assert(obj != NULL, "Must be");
  if (HeapRegion::is_in_same_region(p, obj)) {
    return;
  }
  HeapRegion* from = _g1h->heap_region_containing(p);
  if (!from->is_young()) {
    enqueue_card_if_tracked(p, obj);
  }
}

template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
  assert(verify_ref(ref), "sanity");
  _refs->push(ref);
}

inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
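  // Note: while chunked processing of this array is in flight, the length
  // field of the to-space copy is repurposed below to hold the next chunk's
  // start index, so only the from-space copy can be trusted for the actual
  // length.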
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         "invariant, next index: %d, length: %d", next_index, length);

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }

  HeapRegion* hr = _g1h->heap_region_containing(to_obj);
  _scanner.set_scanning_in_young(hr->is_young());
  // Process indexes [start,end). oop_iterate_range() will also process the
  // header along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not correct
  // (except when we have just restored it for the final chunk), given that
  // we are using it to keep track of the next start index.
  // oop_iterate_range() (thankfully!) ignores the length field and relies
  // only on the start / end parameters. It does, however, return the size of
  // the object, which will be incorrect, so we have to ignore it even if we
  // wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

inline void G1ParScanThreadState::deal_with_reference(oop* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    do_oop_evac(ref_to_scan);
  } else {
    do_oop_partial_array(ref_to_scan);
  }
}

inline void G1ParScanThreadState::deal_with_reference(narrowOop* ref_to_scan) {
  assert(!has_partial_array_mask(ref_to_scan), "NarrowOop* elements should never be partial arrays.");
  do_oop_evac(ref_to_scan);
}

inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
  StarTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    assert(verify_task(stolen_task), "sanity");
    dispatch_reference(stolen_task);

    // We've just processed a reference and we might have made new entries
    // available on the queues. So we have to make sure we drain them as
    // necessary.
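    // trim_queue() drains our own queues completely, so we only attempt
    // another steal once this worker is out of local work.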
    trim_queue();
  }
}

// Partial trimming uses hysteresis: trimming starts once the queue grows past
// the upper threshold (or anything sits on the overflow stack) and proceeds
// down to the lower threshold, so that the next few pushes do not immediately
// trigger another round of trimming.
inline bool G1ParScanThreadState::needs_partial_trimming() const {
  return !_refs->overflow_empty() || _refs->size() > _stack_trim_upper_threshold;
}

inline bool G1ParScanThreadState::is_partially_trimmed() const {
  return _refs->overflow_empty() && _refs->size() <= _stack_trim_lower_threshold;
}

inline void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  StarTask ref;
  // Drain the overflow stack first, so other threads can potentially steal.
  // Entries that do not fit back onto the task queue are processed directly.
  while (_refs->pop_overflow(ref)) {
    if (!_refs->try_push_to_taskqueue(ref)) {
      dispatch_reference(ref);
    }
  }

  // Then trim the local queue down to the given threshold.
  while (_refs->pop_local(ref, threshold)) {
    dispatch_reference(ref);
  }
}

inline void G1ParScanThreadState::trim_queue_partially() {
  if (!needs_partial_trimming()) {
    return;
  }

  const Ticks start = Ticks::now();
  do {
    trim_queue_to_threshold(_stack_trim_lower_threshold);
  } while (!is_partially_trimmed());
  _trim_ticks += Ticks::now() - start;
}

inline Tickspan G1ParScanThreadState::trim_ticks() const {
  return _trim_ticks;
}

inline void G1ParScanThreadState::reset_trim_ticks() {
  _trim_ticks = Tickspan();
}

#endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP