/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
#define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP

#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/ticks.inline.hpp"

template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  // The reference should never be NULL here, as NULL references are never
  // pushed onto the task queue.
  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, (benign) races in the claim mechanism during RSet scanning can cause
  // more than one thread to claim the same card. The same card may therefore
  // be processed multiple times, which means we can see references into old
  // gen here, so we need to redo this check.
  const InCSetState in_cset_state = _g1h->in_cset_state(obj);
  if (in_cset_state.is_in_cset()) {
    markOop m = obj->mark_raw();
    if (m->is_marked()) {
      obj = (oop) m->decode_pointer();
    } else {
      obj = copy_to_survivor_space(in_cset_state, obj, m);
    }
    RawAccess<OOP_NOT_NULL>::oop_store(p, obj);
  } else if (in_cset_state.is_humongous()) {
    _g1h->set_humongous_is_live(obj);
  } else {
    assert(in_cset_state.is_default(),
           "In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value());
  }

  assert(obj != NULL, "Must be");
  if (!HeapRegion::is_in_same_region(p, obj)) {
    HeapRegion* from = _g1h->heap_region_containing(p);
    update_rs(from, p, obj);
  }
}

template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
  assert(verify_ref(ref), "sanity");
  _refs->push(ref);
}
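
// Scanning a large object array is split into chunks of roughly
// ParGCArrayScanChunk elements so that several workers can share the work.
// Such a task is encoded as an oop* tagged with the partial array mask (see
// has_partial_array_mask() and clear_partial_array_mask()) that refers to the
// from-space copy of the array: its length field still holds the real length,
// while the length field of the to-space copy tracks the next chunk's start
// index until the final chunk restores it.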
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         "invariant, next index: %d, length: %d", next_index, length);

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

inline void G1ParScanThreadState::deal_with_reference(oop* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    do_oop_evac(ref_to_scan);
  } else {
    do_oop_partial_array(ref_to_scan);
  }
}

inline void G1ParScanThreadState::deal_with_reference(narrowOop* ref_to_scan) {
  assert(!has_partial_array_mask(ref_to_scan), "NarrowOop* elements should never be partial arrays.");
  do_oop_evac(ref_to_scan);
}

inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
  StarTask stolen_task;
  while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
    assert(verify_task(stolen_task), "sanity");
    dispatch_reference(stolen_task);

    // We've just processed a reference and we might have made
    // available new entries on the queues. So we have to make sure
    // we drain the queues as necessary.
    trim_queue();
  }
}
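
// Partial trimming is bounded by two watermarks on the queue size, which act
// as a hysteresis band: trimming starts once the queue grows beyond the upper
// threshold (or anything sits on the overflow stack) and keeps draining until
// the queue has shrunk below the lower threshold and the overflow stack is
// empty, rather than toggling on and off around a single cut-off.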
inline bool G1ParScanThreadState::should_start_trim_queue_partially() const {
  return !_refs->overflow_empty() || _refs->size() > _stack_drain_upper_threshold;
}

inline bool G1ParScanThreadState::should_end_trim_queue_partially() const {
  return _refs->overflow_empty() && _refs->size() <= _stack_drain_lower_threshold;
}

inline void G1ParScanThreadState::trim_queue_partially_internal() {
  StarTask ref;
  do {
    // Drain the overflow stack first, so other threads can potentially steal.
    while (_refs->pop_overflow(ref)) {
      if (!_refs->try_push_to_taskqueue(ref)) {
        dispatch_reference(ref);
      }
    }

    while (_refs->pop_local(ref, _stack_drain_lower_threshold)) {
      dispatch_reference(ref);
    }
  } while (!should_end_trim_queue_partially());
}

inline Tickspan G1ParScanThreadState::trim_queue_partially() {
  Tickspan result;
  if (should_start_trim_queue_partially()) {
    const Ticks start = Ticks::now();
    trim_queue_partially_internal();
    result = Ticks::now() - start;
  }
  return result;
}

#endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP