/*
 * Copyright (c) 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP
#define SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP

#include "classfile/javaClasses.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

// Process one marking task pulled from a worker's queue. The task is either
// a whole object (scan all its oops with cl), or one chunk of a large object
// array (scan just that chunk). COUNT_LIVENESS selects whether per-region
// live data is accumulated into live_data while marking.
template <class T, bool COUNT_LIVENESS>
void ShenandoahConcurrentMark::do_task(ShenandoahObjToScanQueue* q, T* cl, jushort* live_data, ShenandoahMarkTask* task) {
  oop obj = task->obj();

  shenandoah_assert_not_forwarded_except(NULL, obj, _heap->is_concurrent_traversal_in_progress() && _heap->cancelled_concgc());
  shenandoah_assert_marked_next(NULL, obj);
  shenandoah_assert_not_in_cset_except(NULL, obj, _heap->cancelled_concgc());

  if (task->is_not_chunked()) {
    if (COUNT_LIVENESS) count_liveness(live_data, obj);
    if (obj->is_instance()) {
      // Case 1: Normal oop, process as usual.
      obj->oop_iterate(cl);
    } else if (obj->is_objArray()) {
      // Case 2: Object array instance and no chunk is set. Must be the first
      // time we visit it, start the chunked processing.
      do_chunked_array_start<T>(q, cl, obj);
    } else {
      // Case 3: Primitive array. Do nothing, no oops there. We use the same
      // performance tweak TypeArrayKlass::oop_oop_iterate_impl is using:
      // We skip iterating over the klass pointer since we know that
      // Universe::TypeArrayKlass never moves.
      assert (obj->is_typeArray(), "should be type array");
    }
  } else {
    // Case 4: Array chunk, has sensible chunk id. Process it.
    do_chunked_array<T>(q, cl, obj, task->chunk(), task->pow());
  }
}

// Accumulate the live size of obj (object size plus its Brooks forwarding
// pointer word) into the worker-local live_data cache slot for the object's
// region. The slot is only a jushort: sizes that do not fit, or sums that
// would overflow the slot, are flushed directly to the region's global
// live-data counter instead.
inline void ShenandoahConcurrentMark::count_liveness(jushort* live_data, oop obj) {
  size_t region_idx = _heap->heap_region_index_containing(obj);
  jushort cur = live_data[region_idx];
  int size = obj->size() + BrooksPointer::word_size();
  int max = (1 << (sizeof(jushort) * 8)) - 1;
  if (size >= max) {
    // too big, add to region data directly
    _heap->regions()->get(region_idx)->increase_live_data_words(size);
  } else {
    int new_val = cur + size;
    if (new_val >= max) {
      // overflow, flush to region data
      _heap->regions()->get(region_idx)->increase_live_data_words(new_val);
      live_data[region_idx] = 0;
    } else {
      // still good, remember in locals
      live_data[region_idx] = (jushort) new_val;
    }
  }
}

// First visit of an object array: either scan it directly when it is small,
// or split it into chunked tasks (pushed onto q) so multiple workers can
// share the scanning work. The chunk/pow encoding is the one consumed by
// do_chunked_array() below.
template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array_start(ShenandoahObjToScanQueue* q, T* cl, oop obj) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);
  int len = array->length();

  if (len <= (int) ObjArrayMarkingStride*2) {
    // A few slices only, process directly
    array->oop_iterate_range(cl, 0, len);
  } else {
    int bits = log2_long(len);
    // Compensate for non-power-of-two arrays, cover the array in excess:
    if (len != (1 << bits)) bits++;

    // Only allow full chunks on the queue. This frees do_chunked_array() from checking from/to
    // boundaries against array->length(), touching the array header on every chunk.
    //
    // To do this, we cut the prefix in full-sized chunks, and submit them on the queue.
    // If the array is not divided in chunk sizes, then there would be an irregular tail,
    // which we will process separately.

    int last_idx = 0;

    int chunk = 1;
    int pow = bits;

    // Handle overflow
    if (pow >= 31) {
      assert (pow == 31, "sanity");
      pow--;
      chunk = 2;
      last_idx = (1 << pow);
      bool pushed = q->push(ShenandoahMarkTask(array, 1, pow));
      assert(pushed, "overflow queue should always succeed pushing");
    }

    // Split out tasks, as suggested in ObjArrayChunkedTask docs. Record the last
    // successful right boundary to figure out the irregular tail.
    while ((1 << pow) > (int)ObjArrayMarkingStride &&
           (chunk*2 < ShenandoahMarkTask::chunk_size())) {
      pow--;
      int left_chunk = chunk*2 - 1;
      int right_chunk = chunk*2;
      int left_chunk_end = left_chunk * (1 << pow);
      if (left_chunk_end < len) {
        bool pushed = q->push(ShenandoahMarkTask(array, left_chunk, pow));
        assert(pushed, "overflow queue should always succeed pushing");
        chunk = right_chunk;
        last_idx = left_chunk_end;
      } else {
        chunk = left_chunk;
      }
    }

    // Process the irregular tail, if present
    int from = last_idx;
    if (from < len) {
      array->oop_iterate_range(cl, from, len);
    }
  }
}

// Process one chunk of an object array. While the chunk is still larger than
// ObjArrayMarkingStride, keep halving it: push the right half back onto q and
// continue with the left half. Finally scan the remaining [from, to) range.
template <class T>
inline void ShenandoahConcurrentMark::do_chunked_array(ShenandoahObjToScanQueue* q, T* cl, oop obj, int chunk, int pow) {
  assert(obj->is_objArray(), "expect object array");
  objArrayOop array = objArrayOop(obj);

  assert (ObjArrayMarkingStride > 0, "sanity");

  // Split out tasks, as suggested in ObjArrayChunkedTask docs. Avoid pushing tasks that
  // are known to start beyond the array.
  while ((1 << pow) > (int)ObjArrayMarkingStride && (chunk*2 < ShenandoahMarkTask::chunk_size())) {
    pow--;
    chunk *= 2;
    bool pushed = q->push(ShenandoahMarkTask(array, chunk - 1, pow));
    assert(pushed, "overflow queue should always succeed pushing");
  }

  int chunk_size = 1 << pow;

  int from = (chunk - 1) * chunk_size;
  int to = chunk * chunk_size;

#ifdef ASSERT
  int len = array->length();
  assert (0 <= from && from < len, "from is sane: %d/%d", from, len);
  assert (0 < to && to <= len, "to is sane: %d/%d", to, len);
#endif

  array->oop_iterate_range(cl, from, to);
}

// Pop the next task for a worker: try the local buffer first, then the local
// queue, then the overflow list. Returns false when all three are empty.
inline bool ShenandoahConcurrentMark::try_queue(ShenandoahObjToScanQueue* q, ShenandoahMarkTask &task) {
  return (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task));
}

// Closure applied to completed SATB buffers: marks each recorded reference
// through the RESOLVE path, feeding newly marked objects into _queue.
class ShenandoahSATBBufferClosure : public SATBBufferClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
public:
  ShenandoahSATBBufferClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap())
  {
  }

  void do_buffer(void** buffer, size_t size) {
    for (size_t i = 0; i < size; ++i) {
      oop* p = (oop*) &buffer[i];
      ShenandoahConcurrentMark::mark_through_ref<oop, RESOLVE>(p, _heap, _queue);
    }
  }
};

// Drain one completed SATB buffer into q (if any buffer is available), then
// try to pop a task from q. Returns true only when a buffer was drained and
// a task could be popped.
inline bool ShenandoahConcurrentMark::try_draining_satb_buffer(ShenandoahObjToScanQueue *q, ShenandoahMarkTask &task) {
  ShenandoahSATBBufferClosure cl(q);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  bool had_refs = satb_mq_set.apply_closure_to_completed_buffer(&cl);
  return had_refs && try_queue(q, task);
}

// Convenience overload of the main mark_through_ref below, with string
// deduplication disabled.
template<class T, UpdateRefsMode UPDATE_REFS>
inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q) {
  ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, false /* string dedup */>(p, heap, q, NULL);
}

// Mark the object referenced from location p, if any. Depending on
// UPDATE_REFS, the reference may first be resolved or updated through the
// forwarding pointer. The object is then marked on the next-bitmap via
// heap->mark_next(); when that call reports a successful mark, a new task for
// the object is pushed onto q. With STRING_DEDUP enabled, objects that are
// dedup candidates are additionally enqueued to dq.
template<class T, UpdateRefsMode UPDATE_REFS, bool STRING_DEDUP>
inline void ShenandoahConcurrentMark::mark_through_ref(T *p, ShenandoahHeap* heap, ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    switch (UPDATE_REFS) {
    case NONE:
      break;
    case RESOLVE:
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      break;
    case SIMPLE:
      // We piggy-back reference updating to the marking tasks.
      obj = heap->update_with_forwarded_not_null(p, obj);
      break;
    case CONCURRENT:
      obj = heap->maybe_update_with_forwarded_not_null(p, obj);
      break;
    default:
      ShouldNotReachHere();
    }

    // Note: Only when concurrently updating references can obj become NULL here.
    // It happens when a mutator thread beats us by writing another value. In that
    // case we don't need to do anything else.
    if (UPDATE_REFS != CONCURRENT || !oopDesc::is_null(obj)) {
      shenandoah_assert_not_forwarded(p, obj);
      shenandoah_assert_not_in_cset_except(p, obj, heap->cancelled_concgc());

      if (heap->mark_next(obj)) {
        bool pushed = q->push(ShenandoahMarkTask(obj));
        assert(pushed, "overflow queue should always succeed pushing");

        if (STRING_DEDUP && ShenandoahStringDedup::is_candidate(obj)) {
          assert(ShenandoahStringDedup::is_enabled(), "Must be enabled");
          assert(dq != NULL, "Dedup queue not set");
          ShenandoahStringDedup::enqueue_candidate(obj, dq);
        }
      }

      shenandoah_assert_marked_next(p, obj);
    }
  }
}

#endif // SHARE_VM_GC_SHENANDOAH_SHENANDOAHCONCURRENTMARK_INLINE_HPP