/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct(g1h->card_table()),
    _closures(NULL),
    _hash_seed(17),
    _worker_id(worker_id),
    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
    _age_table(false),
    _scanner(g1h, this),
    _old_gen_is_full(false)
{
  // We allocate young_cset_length plus one entries, since we "sacrifice"
  // entry 0 to keep track of surviving bytes for non-young regions
  // (where the age is -1).
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  size_t real_length = 1 + young_cset_length;
  size_t array_length = PADDING_ELEM_NUM +
                        real_length +
                        PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL) {
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  }
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, real_length * sizeof(size_t));

  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

  _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
  _dest[InCSetState::Young] = InCSetState::Old;
  _dest[InCSetState::Old]   = InCSetState::Old;

  _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
}
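
// Rough picture (a sketch, not normative) of the buffer allocated above;
// the padding elements exist only to reduce cache-line contention between
// the per-thread arrays:
//
//   _surviving_young_words_base
//   v
//   [ PADDING_ELEM_NUM ][ entry 0 ][ entries 1..young_cset_length ][ PADDING_ELEM_NUM ]
//                         ^
//                         _surviving_young_words; entry 0 accumulates bytes
//                         surviving from non-young regions (age -1).
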
// Pass locally gathered statistics to global state.
void G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _dcq.flush();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->g1_policy()->record_age_table(&_age_table);

  uint length = _g1h->collection_set()->young_region_length();
  for (uint region_index = 0; region_index < length; region_index++) {
    surviving_young_words[region_index] += _surviving_young_words[region_index];
  }
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}

void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
  _plab_allocator->waste(wasted, undo_wasted);
}

#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
  oop p = RawAccess<>::oop_load(ref);
  assert(_g1h->is_in_g1_reserved(p),
         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  return true;
}

bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->is_in_cset(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  } else {
    oop p = RawAccess<>::oop_load(ref);
    assert(_g1h->is_in_g1_reserved(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  }
  return true;
}

bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT

void G1ParScanThreadState::trim_queue() {
  StarTask ref;
  do {
    // Drain the overflow stack first, so other threads can steal.
    while (_refs->pop_overflow(ref)) {
      if (!_refs->try_push_to_taskqueue(ref)) {
        dispatch_reference(ref);
      }
    }

    while (_refs->pop_local(ref)) {
      dispatch_reference(ref);
    }
  } while (!_refs->is_empty());
}
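
// Usage sketch for trim_queue() (simplified pseudo-code; the real driver is
// G1's evacuate-followers closure, and 'queues', 'seed' and 'terminator' are
// assumed names for its state, not members of this class):
//
//   do {
//     pss->trim_queue();                                 // drain our own queue
//     StarTask stolen;
//     while (queues->steal(worker_id, &seed, stolen)) {  // pull work from peers
//       /* dispatch the stolen task, then trim again */
//       pss->trim_queue();
//     }
//   } while (!terminator->offer_termination());          // stop once all queues are empty
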
HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                      InCSetState* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed) {
  assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making the survivor gen unavailable for allocation applies to old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
    // No other space to try.
    return NULL;
  }
}

InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
  if (state.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return state;
    }
  }
  return dest(state);
}

void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr) const {
  PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
                                                             dest_state.value() == InCSetState::Old,
                                                             alloc_buf->word_sz());
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
                                                              dest_state.value() == InCSetState::Old);
  }
}
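
// Summary of the allocation fallback chain used during evacuation (a reading
// aid distilled from copy_to_survivor_space() below, not extra behavior):
//
//   plab_allocate(dest, sz)                          // fast path: bump pointer in the current PLAB
//    -> allocate_direct_or_new_plab(dest, sz)        // refill the PLAB, or allocate outside it
//     -> allocate_in_next_plab(...)                  // survivor exhausted? retry in old gen
//      -> handle_evacuation_failure_par(old, mark)   // everything failed: forward old to itself
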
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing(old);
  // +1 to make the -1 indexes valid...
  const int young_index = from_region->young_index_in_cset() + 1;
  assert((from_region->is_young() && young_index > 0) ||
         (!from_region->is_young() && young_index == 0), "invariant");

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_state.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, &plab_refill_failed);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, plab_refill_failed);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit.
      report_promotion_event(dest_state, old, word_sz, age, obj_ptr);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj would look to be forwarded (the old mark word,
        // which contains the forwarding pointer, was copied).
        obj->set_mark_raw(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark_raw(old_mark->set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark_raw(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    _surviving_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      push_on_queue(old_p);
    } else {
      HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
      _scanner.set_region(to_region);
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
    return forward_ptr;
  }
}

G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] = new G1ParScanThreadState(_g1h, worker_id, _young_cset_length);
  }
  return _states[worker_id];
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");

  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    if (pss == NULL) {
      continue;
    }

    pss->flush(_surviving_young_words_total);
    delete pss;
    _states[worker_index] = NULL;
  }
  _flushed = true;
}
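
// The races resolved by handle_evacuation_failure_par() below, in brief (a
// reading aid; "CAS" stands for the atomic forward_to_atomic() call):
//
//   CAS(old->mark: unforwarded -> old) succeeds    // we own the failure: the object
//                                                  // stays in place and is rescanned
//   CAS fails, forward_ptr == old                  // another thread self-forwarded first
//   CAS fails, forward_ptr != old                  // another thread copied it after all
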
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
  assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (!r->evacuation_failed()) {
      r->set_evacuation_failed(true);
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    _scanner.set_region(r);
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->is_in_cset(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}

G1ParScanThreadStateSet::G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length) :
    _g1h(g1h),
    _states(NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC)),
    _surviving_young_words_total(NEW_C_HEAP_ARRAY(size_t, young_cset_length, mtGC)),
    _young_cset_length(young_cset_length),
    _n_workers(n_workers),
    _flushed(false) {
  for (uint i = 0; i < n_workers; ++i) {
    _states[i] = NULL;
  }
  memset(_surviving_young_words_total, 0, young_cset_length * sizeof(size_t));
}

G1ParScanThreadStateSet::~G1ParScanThreadStateSet() {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  FREE_C_HEAP_ARRAY(G1ParScanThreadState*, _states);
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_total);
}
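
// Lifecycle of the per-thread state set (a usage sketch assembled from the
// code above; the surrounding collector code is paraphrased, not quoted):
//
//   G1ParScanThreadStateSet pss_set(g1h, n_workers, cset->young_region_length());
//   // each worker: pss_set.state_for_worker(id) lazily creates its state,
//   // which then evacuates via copy_to_survivor_space() / trim_queue()
//   pss_set.flush();   // merge per-worker stats into the set, delete the states
//   // ~G1ParScanThreadStateSet() asserts that flush() has run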