/*
 * Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1EvacuationClosures.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct_bs(g1h->g1_barrier_set()),
    _closures(NULL),
    _hash_seed(17),
    _worker_id(worker_id),
    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
    _age_table(false),
    _scanner(g1h),
    _old_gen_is_full(false)
{
  _scanner.set_par_scan_thread_state(this);
  // We allocate young_cset_length plus one entries, since we "sacrifice"
  // entry 0 to keep track of surviving bytes for non-young regions
  // (where the age is -1).
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention.
  size_t real_length = 1 + young_cset_length;
  size_t array_length = PADDING_ELEM_NUM +
                        real_length +
                        PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL)
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, real_length * sizeof(size_t));

  _plab_allocator = G1PLABAllocator::create_allocator(_g1h->allocator());

  _dest[InCSetState::NotInCSet]    = InCSetState::NotInCSet;
  // The dest for Young is used when the objects are aged enough to
  // need to be moved to the next space.
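  // Note: dest() is only consulted once an object's age reaches
  // _tenuring_threshold (see next_state() below); younger objects
  // stay in the Young (survivor) space.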
  _dest[InCSetState::Young]        = InCSetState::Old;
  _dest[InCSetState::Old]          = InCSetState::Old;

  _closures = new_root_closures();
}

G1EvacuationRootClosures* G1ParScanThreadState::new_root_closures() {
  if (_g1h->collector_state()->during_initial_mark_pause()) {
    if (ClassUnloadingWithConcurrentMark) {
      return new G1InitalMarkClosures<G1MarkPromotedFromRoot>(_g1h, this);
    } else {
      return new G1InitalMarkClosures<G1MarkFromRoot>(_g1h, this);
    }
  } else {
    return new G1EvacuationClosures(_g1h, this, _g1h->collector_state()->gcs_are_young());
  }
}

// Pass locally gathered statistics to global state.
void G1ParScanThreadState::flush(size_t* surviving_young_words) {
  _dcq.flush();
  // Update allocation statistics.
  _plab_allocator->flush_and_retire_stats();
  _g1h->g1_policy()->record_age_table(&_age_table);

  uint length = _g1h->g1_policy()->young_cset_region_length();
  for (uint region_index = 0; region_index < length; region_index++) {
    surviving_young_words[region_index] += _surviving_young_words[region_index];
  }
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}

void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
  _plab_allocator->waste(wasted, undo_wasted);
}

#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
  oop p = oopDesc::load_decode_heap_oop(ref);
  assert(_g1h->is_in_g1_reserved(p),
         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  return true;
}

bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->obj_in_cs(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  } else {
    oop p = oopDesc::load_decode_heap_oop(ref);
    assert(_g1h->is_in_g1_reserved(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  }
  return true;
}

bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT

void G1ParScanThreadState::trim_queue() {
  StarTask ref;
  do {
    // Drain the overflow stack first, so other threads can steal.
    while (_refs->pop_overflow(ref)) {
      dispatch_reference(ref);
    }

    while (_refs->pop_local(ref)) {
      dispatch_reference(ref);
    }
  } while (!_refs->is_empty());
}

HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                      InCSetState* dest,
                                                      size_t word_sz,
                                                      AllocationContext_t const context,
                                                      bool previous_plab_refill_failed) {
  assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
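  // If the intended destination was a survivor region, fall back to an
  // old-gen PLAB; if it was already old gen, there is no further space to
  // try and the caller must treat this object's evacuation as failed.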
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                        word_sz,
                                                        context,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies for old gen.
      _old_gen_is_full = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _old_gen_is_full = previous_plab_refill_failed;
    assert(dest->is_old(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
    // No other space to try.
    return NULL;
  }
}

InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
  if (state.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return state;
    }
  }
  return dest(state);
}

void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
                                                  oop const old, size_t word_sz, uint age,
                                                  HeapWord * const obj_ptr,
                                                  const AllocationContext_t context) const {
  G1PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
  if (alloc_buf->contains(obj_ptr)) {
    _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
                                                             dest_state.value() == InCSetState::Old,
                                                             alloc_buf->word_sz());
  } else {
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz, age,
                                                              dest_state.value() == InCSetState::Old);
  }
}

oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
  // +1 to make the -1 indexes valid...
  const int young_index = from_region->young_index_in_cset() + 1;
  assert((from_region->is_young() && young_index > 0) ||
         (!from_region->is_young() && young_index == 0), "invariant");
  const AllocationContext_t context = from_region->allocation_context();

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  // The second clause is to prevent premature evacuation failure in case there
  // is still space in survivor, but old gen is full.
  if (_old_gen_is_full && dest_state.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
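  // Slow path: try a direct (out-of-PLAB) allocation or a new PLAB in the
  // intended space, then a PLAB in the next space via allocate_in_next_plab(),
  // and only if all of that fails fall back to evacuation failure handling.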
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context, plab_refill_failed);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
    if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
      // The events are checked individually as part of the actual commit.
      report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied).
        obj->set_mark(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark(old_mark->set_age(age));
      }
      _age_table.add(age, word_sz);
    } else {
      obj->set_mark(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    _surviving_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
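      // Pushing a masked pointer to the from-space object (rather than
      // scanning the whole array now) lets workers process and steal the
      // array one ParGCArrayScanChunk-sized chunk at a time.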
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      push_on_queue(old_p);
    } else {
      HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
      _scanner.set_region(to_region);
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return forward_ptr;
  }
}

G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  return _states[worker_id];
}

void G1ParScanThreadStateSet::add_cards_scanned(uint worker_id, size_t cards_scanned) {
  assert(worker_id < _n_workers, "out of bounds access");
  _cards_scanned[worker_id] += cards_scanned;
}

size_t G1ParScanThreadStateSet::total_cards_scanned() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _total_cards_scanned;
}

const size_t* G1ParScanThreadStateSet::surviving_young_words() const {
  assert(_flushed, "thread local state from the per thread states should have been flushed");
  return _surviving_young_words_total;
}

void G1ParScanThreadStateSet::flush() {
  assert(!_flushed, "thread local state from the per thread states should be flushed once");
  assert(_total_cards_scanned == 0, "should have been cleared");

  for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
    G1ParScanThreadState* pss = _states[worker_index];

    _total_cards_scanned += _cards_scanned[worker_index];

    pss->flush(_surviving_young_words_total);
    delete pss;
    _states[worker_index] = NULL;
  }
  _flushed = true;
}

oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
  assert(_g1h->obj_in_cs(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));

  oop forward_ptr = old->forward_to_atomic(old);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (!r->evacuation_failed()) {
      r->set_evacuation_failed(true);
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    _scanner.set_region(r);
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->obj_in_cs(forward_ptr),
           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
           "should not be in the CSet",
           p2i(old), p2i(forward_ptr));
    return forward_ptr;
  }
}