@@ -80,64 +80,64 @@
 
   uint length = _g1h->g1_policy()->young_cset_region_length();
   for (uint region_index = 0; region_index < length; region_index++) {
     surviving_young_words[region_index] += _surviving_young_words[region_index];
   }
 }
 
 G1ParScanThreadState::~G1ParScanThreadState() {
   delete _plab_allocator;
   FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
 }
 
 void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
   _plab_allocator->waste(wasted, undo_wasted);
 }
 
 #ifdef ASSERT
 bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
   assert(ref != NULL, "invariant");
   assert(UseCompressedOops, "sanity");
-  assert(!has_partial_array_mask(ref), err_msg("ref=" PTR_FORMAT, p2i(ref)));
+  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
   oop p = oopDesc::load_decode_heap_oop(ref);
   assert(_g1h->is_in_g1_reserved(p),
-         err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   return true;
 }
 
 bool G1ParScanThreadState::verify_ref(oop* ref) const {
   assert(ref != NULL, "invariant");
   if (has_partial_array_mask(ref)) {
     // Must be in the collection set--it's already been copied.
     oop p = clear_partial_array_mask(ref);
     assert(_g1h->obj_in_cs(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   } else {
     oop p = oopDesc::load_decode_heap_oop(ref);
     assert(_g1h->is_in_g1_reserved(p),
-           err_msg("ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p)));
+           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   }
   return true;
 }
 
 bool G1ParScanThreadState::verify_task(StarTask ref) const {
   if (ref.is_narrow()) {
     return verify_ref((narrowOop*) ref);
   } else {
     return verify_ref((oop*) ref);
   }
 }
 #endif // ASSERT
 
 void G1ParScanThreadState::trim_queue() {
   StarTask ref;
   do {
     // Drain the overflow stack first, so other threads can steal.
     while (_refs->pop_overflow(ref)) {
       dispatch_reference(ref);
     }
 
     while (_refs->pop_local(ref)) {
       dispatch_reference(ref);
     }
   } while (!_refs->is_empty());
 }
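trim_queue() above drains the worker's two task stores in a deliberate order: the overflow stack is private to this thread, so it is emptied first, while work left on the local queue stays visible to work-stealing peers; the outer loop repeats because dispatching a reference can push new tasks onto either store. A minimal sketch of that drain discipline, assuming hypothetical Task, WorkQueue, and dispatch stand-ins rather than G1's StarTask and _refs queue machinery:

#include <deque>
#include <vector>

// Hypothetical stand-ins: only the drain order of trim_queue() is modeled,
// not HotSpot's lock-free GenericTaskQueue implementation.
struct Task { int payload; };

class WorkQueue {
  std::deque<Task>  _local;     // stealable queue: peers take from the other end
  std::vector<Task> _overflow;  // private overflow stack, never stolen from
public:
  bool pop_overflow(Task& t) {
    if (_overflow.empty()) return false;
    t = _overflow.back(); _overflow.pop_back(); return true;
  }
  bool pop_local(Task& t) {
    if (_local.empty()) return false;
    t = _local.back(); _local.pop_back(); return true;
  }
  bool is_empty() const { return _local.empty() && _overflow.empty(); }
  // The real queue spills to the overflow stack once the fixed-size local
  // queue is full; the toy always uses the local queue.
  void push(const Task& t) { _local.push_back(t); }
};

// Same shape as G1ParScanThreadState::trim_queue(): empty the private
// overflow stack first so remaining work sits where peers can steal it,
// and loop because dispatching a task may enqueue more tasks.
void trim_queue(WorkQueue& q, void (*dispatch)(WorkQueue&, const Task&)) {
  Task t;
  do {
    while (q.pop_overflow(t)) dispatch(q, t);
    while (q.pop_local(t))    dispatch(q, t);
  } while (!q.is_empty());
}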
@@ -145,56 +145,56 @@
 HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                       InCSetState* dest,
                                                       size_t word_sz,
                                                       AllocationContext_t const context,
                                                       bool previous_plab_refill_failed) {
-  assert(state.is_in_cset_or_humongous(), err_msg("Unexpected state: " CSETSTATE_FORMAT, state.value()));
-  assert(dest->is_in_cset_or_humongous(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+  assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
+  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
 
   // Right now we only have two types of regions (young / old) so
   // let's keep the logic here simple. We can generalize it when necessary.
   if (dest->is_young()) {
     bool plab_refill_in_old_failed = false;
     HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                         word_sz,
                                                         context,
                                                         &plab_refill_in_old_failed);
     // Make sure that we won't attempt to copy any other objects out
     // of a survivor region (given that apparently we cannot allocate
     // any new ones) to avoid coming into this slow path again and again.
     // Only consider failed PLAB refill here: failed inline allocations are
     // typically large, so not indicative of remaining space.
     if (previous_plab_refill_failed) {
       _tenuring_threshold = 0;
     }
 
     if (obj_ptr != NULL) {
       dest->set_old();
     } else {
       // We just failed to allocate in old gen. The same idea as explained above
       // for making survivor gen unavailable for allocation applies for old gen.
       _old_gen_is_full = plab_refill_in_old_failed;
     }
     return obj_ptr;
   } else {
     _old_gen_is_full = previous_plab_refill_failed;
-    assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
+    assert(dest->is_old(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
     // no other space to try.
     return NULL;
   }
 }
 
 InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
   if (state.is_young()) {
     age = !m->has_displaced_mark_helper() ? m->age()
                                           : m->displaced_mark_helper()->age();
     if (age < _tenuring_threshold) {
       return state;
     }
   }
   return dest(state);
 }
 
 void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
                                                   oop const old, size_t word_sz, uint age,
                                                   HeapWord * const obj_ptr,
                                                   const AllocationContext_t context) const {
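The policy in allocate_in_next_plab() above is worth restating: only a failed PLAB refill counts as evidence that a space is exhausted (a failed inline allocation usually just means the object was large), and that evidence is what zeroes _tenuring_threshold for survivors and sets _old_gen_is_full for old. A compilable sketch of just this control flow; Dest, Allocator, and ScanState are hypothetical stand-ins for the HotSpot types:

#include <cstddef>

// Hypothetical reduction of allocate_in_next_plab(): the HotSpot types are
// replaced by toys, but the decision structure is the same.
enum class Dest { Young, Old };

struct Allocator {
  bool old_space_available = true;
  // Fake PLAB allocation: pretends a refill succeeds while space remains.
  void* allocate(Dest /*dest*/, std::size_t word_sz, bool* refill_failed) {
    if (!old_space_available) { *refill_failed = true; return nullptr; }
    *refill_failed = false;
    return ::operator new(word_sz * sizeof(void*));  // toy PLAB memory, never freed
  }
};

struct ScanState {
  Allocator* plab_allocator;
  unsigned   tenuring_threshold;
  bool       old_gen_is_full;

  void* allocate_in_next_plab(Dest* dest, std::size_t word_sz,
                              bool previous_plab_refill_failed) {
    if (*dest == Dest::Young) {
      bool refill_in_old_failed = false;
      void* p = plab_allocator->allocate(Dest::Old, word_sz, &refill_in_old_failed);
      // A failed PLAB refill means survivor space is effectively exhausted:
      // tenure everything from now on so this slow path stops recurring.
      // A failed inline allocation proves little; such objects are
      // typically just large.
      if (previous_plab_refill_failed) {
        tenuring_threshold = 0;
      }
      if (p != nullptr) {
        *dest = Dest::Old;                       // the copy goes to old gen instead
      } else {
        old_gen_is_full = refill_in_old_failed;  // same trick, applied to old gen
      }
      return p;
    }
    // Already targeting old gen: nowhere further to fall back to.
    old_gen_is_full = previous_plab_refill_failed;
    return nullptr;
  }
};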
@@ -342,51 +342,50 @@
   return _surviving_young_words_total;
 }
 
 void G1ParScanThreadStateSet::flush() {
   assert(!_flushed, "thread local state from the per thread states should be flushed once");
   assert(_total_cards_scanned == 0, "should have been cleared");
 
   for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
     G1ParScanThreadState* pss = _states[worker_index];
 
     _total_cards_scanned += _cards_scanned[worker_index];
 
     pss->flush(_surviving_young_words_total);
     delete pss;
     _states[worker_index] = NULL;
   }
   _flushed = true;
 }
 
 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
-  assert(_g1h->obj_in_cs(old),
-         err_msg("Object " PTR_FORMAT " should be in the CSet", p2i(old)));
+  assert(_g1h->obj_in_cs(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
 
   oop forward_ptr = old->forward_to_atomic(old);
   if (forward_ptr == NULL) {
     // Forward-to-self succeeded. We are the "owner" of the object.
     HeapRegion* r = _g1h->heap_region_containing(old);
 
     if (!r->evacuation_failed()) {
       r->set_evacuation_failed(true);
       _g1h->hr_printer()->evac_failure(r);
     }
 
     _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
 
     _scanner.set_region(r);
     old->oop_iterate_backwards(&_scanner);
 
     return old;
   } else {
     // Forward-to-self failed. Either someone else managed to allocate
     // space for this object (old != forward_ptr) or they beat us in
     // self-forwarding it (old == forward_ptr).
     assert(old == forward_ptr || !_g1h->obj_in_cs(forward_ptr),
-           err_msg("Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
-                   "should not be in the CSet",
-                   p2i(old), p2i(forward_ptr)));
+           "Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
+           "should not be in the CSet",
+           p2i(old), p2i(forward_ptr));
     return forward_ptr;
   }
 }
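handle_evacuation_failure_par() settles the multi-worker race with a single atomic operation: forward_to_atomic(old) returns NULL only to the thread that installed the self-forwarding pointer, and that unique winner does the failure bookkeeping (flagging the region, preserving the mark word, re-scanning the object's fields); every loser just adopts whatever forwarding the winner, or an earlier successful copy, left behind. A minimal model of that race using std::atomic; Obj and handle_failure are hypothetical, and the real code CASes the forwarding pointer into the object's mark word rather than a separate field:

#include <atomic>

// Toy object with an explicit forwarding slot; HotSpot keeps this state in
// the mark word instead.
struct Obj {
  std::atomic<Obj*> forwardee{nullptr};

  // Mirrors the contract of forward_to_atomic() used above: returns nullptr
  // if we installed `target`, otherwise the forwardee installed first by
  // some other thread.
  Obj* forward_to_atomic(Obj* target) {
    Obj* expected = nullptr;
    if (forwardee.compare_exchange_strong(expected, target)) {
      return nullptr;   // we won the race
    }
    return expected;    // object was already forwarded by someone else
  }
};

// The caller's pattern from the code above: forward the object to itself,
// let the unique winner do the per-object failure bookkeeping, and have
// every loser return the previously installed result.
Obj* handle_failure(Obj* old_obj) {
  Obj* prior = old_obj->forward_to_atomic(old_obj);
  if (prior == nullptr) {
    // Winner: the object stays where it is; bookkeeping would go here.
    return old_obj;
  }
  // Loser: either a successful copy (prior != old_obj) or another thread's
  // self-forwarding (prior == old_obj).
  return prior;
}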
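Every hunk in this change is the same mechanical edit: HotSpot's assert() now accepts a printf-style format string plus arguments directly, so the err_msg() pre-formatting wrapper is dropped and nothing else moves except line numbers (the one-line assert in handle_evacuation_failure_par shifts the rest of the file up by one). A runnable toy showing why the wrapper becomes unnecessary; my_assert is hypothetical, and the real macro additionally records file and line and reports through the VM error handler:

#include <cstdio>
#include <cstdlib>
#include <cstddef>

// Toy model of a variadic assert. Because the macro forwards the format
// string and its arguments straight to the formatter, callers no longer
// need an err_msg()-style helper that pre-formats the message.
#define my_assert(cond, ...)                                 \
  do {                                                       \
    if (!(cond)) {                                           \
      std::fprintf(stderr, "assert(%s) failed: ", #cond);    \
      std::fprintf(stderr, __VA_ARGS__);                     \
      std::fprintf(stderr, "\n");                            \
      std::abort();                                          \
    }                                                        \
  } while (0)

int main() {
  std::size_t index = 3, length = 2;
  // Old HotSpot style wrapped the message: assert(cond, err_msg(fmt, args));
  // with a variadic macro the format string and arguments pass through:
  my_assert(index < length, "index: %zu >= length: %zu", index, length);
  return 0;
}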