
src/hotspot/share/gc/g1/g1ParScanThreadState.cpp


*** 151,172 ****
  }
  
  HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                        InCSetState* dest,
                                                        size_t word_sz,
-                                                       AllocationContext_t const context,
                                                        bool previous_plab_refill_failed) {
    assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
    assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
  
    // Right now we only have two types of regions (young / old) so
    // let's keep the logic here simple. We can generalize it when necessary.
    if (dest->is_young()) {
      bool plab_refill_in_old_failed = false;
      HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                          word_sz,
-                                                         context,
                                                          &plab_refill_in_old_failed);
      // Make sure that we won't attempt to copy any other objects out
      // of a survivor region (given that apparently we cannot allocate
      // any new ones) to avoid coming into this slow path again and again.
      // Only consider failed PLAB refill here: failed inline allocations are
--- 151,170 ----
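The hunk above drops the AllocationContext_t parameter from allocate_in_next_plab(), the slow path that retries a failed survivor (young) PLAB allocation in the old generation. Below is a minimal, self-contained sketch of that fallback shape, assuming hypothetical stand-ins (Dest for InCSetState, SketchAllocator for G1PLABAllocator, ScanState for the scan thread state); the flag bookkeeping past the truncated comment is an assumption about intent, not HotSpot code.

    // Sketch of the young -> old PLAB fallback, with the context parameter gone.
    #include <cstddef>
    #include <cstdlib>

    enum class Dest { Young, Old };

    struct SketchAllocator {
      // Stand-in for G1PLABAllocator::allocate(); *refill_failed reports
      // whether obtaining a fresh PLAB failed.
      void* allocate(Dest, size_t word_sz, bool* refill_failed) {
        *refill_failed = false;
        return std::malloc(word_sz * sizeof(void*));
      }
    };

    struct ScanState {
      SketchAllocator* _plab_allocator = nullptr;
      unsigned _tenuring_threshold = 15;
      bool _old_gen_is_full = false;

      void* allocate_in_next_plab(Dest* dest, size_t word_sz,
                                  bool previous_plab_refill_failed) {
        if (*dest == Dest::Young) {
          bool refill_in_old_failed = false;
          void* obj = _plab_allocator->allocate(Dest::Old, word_sz,
                                                &refill_in_old_failed);
          // A failed survivor PLAB refill means survivor space is effectively
          // exhausted: stop routing further copies there (assumed bookkeeping).
          if (previous_plab_refill_failed) {
            _tenuring_threshold = 0;
          }
          if (obj != nullptr) {
            *dest = Dest::Old;  // the copy actually went to old gen
          } else {
            _old_gen_is_full = refill_in_old_failed;
          }
          return obj;
        }
        // Destination was already old gen: there is no other space to try.
        _old_gen_is_full = previous_plab_refill_failed;
        return nullptr;
      }
    };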
*** 202,214 ****
    return dest(state);
  }
  
  void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
                                                    oop const old, size_t word_sz, uint age,
!                                                   HeapWord * const obj_ptr,
!                                                   const AllocationContext_t context) const {
!   PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state, context);
    if (alloc_buf->contains(obj_ptr)) {
      _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
                                                               dest_state.value() == InCSetState::Old,
                                                               alloc_buf->word_sz());
    } else {
--- 200,211 ----
    return dest(state);
  }
  
  void G1ParScanThreadState::report_promotion_event(InCSetState const dest_state,
                                                    oop const old, size_t word_sz, uint age,
!                                                   HeapWord * const obj_ptr) const {
!   PLAB* alloc_buf = _plab_allocator->alloc_buffer(dest_state);
    if (alloc_buf->contains(obj_ptr)) {
      _g1h->_gc_tracer_stw->report_promotion_in_new_plab_event(old->klass(), word_sz, age,
                                                               dest_state.value() == InCSetState::Old,
                                                               alloc_buf->word_sz());
    } else {
*** 224,260 ****
    HeapRegion* const from_region = _g1h->heap_region_containing(old);
    // +1 to make the -1 indexes valid...
    const int young_index = from_region->young_index_in_cset()+1;
    assert( (from_region->is_young() && young_index > 0) ||
           (!from_region->is_young() && young_index == 0), "invariant" );
-   const AllocationContext_t context = from_region->allocation_context();
  
    uint age = 0;
    InCSetState dest_state = next_state(state, old_mark, age);
    // The second clause is to prevent premature evacuation failure in case there
    // is still space in survivor, but old gen is full.
    if (_old_gen_is_full && dest_state.is_old()) {
      return handle_evacuation_failure_par(old, old_mark);
    }
!   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);
  
    // PLAB allocations should succeed most of the time, so we'll
    // normally check against NULL once and that's it.
    if (obj_ptr == NULL) {
      bool plab_refill_failed = false;
!     obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);
      if (obj_ptr == NULL) {
!       obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context, plab_refill_failed);
        if (obj_ptr == NULL) {
          // This will either forward-to-self, or detect that someone else has
          // installed a forwarding pointer.
          return handle_evacuation_failure_par(old, old_mark);
        }
      }
      if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
        // The events are checked individually as part of the actual commit
!       report_promotion_event(dest_state, old, word_sz, age, obj_ptr, context);
      }
    }
  
    assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
    assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
--- 221,256 ----
    HeapRegion* const from_region = _g1h->heap_region_containing(old);
    // +1 to make the -1 indexes valid...
    const int young_index = from_region->young_index_in_cset()+1;
    assert( (from_region->is_young() && young_index > 0) ||
           (!from_region->is_young() && young_index == 0), "invariant" );
  
    uint age = 0;
    InCSetState dest_state = next_state(state, old_mark, age);
    // The second clause is to prevent premature evacuation failure in case there
    // is still space in survivor, but old gen is full.
    if (_old_gen_is_full && dest_state.is_old()) {
      return handle_evacuation_failure_par(old, old_mark);
    }
!   HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz);
  
    // PLAB allocations should succeed most of the time, so we'll
    // normally check against NULL once and that's it.
    if (obj_ptr == NULL) {
      bool plab_refill_failed = false;
!     obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, &plab_refill_failed);
      if (obj_ptr == NULL) {
!       obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, plab_refill_failed);
        if (obj_ptr == NULL) {
          // This will either forward-to-self, or detect that someone else has
          // installed a forwarding pointer.
          return handle_evacuation_failure_par(old, old_mark);
        }
      }
      if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
        // The events are checked individually as part of the actual commit
!       report_promotion_event(dest_state, old, word_sz, age, obj_ptr);
      }
    }
  
    assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
    assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
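Taken together, the call sites above form a three-level allocation cascade, now without the context argument at each level: try the current destination's PLAB, then allocate_direct_or_new_plab(), then the other destination via allocate_in_next_plab(), and fall back to evacuation-failure handling if everything returns NULL. A runnable sketch of just that control flow, with hypothetical stub types (CascadeAllocator, Dest) whose bodies are placeholders chosen to exercise every level, not HotSpot behavior:

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    enum class Dest { Young, Old };

    struct CascadeAllocator {
      // Level 1: bump-pointer allocation in the current PLAB; stubbed to fail
      // here so the slow path below runs.
      void* plab_allocate(Dest, size_t) { return nullptr; }

      // Level 2: allocate directly or refill the PLAB; *refill_failed reports
      // whether a fresh PLAB could not be obtained.
      void* allocate_direct_or_new_plab(Dest, size_t, bool* refill_failed) {
        *refill_failed = true;  // pretend the refill failed, forcing level 3
        return nullptr;
      }

      // Level 3: try the other destination's PLAB (young -> old).
      void* allocate_in_next_plab(Dest* dest, size_t word_sz, bool /*refill_failed*/) {
        *dest = Dest::Old;
        return std::malloc(word_sz * sizeof(void*));
      }
    };

    void* evacuate(CascadeAllocator* a, Dest* dest, size_t word_sz) {
      void* obj = a->plab_allocate(*dest, word_sz);  // fast path, usually succeeds
      if (obj == nullptr) {
        bool plab_refill_failed = false;
        obj = a->allocate_direct_or_new_plab(*dest, word_sz, &plab_refill_failed);
        if (obj == nullptr) {
          obj = a->allocate_in_next_plab(dest, word_sz, plab_refill_failed);
        }
      }
      return obj;  // nullptr here would mean handle_evacuation_failure_par()
    }

    int main() {
      CascadeAllocator a;
      Dest dest = Dest::Young;
      void* p = evacuate(&a, &dest, 8);
      std::printf("allocated=%p dest=%s\n", p, dest == Dest::Old ? "Old" : "Young");
      std::free(p);
      return 0;
    }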
*** 262,272 ****
  #ifndef PRODUCT
    // Should this evacuation fail?
    if (_g1h->evacuation_should_fail()) {
      // Doing this after all the allocation attempts also tests the
      // undo_allocation() method too.
!     _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
      return handle_evacuation_failure_par(old, old_mark);
    }
  #endif // !PRODUCT
  
    // We're going to allocate linearly, so might as well prefetch ahead.
--- 258,268 ----
  #ifndef PRODUCT
    // Should this evacuation fail?
    if (_g1h->evacuation_should_fail()) {
      // Doing this after all the allocation attempts also tests the
      // undo_allocation() method too.
!     _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
      return handle_evacuation_failure_par(old, old_mark);
    }
  #endif // !PRODUCT
  
    // We're going to allocate linearly, so might as well prefetch ahead.
*** 323,333 ****
        _scanner.set_region(to_region);
        obj->oop_iterate_backwards(&_scanner);
      }
      return obj;
    } else {
!     _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
      return forward_ptr;
    }
  }
  
  G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
--- 319,329 ----
        _scanner.set_region(to_region);
        obj->oop_iterate_backwards(&_scanner);
      }
      return obj;
    } else {
!     _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
      return forward_ptr;
    }
  }
  
  G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
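Both undo_allocation() call sites above (the deliberate evacuation failure and the lost forwarding race) likewise lose the context argument. As a rough model of why an undo needs only the destination, address, and size, here is a hedged sketch of a bump-pointer buffer with a retract-the-last-allocation undo; TinyPlab is hypothetical, and the real PLAB::undo_allocation also handles non-tail allocations, which this omits.

    #include <cassert>
    #include <cstddef>

    // Hypothetical miniature of a PLAB: a thread-local bump-pointer buffer.
    struct TinyPlab {
      char* _top;  // next free byte
      char* _end;  // one past the last usable byte

      void* allocate(size_t bytes) {
        if (_end - _top < static_cast<ptrdiff_t>(bytes)) return nullptr;
        void* p = _top;
        _top += bytes;
        return p;
      }

      // Roll back an allocation, e.g. after losing a forwarding-pointer race
      // or a deliberately failed evacuation. Only the tail case is modeled.
      void undo_allocation(void* p, size_t bytes) {
        assert(static_cast<char*>(p) + bytes == _top, "sketch only undoes the last allocation");
        _top = static_cast<char*>(p);
      }
    };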