src/hotspot/share/gc/g1/g1ParScanThreadState.cpp
rev 60436 : imported patch allocate_copy_slow
*** 345,404 ****
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old);
  }
}
// Private inline function, for direct internal use and providing the
// implementation of the public not-inline function.
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
const size_t word_sz = old->size();
uint age = 0;
G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
-   // The second clause is to prevent premature evacuation failure in case there
-   // is still space in survivor, but old gen is full.
-   if (_old_gen_is_full && dest_attr.is_old()) {
-     return handle_evacuation_failure_par(old, old_mark);
-   }
HeapRegion* const from_region = _g1h->heap_region_containing(old);
uint node_index = from_region->node_index();
HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
// PLAB allocations should succeed most of the time, so we'll
// normally check against NULL once and that's it.
if (obj_ptr == NULL) {
!     bool plab_refill_failed = false;
!     obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_attr, word_sz, &plab_refill_failed, node_index);
!     if (obj_ptr == NULL) {
!       assert(region_attr.is_in_cset(), "Unexpected region attr type: %s", region_attr.get_type_str());
!       obj_ptr = allocate_in_next_plab(&dest_attr, word_sz, plab_refill_failed, node_index);
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
return handle_evacuation_failure_par(old, old_mark);
}
}
-     update_numa_stats(node_index);
-
-     if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
-       // The events are checked individually as part of the actual commit
-       report_promotion_event(dest_attr, old, word_sz, age, obj_ptr, node_index);
-     }
-   }
assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1h->evacuation_should_fail()) {
// Doing this after all the allocation attempts also tests the
// undo_allocation() method too.
!     _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
return handle_evacuation_failure_par(old, old_mark);
}
#endif // !PRODUCT
// We're going to allocate linearly, so might as well prefetch ahead.
--- 345,430 ----
    _g1h->_gc_tracer_stw->report_promotion_outside_plab_event(old->klass(), word_sz * HeapWordSize, age,
                                                              dest_attr.type() == G1HeapRegionAttr::Old);
  }
}
+ NOINLINE
+ HeapWord* G1ParScanThreadState::allocate_copy_slow(G1HeapRegionAttr* dest_attr,
+                                                    oop old,
+                                                    size_t word_sz,
+                                                    uint age,
+                                                    uint node_index) {
+   HeapWord* obj_ptr = NULL;
+   // Try slow-path allocation unless we're allocating old and old is already full.
+   if (!(dest_attr->is_old() && _old_gen_is_full)) {
+     bool plab_refill_failed = false;
+     obj_ptr = _plab_allocator->allocate_direct_or_new_plab(*dest_attr,
+                                                            word_sz,
+                                                            &plab_refill_failed,
+                                                            node_index);
+     if (obj_ptr == NULL) {
+       obj_ptr = allocate_in_next_plab(dest_attr,
+                                       word_sz,
+                                       plab_refill_failed,
+                                       node_index);
+     }
+   }
+   if (obj_ptr != NULL) {
+     update_numa_stats(node_index);
+     if (_g1h->_gc_tracer_stw->should_report_promotion_events()) {
+       // The events are checked individually as part of the actual commit
+       report_promotion_event(*dest_attr, old, word_sz, age, obj_ptr, node_index);
+     }
+   }
+   return obj_ptr;
+ }
+
+ NOINLINE
+ void G1ParScanThreadState::undo_allocation(G1HeapRegionAttr dest_attr,
+                                            HeapWord* obj_ptr,
+                                            size_t word_sz,
+                                            uint node_index) {
+   _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
+ }
+
// Private inline function, for direct internal use and providing the
// implementation of the public not-inline function.
oop G1ParScanThreadState::do_copy_to_survivor_space(G1HeapRegionAttr const region_attr,
                                                    oop const old,
                                                    markWord const old_mark) {
+   assert(region_attr.is_in_cset(),
+          "Unexpected region attr type: %s", region_attr.get_type_str());
+
const size_t word_sz = old->size();
uint age = 0;
G1HeapRegionAttr dest_attr = next_region_attr(region_attr, old_mark, age);
HeapRegion* const from_region = _g1h->heap_region_containing(old);
uint node_index = from_region->node_index();
HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_attr, word_sz, node_index);
// PLAB allocations should succeed most of the time, so we'll
// normally check against NULL once and that's it.
if (obj_ptr == NULL) {
!     obj_ptr = allocate_copy_slow(&dest_attr, old, word_sz, age, node_index);
if (obj_ptr == NULL) {
// This will either forward-to-self, or detect that someone else has
// installed a forwarding pointer.
return handle_evacuation_failure_par(old, old_mark);
}
}
assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");
#ifndef PRODUCT
// Should this evacuation fail?
if (_g1h->evacuation_should_fail()) {
// Doing this after all the allocation attempts also tests the
// undo_allocation() method too.
!     undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
return handle_evacuation_failure_par(old, old_mark);
}
#endif // !PRODUCT
// We're going to allocate linearly, so might as well prefetch ahead.
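
The hunk above pulls the rarely-taken PLAB refill path out of do_copy_to_survivor_space() into a separate NOINLINE helper, so only the cheap plab_allocate() fast path gets inlined into the copying loop. A minimal sketch of that fast-path/slow-path split, not part of the patch, using a made-up bump-pointer buffer rather than HotSpot's real G1PLABAllocator types:

  // Sketch only -- illustrative types, not the real G1PLABAllocator API.
  #include <cstddef>
  #include <cstdlib>

  #if defined(_MSC_VER)
  #define NOINLINE __declspec(noinline)
  #else
  #define NOINLINE __attribute__((noinline))
  #endif

  struct BumpBuffer {
    char* _top;
    char* _end;

    // Hot path: a bump-pointer allocation, cheap enough to inline at every
    // call site in the copying loop.
    void* allocate(size_t bytes) {
      if (_top + bytes <= _end) {
        void* result = _top;
        _top += bytes;
        return result;
      }
      return allocate_slow(bytes);   // rare: buffer exhausted
    }

    // Cold path: kept out of line so its spill code and calls do not bloat
    // every inlined copy of allocate().
    NOINLINE void* allocate_slow(size_t bytes);
  };

  void* BumpBuffer::allocate_slow(size_t bytes) {
    // Grab a fresh chunk; retiring the old buffer and the failure
    // accounting are omitted in this sketch.
    size_t chunk = bytes > 64 * 1024 ? bytes : 64 * 1024;
    char* fresh = static_cast<char*>(std::malloc(chunk));
    if (fresh == NULL) {
      return NULL;                   // caller handles allocation failure
    }
    _top = fresh + bytes;
    _end = fresh + chunk;
    return fresh;
  }

The patch applies the same idea twice: allocate_copy_slow() and the new undo_allocation() wrapper both sit behind NOINLINE, keeping the inlined portion of the copy routine small.
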
*** 407,420 ****
const oop obj = oop(obj_ptr);
const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
if (forward_ptr == NULL) {
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
const uint young_index = from_region->young_index_in_cset();
-
    assert((from_region->is_young() && young_index > 0) ||
           (!from_region->is_young() && young_index == 0), "invariant" );
if (dest_attr.is_young()) {
if (age < markWord::max_age) {
age++;
}
--- 433,448 ----
const oop obj = oop(obj_ptr);
const oop forward_ptr = old->forward_to_atomic(obj, old_mark, memory_order_relaxed);
if (forward_ptr == NULL) {
Copy::aligned_disjoint_words(cast_from_oop<HeapWord*>(old), obj_ptr, word_sz);
+     {
        const uint young_index = from_region->young_index_in_cset();
        assert((from_region->is_young() && young_index > 0) ||
               (!from_region->is_young() && young_index == 0), "invariant" );
+       _surviving_young_words[young_index] += word_sz;
+     }
if (dest_attr.is_young()) {
if (age < markWord::max_age) {
age++;
}
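
In the hunk above, forward_to_atomic() claims old with a single compare-and-swap on its mark word: a NULL result means this thread won the race and performs the copy; otherwise the returned value is the copy another thread already installed. A rough sketch of that contract with std::atomic, not part of the patch (names are illustrative, and real mark words carry tag bits this sketch ignores):

  // Sketch only -- not part of the patch.
  #include <atomic>
  #include <cstddef>
  #include <cstdint>

  struct Obj {
    // Stand-in for the mark word; a forwarded object stores its new
    // location here instead of the mark.
    std::atomic<uintptr_t> _header;
  };

  // Returns NULL if we installed new_copy (we won and must do the copy),
  // or the winner's copy if another thread got there first -- the same
  // contract as forward_to_atomic() above.
  Obj* try_forward(Obj* old_obj, uintptr_t old_mark, Obj* new_copy) {
    uintptr_t expected = old_mark;
    if (old_obj->_header.compare_exchange_strong(
            expected, reinterpret_cast<uintptr_t>(new_copy),
            std::memory_order_relaxed)) {
      return NULL;   // claimed: copy the payload, then use the new location
    }
    return reinterpret_cast<Obj*>(expected);   // lost: reuse winner's copy
  }
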
*** 444,455 ****
is_to_young,
_worker_id,
obj);
}
-     _surviving_young_words[young_index] += word_sz;
-
if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
// We keep track of the next start index in the length field of
// the to-space object. The actual length can be found in the
// length field of the from-space object.
arrayOop(obj)->set_length(0);
--- 472,481 ----
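
The removed lines above were merely moved next to the copy in the previous hunk. The surrounding comment also describes G1's chunked scanning of large object arrays: the to-space copy's length field temporarily holds the next chunk's start index, while the true length stays available in the from-space copy. A toy, single-threaded sketch of that bookkeeping, not part of the patch (illustrative names; the real code pushes partial-array tasks on a work queue, and ParGCArrayScanChunk is a tunable flag):

  // Sketch only -- not part of the patch.
  const int ParGCArrayScanChunk = 50;   // illustrative threshold

  struct ToyArray {
    int _length;   // to-space copy: temporarily the next chunk's start index
    // element payload omitted
  };

  // Scan one chunk of the to-space copy; real_length comes from the
  // from-space copy, whose length field is still intact. Returns true
  // while more chunks remain.
  bool scan_one_chunk(ToyArray* to_space, int real_length) {
    int start = to_space->_length;       // begins at 0, see set_length(0)
    int end = start + ParGCArrayScanChunk;
    if (end >= real_length) {
      to_space->_length = real_length;   // done: restore the true length
      // ... scan elements [start, real_length) ...
      return false;
    }
    to_space->_length = end;             // remember where to resume
    // ... scan elements [start, end) ...
    return true;
  }
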
*** 526,535 ****
--- 552,562 ----
size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
_g1h->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanHR, worker_index, used_memory, G1GCPhaseTimes::ScanHRUsedMemory);
}
}
+ NOINLINE
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markWord m) {
assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
if (forward_ptr == NULL) {
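
The final hunk marks handle_evacuation_failure_par() NOINLINE as well; as its last lines show, it reuses forward_to_atomic() but forwards old to itself. With no destination available, "forwarded to self" flags the object as failed while still electing a single winner among racing threads. Continuing the illustrative try_forward() sketch from earlier, not part of the patch:

  // Sketch only -- reuses try_forward() and Obj from the sketch above.
  Obj* handle_failure(Obj* old_obj, uintptr_t old_mark) {
    Obj* winner = try_forward(old_obj, old_mark, /* new_copy */ old_obj);
    if (winner == NULL) {
      return old_obj;   // we won: object stays put, flagged for later fix-up
    }
    return winner;      // another thread already copied or self-forwarded it
  }
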