src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
rev 6670 : fast reclaim main patch
*** 1924,1933 ****
--- 1924,1935 ----
_full_collection(false),
_free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
_secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
_old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
_humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
+ _humongous_is_live(),
+ _has_humongous_reclaim_candidates(false),
_free_regions_coming(false),
_young_list(new YoungList(this)),
_gc_time_stamp(0),
_retained_old_gc_alloc_region(NULL),
_survivor_plab_stats(YoungPLABSize, PLABWeight),
*** 2080,2089 ****
--- 2082,2092 ----
heap_word_size(init_byte_size));
_g1h = this;
_in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
+ _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
// Create the ConcurrentMark data structure and thread.
// (Must do this late, so that "max_regions" is defined.)
_cm = new ConcurrentMark(this, heap_rs);
if (_cm == NULL || !_cm->completed_initialization()) {
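The new _humongous_is_live table is sized exactly like _in_cset_fast_test above: one slot per region across the whole reserved heap, so a flag can be looked up from a raw heap address with a subtraction and a shift. Below is a minimal, self-contained sketch of that addressing scheme; PerRegionTable and its members are hypothetical stand-ins (the real code uses G1's biased mapped-array machinery), shown only to illustrate the initialize(start, end, GrainBytes) idea.

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

// One entry per region; a heap address maps to its slot via
// (addr - heap_start) >> log2(region size), mirroring the
// initialize(start, end, HeapRegion::GrainBytes) calls above.
class PerRegionTable {
  uintptr_t _heap_start;
  size_t _shift;
  std::vector<uint8_t> _table;
public:
  void initialize(uintptr_t start, uintptr_t end, size_t region_bytes) {
    _heap_start = start;
    _shift = 0;
    while ((size_t(1) << _shift) < region_bytes) {
      _shift++;
    }
    _table.assign((end - start) >> _shift, 0);
  }
  void clear() { std::fill(_table.begin(), _table.end(), 0); }
  void set_by_index(size_t region, uint8_t v) { _table.at(region) = v; }
  uint8_t get_by_address(uintptr_t addr) const {
    return _table.at((addr - _heap_start) >> _shift);
  }
};

int main() {
  PerRegionTable t;
  t.initialize(0x100000, 0x500000, 0x100000); // 4 regions of 1 MB each
  t.set_by_index(2, 1);                       // flag region 2
  assert(t.get_by_address(0x300000 + 42) == 1);
  return 0;
}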
*** 2175,2184 ****
--- 2178,2191 ----
if (G1StringDedup::is_enabled()) {
G1StringDedup::stop();
}
}
+ void G1CollectedHeap::clear_humongous_is_live_table() {
+ _humongous_is_live.clear();
+ }
+
size_t G1CollectedHeap::conservative_max_heap_alignment() {
return HeapRegion::max_region_size();
}
void G1CollectedHeap::ref_processing_init() {
*** 3683,3692 ****
--- 3690,3703 ----
if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
(total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
}
+
+ if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
+ clear_humongous_is_live_table();
+ }
}
void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {
if (G1SummarizeRSetStats &&
*** 3762,3771 ****
--- 3773,3826 ----
size_t G1CollectedHeap::cards_scanned() {
return g1_rem_set()->cardsScanned();
}
+ bool G1CollectedHeap::humongous_region_is_always_live(HeapRegion* region) {
+ assert(region->startsHumongous(), "Must start a humongous object");
+ return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
+ }
+
+ class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
+ private:
+ size_t _total_humongous;
+ size_t _candidate_humongous;
+ public:
+ RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
+ }
+
+ virtual bool doHeapRegion(HeapRegion* r) {
+ if (!r->startsHumongous()) {
+ return false;
+ }
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+ bool is_candidate = !g1h->humongous_region_is_always_live(r);
+ if (is_candidate) {
+ // Do not even try to reclaim a humongous object that we already know will
+ // be treated as live later: a young collection will not decrease the
+ // amount of remembered set entries for that region.
+ g1h->register_humongous_region_with_in_cset_fast_test(r->hrs_index());
+ _candidate_humongous++;
+ }
+ _total_humongous++;
+
+ return false;
+ }
+
+ size_t total_humongous() const { return _total_humongous; }
+ size_t candidate_humongous() const { return _candidate_humongous; }
+ };
+
+ void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
+ RegisterHumongousWithInCSetFastTestClosure cl;
+ heap_region_iterate(&cl);
+ g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(),
+ cl.candidate_humongous());
+ _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
+ }
+
void
G1CollectedHeap::setup_surviving_young_words() {
assert(_surviving_young_words == NULL, "pre-condition");
uint array_length = g1_policy()->young_cset_region_length();
_surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
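Taken together, humongous_region_is_always_live() and the closure above make one pass over the heap and flag every humongous starts region that has no remembered set entries and does not hold an object array. A compilable sketch of that selection logic follows; Region, always_live and register_candidates are illustrative stand-ins for the HotSpot types, not the actual implementation.

#include <cstddef>
#include <cstdio>
#include <vector>

struct Region {
  bool starts_humongous;   // first region of a humongous object
  bool is_obj_array;       // object arrays are never reclaim candidates
  size_t remset_entries;   // occupancy of the region's remembered set
};

// Mirrors humongous_region_is_always_live(): an object array, or any
// remembered set entry, disqualifies the region from eager reclaim.
static bool always_live(const Region& r) {
  return r.is_obj_array || r.remset_entries != 0;
}

// Mirrors register_humongous_regions_with_in_cset_fast_test(): one pass
// over all regions, flagging candidates in a per-region fast-test table.
static size_t register_candidates(const std::vector<Region>& heap,
                                  std::vector<bool>& fast_test) {
  size_t candidates = 0;
  for (size_t i = 0; i < heap.size(); i++) {
    if (!heap[i].starts_humongous) {
      continue;
    }
    if (!always_live(heap[i])) {
      fast_test[i] = true; // evacuation will now check liveness for region i
      candidates++;
    }
  }
  return candidates;
}

int main() {
  std::vector<Region> heap = {
    { true,  false, 0 }, // candidate: no remset entries, not an array
    { true,  true,  0 }, // skipped: object array
    { true,  false, 3 }, // skipped: referenced from outside the young gen
    { false, false, 0 }, // skipped: not a humongous starts region
  };
  std::vector<bool> fast_test(heap.size(), false);
  std::printf("candidates: %zu\n", register_candidates(heap, fast_test)); // 1
  return 0;
}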
*** 4049,4058 ****
--- 4104,4117 ----
g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE
g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
+ if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
+ register_humongous_regions_with_in_cset_fast_test();
+ }
+
_cm->note_start_of_gc();
// We should not verify the per-thread SATB buffers given that
// we have not filtered them yet (we'll do so during the
// GC). We also call this after finalize_cset() to
// ensure that the CSet has been finalized.
*** 4099,4108 ****
--- 4158,4170 ----
false /* verify_enqueued_buffers */,
true /* verify_thread_buffers */,
true /* verify_fingers */);
free_collection_set(g1_policy()->collection_set(), evacuation_info);
+ if (G1ReclaimDeadHumongousObjectsAtYoungGC && _has_humongous_reclaim_candidates) {
+ eagerly_reclaim_humongous_regions();
+ }
g1_policy()->clear_collection_set();
cleanup_surviving_young_words();
// Start a new incremental collection set for the next pause.
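For orientation, the two hunks above place the new work at opposite ends of the pause: candidates are registered right after the collection set is finalized, and reclamation runs only after the collection set has been freed, so every reference discovered during evacuation has had the chance to mark a candidate live. A stub outline of that ordering, with hypothetical function names standing in for the real calls:

#include <cstdio>

static void finalize_cset()          { std::puts("finalize collection set"); }
static void register_candidates()    { std::puts("flag dead-candidate humongous regions"); }
static void evacuate()               { std::puts("evacuate; discovered refs set live flags"); }
static void free_cset()              { std::puts("free collection set regions"); }
static void reclaim_dead_humongous() { std::puts("free candidates nobody referenced"); }

int main() {
  finalize_cset();
  register_candidates();    // hunk at 4049 above: before note_start_of_gc()
  evacuate();
  free_cset();
  reclaim_dead_humongous(); // hunk at 4099 above: after free_collection_set()
  return 0;
}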
*** 4598,4627 ****
}
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
! if (_g1->in_cset_fast_test(obj)) {
oop forwardee;
if (obj->is_forwarded()) {
forwardee = obj->forwardee();
} else {
forwardee = _par_scan_state->copy_to_survivor_space(obj);
}
! assert(forwardee != NULL, "forwardee should not be NULL");
oopDesc::encode_store_heap_oop(p, forwardee);
if (do_mark_object != G1MarkNone && forwardee != obj) {
// If the object is self-forwarded we don't need to explicitly
// mark it, the evacuation failure protocol will do so.
mark_forwarded_object(obj, forwardee);
}
if (barrier == G1BarrierKlass) {
do_klass_barrier(p, forwardee);
}
! } else {
// The object is not in collection set. If we're a root scanning
// closure during an initial mark pause then attempt to mark the object.
if (do_mark_object == G1MarkFromRoot) {
mark_object(obj);
}
--- 4660,4693 ----
}
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
assert(_worker_id == _par_scan_state->queue_num(), "sanity");
+ bool needs_marking = true;
! if (_g1->is_in_cset_or_humongous(obj)) {
oop forwardee;
if (obj->is_forwarded()) {
forwardee = obj->forwardee();
} else {
forwardee = _par_scan_state->copy_to_survivor_space(obj);
}
! if (forwardee != NULL) {
oopDesc::encode_store_heap_oop(p, forwardee);
if (do_mark_object != G1MarkNone && forwardee != obj) {
// If the object is self-forwarded we don't need to explicitly
// mark it, the evacuation failure protocol will do so.
mark_forwarded_object(obj, forwardee);
}
if (barrier == G1BarrierKlass) {
do_klass_barrier(p, forwardee);
}
! needs_marking = false;
! }
! }
! if (needs_marking) {
// The object is not in collection set. If we're a root scanning
// closure during an initial mark pause then attempt to mark the object.
if (do_mark_object == G1MarkFromRoot) {
mark_object(obj);
}
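The rewrite above turns the closure's two-way branch into three outcomes: a copied object gets its reference updated; an object in a candidate humongous region is left in place (the new forwardee != NULL check implies copy_to_survivor_space() may now decline to copy, presumably for such objects); and anything not copied falls through to the marking path. A compilable sketch of that control flow, with stand-ins for the HotSpot calls:

#include <cstdio>

enum Where { OUTSIDE_CSET, IN_CSET, HUMONGOUS_CANDIDATE };

// Stand-in for copy_to_survivor_space(): only genuine collection set
// objects are moved; a humongous candidate yields no forwardee.
static const char* try_copy(Where w) {
  return (w == IN_CSET) ? "forwardee" : nullptr;
}

// Mirrors the needs_marking pattern in the hunk above.
static void do_oop(Where w) {
  bool needs_marking = true;
  if (w != OUTSIDE_CSET) {          // is_in_cset_or_humongous(obj)
    const char* forwardee = try_copy(w);
    if (forwardee != nullptr) {     // copied: update the reference and stop
      std::printf("store %s\n", forwardee);
      needs_marking = false;
    }
    // forwardee == nullptr: humongous object stays in place; fall through
  }
  if (needs_marking) {
    std::printf("mark object in place\n");
  }
}

int main() {
  do_oop(IN_CSET);             // store forwardee
  do_oop(HUMONGOUS_CANDIDATE); // mark object in place
  do_oop(OUTSIDE_CSET);        // mark object in place
  return 0;
}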
*** 5441,5456 ****
class G1KeepAliveClosure: public OopClosure {
G1CollectedHeap* _g1;
public:
G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
! void do_oop( oop* p) {
oop obj = *p;
! if (_g1->obj_in_cs(obj)) {
assert( obj->is_forwarded(), "invariant" );
*p = obj->forwardee();
}
}
};
// Copying Keep Alive closure - can be called from both
--- 5507,5528 ----
class G1KeepAliveClosure: public OopClosure {
G1CollectedHeap* _g1;
public:
G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
! void do_oop(oop* p) {
oop obj = *p;
! if (obj == NULL || !_g1->is_in_cset_or_humongous(obj)) {
! return;
! }
! if (_g1->is_in_cset(obj)) {
assert( obj->is_forwarded(), "invariant" );
*p = obj->forwardee();
+ } else {
+ assert(!obj->is_forwarded(), "invariant");
+ _g1->set_humongous_is_live(obj);
}
}
};
// Copying Keep Alive closure - can be called from both
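The keep-alive change above reflects that the two kinds of is_in_cset_or_humongous() hits need opposite treatment: collection set objects have already been evacuated and only need the reference redirected, while humongous candidates never move, so keeping one alive just means raising its liveness flag. A small sketch of that split, using a hypothetical Obj model rather than HotSpot types:

#include <cassert>
#include <cstdio>

struct Obj {
  bool in_cset;    // evacuated during this pause
  bool humongous;  // reclaim candidate, never moved
  Obj* forwardee;  // new location, if evacuated
  bool live_flag;  // stands in for the per-region humongous-is-live bit
};

// Mirrors G1KeepAliveClosure::do_oop(oop*) in the hunk above.
static void keep_alive(Obj** p) {
  Obj* obj = *p;
  if (obj == nullptr || !(obj->in_cset || obj->humongous)) {
    return;                          // fast path: nothing to do
  }
  if (obj->in_cset) {
    assert(obj->forwardee != nullptr);
    *p = obj->forwardee;             // redirect to the copied object
  } else {
    assert(obj->forwardee == nullptr);
    obj->live_flag = true;           // set_humongous_is_live(obj)
  }
}

int main() {
  Obj copy = {};
  Obj moved = { true, false, &copy, false };
  Obj big   = { false, true, nullptr, false };
  Obj* ref = &moved;
  keep_alive(&ref);
  Obj* ref2 = &big;
  keep_alive(&ref2);
  std::printf("redirected: %d, humongous live: %d\n", ref == &copy, big.live_flag);
  return 0;
}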
*** 5476,5486 ****
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
! if (_g1h->obj_in_cs(obj)) {
// If the referent object has been forwarded (either copied
// to a new location or to itself in the event of an
// evacuation failure) then we need to update the reference
// field and, if both reference and referent are in the G1
// heap, update the RSet for the referent.
--- 5548,5558 ----
virtual void do_oop( oop* p) { do_oop_work(p); }
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
! if (_g1h->is_in_cset_or_humongous(obj)) {
// If the referent object has been forwarded (either copied
// to a new location or to itself in the event of an
// evacuation failure) then we need to update the reference
// field and, if both reference and referent are in the G1
// heap, update the RSet for the referent.
*** 6426,6435 ****
--- 6498,6644 ----
decrement_summary_bytes(pre_used);
policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
}
+ class G1FreeHumongousRegionClosure : public HeapRegionClosure {
+ private:
+ FreeRegionList* _free_region_list;
+ HeapRegionSet* _proxy_set;
+ HeapRegionSetCount _humongous_regions_removed;
+ size_t _freed_bytes;
+ public:
+
+ G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
+ _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
+ }
+
+ virtual bool doHeapRegion(HeapRegion* r) {
+ if (!r->startsHumongous()) {
+ return false;
+ }
+
+ G1CollectedHeap* g1h = G1CollectedHeap::heap();
+
+ // The following checks, which decide whether the humongous object is live, are sufficient.
+ // The main additional check (in addition to having a reference from the roots
+ // or the young gen) is whether the humongous object has a remembered set entry.
+ //
+ // A humongous object cannot be live if there is no remembered set for it
+ // because:
+ // - there can be no references from within humongous starts regions referencing
+ // the object because we never allocate other objects into them.
+ // (I.e. there are no intra-region references that may be missed by the
+ // remembered set)
+ // - as soon as there is a remembered set entry to the humongous starts region
+ // (i.e. it has "escaped" to an old object), this remembered set entry will stay
+ // until the end of a concurrent mark.
+ //
+ // It is not required to check whether the object has been found dead by marking
+ // or not; in fact, such a check would prevent reclamation within a concurrent
+ // cycle, as all objects allocated during that time are considered live.
+ // SATB marking is even more conservative than the remembered set.
+ // So if at this point in the collection there is no remembered set entry,
+ // nobody has a reference to it.
+ // At the start of collection we flush all refinement logs, so remembered sets
+ // are completely up-to-date with respect to references to the humongous object.
+ //
+ // Other implementation considerations:
+ // - never consider object arrays: while they are a valid target, they have not
+ // been observed to be used as temporary objects.
+ // - they would also require considerable effort to clean up the remembered
+ // sets.
+ // While this cleanup is not strictly necessary (nor required to happen
+ // instantly), given that object arrays occur very rarely here, skipping them
+ // saves us this additional complexity.
+ if (g1h->humongous_is_live(r->hrs_index()) ||
+ g1h->humongous_region_is_always_live(r)) {
+
+ if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
+ gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d",
+ r->isHumongous(),
+ r->hrs_index(),
+ r->rem_set()->occupied(),
+ r->rem_set()->strong_code_roots_list_length(),
+ g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(),
+ g1h->humongous_is_live(r->hrs_index()),
+ oop(r->bottom())->is_objArray()
+ );
+ }
+
+ return false;
+ }
+
+ guarantee(!((oop)(r->bottom()))->is_objArray(),
+ err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
+ r->bottom()));
+
+ if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
+ gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d",
+ r->isHumongous(),
+ r->bottom(),
+ r->hrs_index(),
+ r->region_num(),
+ r->rem_set()->occupied(),
+ r->rem_set()->strong_code_roots_list_length(),
+ g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(),
+ g1h->humongous_is_live(r->hrs_index()),
+ oop(r->bottom())->is_objArray()
+ );
+ }
+ _freed_bytes += r->used();
+ r->set_containing_set(NULL);
+ _humongous_regions_removed.increment(1u, r->capacity());
+ g1h->free_humongous_region(r, _free_region_list, false /* par */);
+
+ return false;
+ }
+
+ HeapRegionSetCount& humongous_free_count() {
+ return _humongous_regions_removed;
+ }
+
+ size_t bytes_freed() const {
+ return _freed_bytes;
+ }
+
+ size_t humongous_reclaimed() const {
+ return _humongous_regions_removed.length();
+ }
+ };
+
+ void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
+ assert_at_safepoint(true /* should_be_vm_thread */);
+ guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Feature must be enabled");
+ guarantee(_has_humongous_reclaim_candidates, "Should not reach here if no candidates for eager reclaim were found.");
+
+ double start_time = os::elapsedTime();
+
+ FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
+
+ G1FreeHumongousRegionClosure cl(&local_cleanup_list);
+ heap_region_iterate(&cl);
+
+ HeapRegionSetCount empty_set;
+ remove_from_old_sets(empty_set, cl.humongous_free_count());
+
+ G1HRPrinter* hr_printer = _g1h->hr_printer();
+ if (hr_printer->is_active()) {
+ FreeRegionListIterator iter(&local_cleanup_list);
+ while (iter.more_available()) {
+ HeapRegion* hr = iter.get_next();
+ hr_printer->cleanup(hr);
+ }
+ }
+
+ prepend_to_freelist(&local_cleanup_list);
+ decrement_summary_bytes(cl.bytes_freed());
+
+ g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
+ cl.humongous_reclaimed());
+ }
+
// This routine is similar to the above but does not record
// any policy statistics or update free lists; we are abandoning
// the current incremental collection set in preparation of a
// full collection. After the full GC we will start to build up
// the incremental collection set again.
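Putting the pieces together, the reclaim pass above keeps a candidate if either a reference to it was discovered during evacuation (its live flag is set) or it turns out to be always-live after all, and frees everything else immediately, returning the regions to the free list. A condensed sketch of that decision, reusing the same stand-in region model as the earlier sketches (HRegion and its fields are illustrative, not HotSpot types):

#include <cstddef>
#include <cstdio>
#include <vector>

struct HRegion {
  bool starts_humongous;
  bool is_obj_array;
  size_t remset_entries;
  bool live_flag;    // set during evacuation via set_humongous_is_live()
  size_t used_bytes;
};

static bool always_live(const HRegion& r) {
  return r.is_obj_array || r.remset_entries != 0;
}

// Mirrors G1FreeHumongousRegionClosure / eagerly_reclaim_humongous_regions():
// returns the number of bytes handed back to the free list.
static size_t eagerly_reclaim(std::vector<HRegion>& heap) {
  size_t freed = 0;
  for (HRegion& r : heap) {
    if (!r.starts_humongous) {
      continue;
    }
    if (r.live_flag || always_live(r)) {
      continue;                // provably reachable: keep the object
    }
    freed += r.used_bytes;     // no remset entry and no reference found
    r.used_bytes = 0;          // during evacuation: free it right away
    r.starts_humongous = false;
  }
  return freed;
}

int main() {
  std::vector<HRegion> heap = {
    { true, false, 0, false, 4u << 20 }, // dead: reclaimed
    { true, false, 0, true,  4u << 20 }, // referenced during evacuation: kept
    { true, false, 7, false, 4u << 20 }, // remembered set entries: kept
  };
  std::printf("reclaimed %zu bytes\n", eagerly_reclaim(heap));
  return 0;
}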