1729
1730 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1731 CollectedHeap(),
1732 _g1_policy(policy_),
1733 _dirty_card_queue_set(false),
1734 _into_cset_dirty_card_queue_set(false),
1735 _is_alive_closure_cm(this),
1736 _is_alive_closure_stw(this),
1737 _ref_processor_cm(NULL),
1738 _ref_processor_stw(NULL),
1739 _bot_shared(NULL),
1740 _evac_failure_scan_stack(NULL),
1741 _mark_in_progress(false),
1742 _cg1r(NULL),
1743 _g1mm(NULL),
1744 _refine_cte_cl(NULL),
1745 _full_collection(false),
1746 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1747 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1748 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1749 _humongous_is_live(),
1750 _has_humongous_reclaim_candidates(false),
1751 _free_regions_coming(false),
1752 _young_list(new YoungList(this)),
1753 _gc_time_stamp(0),
1754 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1755 _old_plab_stats(OldPLABSize, PLABWeight),
1756 _expand_heap_after_alloc_failure(true),
1757 _surviving_young_words(NULL),
1758 _old_marking_cycles_started(0),
1759 _old_marking_cycles_completed(0),
1760 _concurrent_cycle_started(false),
1761 _heap_summary_sent(false),
1762 _in_cset_fast_test(),
1763 _dirty_cards_region_list(NULL),
1764 _worker_cset_start_region(NULL),
1765 _worker_cset_start_region_time_stamp(NULL),
1766 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1767 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1768 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1769 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1922 g1_barrier_set()->initialize(cardtable_storage);
1923 // Do later initialization work for concurrent refinement.
1924 _cg1r->init(card_counts_storage);
1925
1926 // 6843694 - ensure that the maximum region index can fit
1927 // in the remembered set structures.
1928 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1929 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1930
1931 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1932 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1933 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1934 "too many cards per region");
1935
1936 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1937
1938 _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
1939
1940 _g1h = this;
1941
1942 _in_cset_fast_test.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
1943 _humongous_is_live.initialize(_hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
1944
1945 // Create the ConcurrentMark data structure and thread.
1946 // (Must do this late, so that "max_regions" is defined.)
1947 _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1948 if (_cm == NULL || !_cm->completed_initialization()) {
1949 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
1950 return JNI_ENOMEM;
1951 }
1952 _cmThread = _cm->cmThread();
1953
1954 // Initialize the from_card cache structure of HeapRegionRemSet.
1955 HeapRegionRemSet::init_heap(max_regions());
1956
1957 // Now expand into the initial heap size.
1958 if (!expand(init_byte_size)) {
1959 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1960 return JNI_ENOMEM;
1961 }
1962
1963 // Perform any initialization actions delegated to the policy.
2011 // Do create of the monitoring and management support so that
2012 // values in the heap have been properly initialized.
2013 _g1mm = new G1MonitoringSupport(this);
2014
2015 G1StringDedup::initialize();
2016
2017 return JNI_OK;
2018 }
2019
// Shuts down G1's concurrent worker threads during VM teardown.
2020 void G1CollectedHeap::stop() {
2021 // Stop all concurrent threads. We do this to make sure these threads
2022 // do not continue to execute and access resources (e.g. gclog_or_tty)
2023 // that are destroyed during shutdown.
2024 _cg1r->stop(); // Concurrent refinement threads.
2025 _cmThread->stop(); // Concurrent mark thread.
2026 if (G1StringDedup::is_enabled()) {
2027 G1StringDedup::stop(); // String deduplication threads, only started when enabled.
2028 }
2029 }
2030
// Resets the table recording which humongous objects were found live;
// only valid when eager reclamation of humongous objects is enabled.
2031 void G1CollectedHeap::clear_humongous_is_live_table() {
2032 guarantee(G1EagerReclaimHumongousObjects, "Should only be called if true");
2033 _humongous_is_live.clear();
2034 }
2035
// Conservative upper bound on the alignment the heap may require.
// G1 manages memory at region granularity, so the largest possible
// region size is the bound reported here.
2036 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2037 return HeapRegion::max_region_size();
2038 }
2039
// Post-construction initialization hook: run the base-class work first,
// then create and configure the reference processors (ref_processing_init).
2040 void G1CollectedHeap::post_initialize() {
2041 CollectedHeap::post_initialize();
2042 ref_processing_init();
2043 }
2044
2045 void G1CollectedHeap::ref_processing_init() {
2046 // Reference processing in G1 currently works as follows:
2047 //
2048 // * There are two reference processor instances. One is
2049 // used to record and process discovered references
2050 // during concurrent marking; the other is used to
2051 // record and process references during STW pauses
2052 // (both full and incremental).
2053 // * Both ref processors need to 'span' the entire heap as
2054 // the regions in the collection set may be dotted around.
2055 //
3396 JavaThread *curr = Threads::first();
3397 while (curr != NULL) {
3398 DirtyCardQueue& dcq = curr->dirty_card_queue();
3399 extra_cards += dcq.size();
3400 curr = curr->next();
3401 }
3402 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3403 size_t buffer_size = dcqs.buffer_size();
3404 size_t buffer_num = dcqs.completed_buffers_num();
3405
3406 // PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
3407 // in bytes - not the number of 'entries'. We need to convert
3408 // into a number of cards.
3409 return (buffer_size * buffer_num + extra_cards) / oopSize;
3410 }
3411
// Number of cards scanned, as accumulated by the G1 remembered set
// (delegates to g1_rem_set()->cardsScanned()).
3412 size_t G1CollectedHeap::cards_scanned() {
3413 return g1_rem_set()->cardsScanned();
3414 }
3415
3416 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
3417 HeapRegion* region = region_at(index);
3418 assert(region->is_starts_humongous(), "Must start a humongous object");
3419 return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
3420 }
3421
// HeapRegionClosure that visits every region and, for each humongous-start
// region, decides whether the humongous object is a candidate for eager
// reclamation.  Candidates are registered with the collection-set fast-test
// table; their (small) remembered sets are flushed into the dirty card
// queue so the referencing cards are re-examined during evacuation.
3422 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3423 private:
3424 size_t _total_humongous; // Humongous-start regions visited so far.
3425 size_t _candidate_humongous; // Of those, how many were nominated as candidates.
3426
3427 DirtyCardQueue _dcq; // Receives remembered-set entries flushed as dirty cards.
3428
// A humongous object is a candidate if it is not an object array and its
// remembered set is empty, or -- when stale references are allowed -- no
// larger than the sparse-table threshold.
3429 bool humongous_region_is_candidate(uint index) {
3430 HeapRegion* region = G1CollectedHeap::heap()->region_at(index);
3431 assert(region->is_starts_humongous(), "Must start a humongous object");
3432 HeapRegionRemSet* const rset = region->rem_set();
3433 bool const allow_stale_refs = G1EagerReclaimHumongousObjectsWithStaleRefs;
3434 return !oop(region->bottom())->is_objArray() &&
3435 ((allow_stale_refs && rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)) ||
3436 (!allow_stale_refs && rset->is_empty()));
3437 }
3438
3439 public:
3440 RegisterHumongousWithInCSetFastTestClosure()
3441 : _total_humongous(0),
3442 _candidate_humongous(0),
3443 _dcq(&JavaThread::dirty_card_queue_set()) {
3444 }
3445
// Examines one region; only humongous-start regions are of interest.
// Always returns false so iteration continues over the whole heap.
3446 virtual bool doHeapRegion(HeapRegion* r) {
3447 if (!r->is_starts_humongous()) {
3448 return false;
3449 }
3450 G1CollectedHeap* g1h = G1CollectedHeap::heap();
3451
3452 uint region_idx = r->hrm_index();
3453 bool is_candidate = humongous_region_is_candidate(region_idx);
3454 // Is_candidate already filters out humongous object with large remembered sets.
3455 // If we have a humongous object with a few remembered sets, we simply flush these
3456 // remembered set entries into the DCQS. That will result in automatic
3457 // re-evaluation of their remembered set entries during the following evacuation
3458 // phase.
3459 if (is_candidate) {
3460 if (!r->rem_set()->is_empty()) {
3461 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
3462 "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
3463 G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
3464 HeapRegionRemSetIterator hrrs(r->rem_set());
3465 size_t card_index;
3466 while (hrrs.has_next(card_index)) {
3467 jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
3468 // The remembered set might contain references to already freed
3469 // regions. Filter out such entries to avoid failing card table
3470 // verification.
3471 if (!g1h->heap_region_containing(bs->addr_for(card_ptr))->is_free()) {
3472 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
// Dirty the card and enqueue it locally for later re-examination.
3473 *card_ptr = CardTableModRefBS::dirty_card_val();
3474 _dcq.enqueue(card_ptr);
3475 }
3476 }
3477 }
3478 r->rem_set()->clear_locked();
3479 }
3480 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
3481 g1h->register_humongous_region_with_cset(region_idx);
3482 _candidate_humongous++;
3483 }
3484 _total_humongous++;
3485
3486 return false;
3487 }
3488
3489 size_t total_humongous() const { return _total_humongous; }
3490 size_t candidate_humongous() const { return _candidate_humongous; }
3491
// Flushes locally buffered dirty cards into the global queue set.
3492 void flush_rem_set_entries() { _dcq.flush(); }
3493 };
3494
3495 void G1CollectedHeap::register_humongous_regions_with_cset() {
3496 if (!G1EagerReclaimHumongousObjects) {
3497 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
3498 return;
3499 }
3500 double time = os::elapsed_counter();
3501
3502 RegisterHumongousWithInCSetFastTestClosure cl;
3503 heap_region_iterate(&cl);
3504
3505 time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3506 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3507 cl.total_humongous(),
3508 cl.candidate_humongous());
3509 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3510
3511 if (_has_humongous_reclaim_candidates || G1TraceEagerReclaimHumongousObjects) {
3512 clear_humongous_is_live_table();
3513 }
3514
3515 // Finally flush all remembered set entries to re-check into the global DCQS.
3516 cl.flush_rem_set_entries();
3517 }
3518
3519 void
3520 G1CollectedHeap::setup_surviving_young_words() {
3521 assert(_surviving_young_words == NULL, "pre-condition");
3522 uint array_length = g1_policy()->young_cset_region_length();
3523 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3524 if (_surviving_young_words == NULL) {
3525 vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3526 "Not enough space for young surv words summary.");
3527 }
3528 memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3529 #ifdef ASSERT
3530 for (uint i = 0; i < array_length; ++i) {
3531 assert( _surviving_young_words[i] == 0, "memset above" );
3532 }
3533 #endif // !ASSERT
3534 }
5956 // remembered set)
5957 // - as soon there is a remembered set entry to the humongous starts region
5958 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5959 // until the end of a concurrent mark.
5960 //
5961 // It is not required to check whether the object has been found dead by marking
5962 // or not, in fact it would prevent reclamation within a concurrent cycle, as
5963 // all objects allocated during that time are considered live.
5964 // SATB marking is even more conservative than the remembered set.
5965 // So if at this point in the collection there is no remembered set entry,
5966 // nobody has a reference to it.
5967 // At the start of collection we flush all refinement logs, and remembered sets
5968 // are completely up-to-date wrt to references to the humongous object.
5969 //
5970 // Other implementation considerations:
5971 // - never consider object arrays at this time because they would pose
5972 // considerable effort for cleaning up the the remembered sets. This is
5973 // required because stale remembered sets might reference locations that
5974 // are currently allocated into.
5975 uint region_idx = r->hrm_index();
5976 if (g1h->humongous_is_live(region_idx) ||
5977 g1h->humongous_region_is_always_live(region_idx)) {
5978
5979 if (G1TraceEagerReclaimHumongousObjects) {
5980 gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
5981 region_idx,
5982 obj->size()*HeapWordSize,
5983 r->bottom(),
5984 r->region_num(),
5985 r->rem_set()->occupied(),
5986 r->rem_set()->strong_code_roots_list_length(),
5987 next_bitmap->isMarked(r->bottom()),
5988 g1h->humongous_is_live(region_idx),
5989 obj->is_objArray()
5990 );
5991 }
5992
5993 return false;
5994 }
5995
5996 guarantee(!obj->is_objArray(),
5997 err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
5998 r->bottom()));
5999
6000 if (G1TraceEagerReclaimHumongousObjects) {
6001 gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d live-other %d obj array %d",
6002 region_idx,
6003 obj->size()*HeapWordSize,
6004 r->bottom(),
6005 r->region_num(),
6006 r->rem_set()->occupied(),
6007 r->rem_set()->strong_code_roots_list_length(),
6008 next_bitmap->isMarked(r->bottom()),
6009 g1h->humongous_is_live(region_idx),
6010 obj->is_objArray()
6011 );
6012 }
6013 // Need to clear mark bit of the humongous object if already set.
6014 if (next_bitmap->isMarked(r->bottom())) {
6015 next_bitmap->clear(r->bottom());
6016 }
6017 _freed_bytes += r->used();
6018 r->set_containing_set(NULL);
6019 _humongous_regions_removed.increment(1u, r->capacity());
6020 g1h->free_humongous_region(r, _free_region_list, false);
6021
6022 return false;
6023 }
6024
6025 HeapRegionSetCount& humongous_free_count() {
6026 return _humongous_regions_removed;
6027 }
6028
6029 size_t bytes_freed() const {
6030 return _freed_bytes;
|
1729
1730 G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
1731 CollectedHeap(),
1732 _g1_policy(policy_),
1733 _dirty_card_queue_set(false),
1734 _into_cset_dirty_card_queue_set(false),
1735 _is_alive_closure_cm(this),
1736 _is_alive_closure_stw(this),
1737 _ref_processor_cm(NULL),
1738 _ref_processor_stw(NULL),
1739 _bot_shared(NULL),
1740 _evac_failure_scan_stack(NULL),
1741 _mark_in_progress(false),
1742 _cg1r(NULL),
1743 _g1mm(NULL),
1744 _refine_cte_cl(NULL),
1745 _full_collection(false),
1746 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1747 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1748 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1749 _humongous_reclaim_candidates(),
1750 _has_humongous_reclaim_candidates(false),
1751 _free_regions_coming(false),
1752 _young_list(new YoungList(this)),
1753 _gc_time_stamp(0),
1754 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1755 _old_plab_stats(OldPLABSize, PLABWeight),
1756 _expand_heap_after_alloc_failure(true),
1757 _surviving_young_words(NULL),
1758 _old_marking_cycles_started(0),
1759 _old_marking_cycles_completed(0),
1760 _concurrent_cycle_started(false),
1761 _heap_summary_sent(false),
1762 _in_cset_fast_test(),
1763 _dirty_cards_region_list(NULL),
1764 _worker_cset_start_region(NULL),
1765 _worker_cset_start_region_time_stamp(NULL),
1766 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1767 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1768 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1769 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1922 g1_barrier_set()->initialize(cardtable_storage);
1923 // Do later initialization work for concurrent refinement.
1924 _cg1r->init(card_counts_storage);
1925
1926 // 6843694 - ensure that the maximum region index can fit
1927 // in the remembered set structures.
1928 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
1929 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
1930
1931 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
1932 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
1933 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
1934 "too many cards per region");
1935
1936 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
1937
1938 _bot_shared = new G1BlockOffsetSharedArray(reserved_region(), bot_storage);
1939
1940 _g1h = this;
1941
1942 _in_cset_fast_test.initialize(
1943 _hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
1944 _humongous_reclaim_candidates.initialize(
1945 _hrm.reserved().start(), _hrm.reserved().end(), HeapRegion::GrainBytes);
1946
1947 // Create the ConcurrentMark data structure and thread.
1948 // (Must do this late, so that "max_regions" is defined.)
1949 _cm = new ConcurrentMark(this, prev_bitmap_storage, next_bitmap_storage);
1950 if (_cm == NULL || !_cm->completed_initialization()) {
1951 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
1952 return JNI_ENOMEM;
1953 }
1954 _cmThread = _cm->cmThread();
1955
1956 // Initialize the from_card cache structure of HeapRegionRemSet.
1957 HeapRegionRemSet::init_heap(max_regions());
1958
1959 // Now expand into the initial heap size.
1960 if (!expand(init_byte_size)) {
1961 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1962 return JNI_ENOMEM;
1963 }
1964
1965 // Perform any initialization actions delegated to the policy.
2013 // Do create of the monitoring and management support so that
2014 // values in the heap have been properly initialized.
2015 _g1mm = new G1MonitoringSupport(this);
2016
2017 G1StringDedup::initialize();
2018
2019 return JNI_OK;
2020 }
2021
// Shuts down G1's concurrent worker threads during VM teardown.
2022 void G1CollectedHeap::stop() {
2023 // Stop all concurrent threads. We do this to make sure these threads
2024 // do not continue to execute and access resources (e.g. gclog_or_tty)
2025 // that are destroyed during shutdown.
2026 _cg1r->stop(); // Concurrent refinement threads.
2027 _cmThread->stop(); // Concurrent mark thread.
2028 if (G1StringDedup::is_enabled()) {
2029 G1StringDedup::stop(); // String deduplication threads, only started when enabled.
2030 }
2031 }
2032
// Conservative upper bound on the alignment the heap may require.
// G1 manages memory at region granularity, so the largest possible
// region size is the bound reported here.
2033 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2034 return HeapRegion::max_region_size();
2035 }
2036
// Post-construction initialization hook: run the base-class work first,
// then create and configure the reference processors (ref_processing_init).
2037 void G1CollectedHeap::post_initialize() {
2038 CollectedHeap::post_initialize();
2039 ref_processing_init();
2040 }
2041
2042 void G1CollectedHeap::ref_processing_init() {
2043 // Reference processing in G1 currently works as follows:
2044 //
2045 // * There are two reference processor instances. One is
2046 // used to record and process discovered references
2047 // during concurrent marking; the other is used to
2048 // record and process references during STW pauses
2049 // (both full and incremental).
2050 // * Both ref processors need to 'span' the entire heap as
2051 // the regions in the collection set may be dotted around.
2052 //
3393 JavaThread *curr = Threads::first();
3394 while (curr != NULL) {
3395 DirtyCardQueue& dcq = curr->dirty_card_queue();
3396 extra_cards += dcq.size();
3397 curr = curr->next();
3398 }
3399 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3400 size_t buffer_size = dcqs.buffer_size();
3401 size_t buffer_num = dcqs.completed_buffers_num();
3402
3403 // PtrQueueSet::buffer_size() and PtrQueue:size() return sizes
3404 // in bytes - not the number of 'entries'. We need to convert
3405 // into a number of cards.
3406 return (buffer_size * buffer_num + extra_cards) / oopSize;
3407 }
3408
// Number of cards scanned, as accumulated by the G1 remembered set
// (delegates to g1_rem_set()->cardsScanned()).
3409 size_t G1CollectedHeap::cards_scanned() {
3410 return g1_rem_set()->cardsScanned();
3411 }
3412
// HeapRegionClosure that visits every region and nominates eligible
// humongous objects (currently only type arrays with small remembered
// sets) as eager-reclaim candidates, registering them with the
// collection-set fast-test table.  Small remembered sets are flushed
// into the dirty card queue so those cards are re-evaluated during
// evacuation.
3413 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3414 private:
3415 size_t _total_humongous; // Humongous-start regions visited so far.
3416 size_t _candidate_humongous; // Of those, how many became candidates.
3417
3418 DirtyCardQueue _dcq; // Receives remembered-set entries flushed as dirty cards.
3419
3420 // We don't nominate objects with many remembered set entries, on
3421 // the assumption that such objects are likely still live.
3422 bool is_remset_small(HeapRegion* region) const {
3423 HeapRegionRemSet* const rset = region->rem_set();
3424 return G1EagerReclaimHumongousObjectsWithStaleRefs
3425 ? rset->occupancy_less_or_equal_than(G1RSetSparseRegionEntries)
3426 : rset->is_empty();
3427 }
3428
// True if the humongous object starting in this region is a type array,
// i.e. a primitive array containing no references.
3429 bool is_typeArray_region(HeapRegion* region) const {
3430 return oop(region->bottom())->is_typeArray();
3431 }
3432
3433 bool humongous_region_is_candidate(G1CollectedHeap* heap, HeapRegion* region) const {
3434 assert(region->is_starts_humongous(), "Must start a humongous object");
3435
3436 // In order to maintain SATB invariants, during concurrent mark
3437 // we should only nominate an object containing references if it
3438 // was allocated after the start of marking, as such an object
3439 // doesn't need to have its references scanned.
3440 //
3441 // Also, we must not reclaim an object that is in the concurrent
3442 // mark stack. Objects allocated since the start of marking are
3443 // never added to the mark stack.
3444 //
3445 // However, we presently only nominate is_typeArray() objects.
3446 // A humongous object containing references induces remembered
3447 // set entries on other regions. In order to reclaim such an
3448 // object, those remembered sets would need to be cleaned up.
3449 //
3450 // We also treat is_typeArray() objects specially, allowing them
3451 // to be reclaimed even if allocated before the start of
3452 // concurrent mark. For this we rely on mark stack insertion to
3453 // exclude is_typeArray() objects, preventing reclaiming an object
3454 // that is in the mark stack. Frequent allocation and drop of
3455 // large binary blobs is an important use case for eager reclaim,
3456 // and this special handling may reduce needed headroom.
3457
3458 return is_typeArray_region(region) && is_remset_small(region);
3459 }
3460
3461 public:
3462 RegisterHumongousWithInCSetFastTestClosure()
3463 : _total_humongous(0),
3464 _candidate_humongous(0),
3465 _dcq(&JavaThread::dirty_card_queue_set()) {
3466 }
3467
// Examines one region; only humongous-start regions are of interest.
// Always returns false so iteration continues over the whole heap.
3468 virtual bool doHeapRegion(HeapRegion* r) {
3469 if (!r->is_starts_humongous()) {
3470 return false;
3471 }
3472 G1CollectedHeap* g1h = G1CollectedHeap::heap();
3473
3474 if (!humongous_region_is_candidate(g1h, r)) {
3475 g1h->remove_humongous_reclaim_candidate(r->hrm_index());
3476 } else {
3477 // Is_candidate already filters out humongous object with large remembered sets.
3478 // If we have a humongous object with a few remembered sets, we simply flush these
3479 // remembered set entries into the DCQS. That will result in automatic
3480 // re-evaluation of their remembered set entries during the following evacuation
3481 // phase.
3482 if (!r->rem_set()->is_empty()) {
3483 guarantee(r->rem_set()->occupancy_less_or_equal_than(G1RSetSparseRegionEntries),
3484 "Found a not-small remembered set here. This is inconsistent with previous assumptions.");
3485 G1SATBCardTableLoggingModRefBS* bs = g1h->g1_barrier_set();
3486 HeapRegionRemSetIterator hrrs(r->rem_set());
3487 size_t card_index;
3488 while (hrrs.has_next(card_index)) {
3489 jbyte* card_ptr = (jbyte*)bs->byte_for_index(card_index);
3490 // The remembered set might contain references to already freed
3491 // regions. Filter out such entries to avoid failing card table
3492 // verification.
3493 if (!g1h->heap_region_containing(bs->addr_for(card_ptr))->is_free()) {
3494 if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
// Dirty the card and enqueue it locally for later re-examination.
3495 *card_ptr = CardTableModRefBS::dirty_card_val();
3496 _dcq.enqueue(card_ptr);
3497 }
3498 }
3499 }
3500 r->rem_set()->clear_locked();
3501 }
3502 assert(r->rem_set()->is_empty(), "At this point any humongous candidate remembered set must be empty.");
3503 uint rindex = r->hrm_index();
3504 g1h->add_humongous_reclaim_candidate(rindex);
3505 g1h->register_humongous_region_with_cset(rindex);
3506 _candidate_humongous++;
3507 }
3508 _total_humongous++;
3509
3510 return false;
3511 }
3512
3513 size_t total_humongous() const { return _total_humongous; }
3514 size_t candidate_humongous() const { return _candidate_humongous; }
3515
// Flushes locally buffered dirty cards into the global queue set.
3516 void flush_rem_set_entries() { _dcq.flush(); }
3517 };
3518
3519 void G1CollectedHeap::register_humongous_regions_with_cset() {
3520 if (!G1EagerReclaimHumongousObjects) {
3521 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0.0, 0, 0);
3522 return;
3523 }
3524 double time = os::elapsed_counter();
3525
3526 // Collect reclaim candidate information and register candidates with cset.
3527 RegisterHumongousWithInCSetFastTestClosure cl;
3528 heap_region_iterate(&cl);
3529
3530 time = ((double)(os::elapsed_counter() - time) / os::elapsed_frequency()) * 1000.0;
3531 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(time,
3532 cl.total_humongous(),
3533 cl.candidate_humongous());
3534 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3535
3536 // Finally flush all remembered set entries to re-check into the global DCQS.
3537 cl.flush_rem_set_entries();
3538 }
3539
3540 void
3541 G1CollectedHeap::setup_surviving_young_words() {
3542 assert(_surviving_young_words == NULL, "pre-condition");
3543 uint array_length = g1_policy()->young_cset_region_length();
3544 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3545 if (_surviving_young_words == NULL) {
3546 vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3547 "Not enough space for young surv words summary.");
3548 }
3549 memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3550 #ifdef ASSERT
3551 for (uint i = 0; i < array_length; ++i) {
3552 assert( _surviving_young_words[i] == 0, "memset above" );
3553 }
3554 #endif // !ASSERT
3555 }
5977 // remembered set)
5978 // - as soon there is a remembered set entry to the humongous starts region
5979 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
5980 // until the end of a concurrent mark.
5981 //
5982 // It is not required to check whether the object has been found dead by marking
5983 // or not, in fact it would prevent reclamation within a concurrent cycle, as
5984 // all objects allocated during that time are considered live.
5985 // SATB marking is even more conservative than the remembered set.
5986 // So if at this point in the collection there is no remembered set entry,
5987 // nobody has a reference to it.
5988 // At the start of collection we flush all refinement logs, and remembered sets
5989 // are completely up-to-date wrt to references to the humongous object.
5990 //
5991 // Other implementation considerations:
5992 // - never consider object arrays at this time because they would pose
5993 // considerable effort for cleaning up the the remembered sets. This is
5994 // required because stale remembered sets might reference locations that
5995 // are currently allocated into.
5996 uint region_idx = r->hrm_index();
5997 if (!g1h->is_humongous_reclaim_candidate(region_idx) ||
5998 !r->rem_set()->is_empty()) {
5999
6000 if (G1TraceEagerReclaimHumongousObjects) {
6001 gclog_or_tty->print_cr("Live humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
6002 region_idx,
6003 obj->size()*HeapWordSize,
6004 r->bottom(),
6005 r->region_num(),
6006 r->rem_set()->occupied(),
6007 r->rem_set()->strong_code_roots_list_length(),
6008 next_bitmap->isMarked(r->bottom()),
6009 g1h->is_humongous_reclaim_candidate(region_idx),
6010 obj->is_typeArray()
6011 );
6012 }
6013
6014 return false;
6015 }
6016
6017 guarantee(obj->is_typeArray(),
6018 err_msg("Only eagerly reclaiming type arrays is supported, but the object "
6019 PTR_FORMAT " is not.",
6020 r->bottom()));
6021
6022 if (G1TraceEagerReclaimHumongousObjects) {
6023 gclog_or_tty->print_cr("Dead humongous region %u size "SIZE_FORMAT" start "PTR_FORMAT" length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is marked %d reclaim candidate %d type array %d",
6024 region_idx,
6025 obj->size()*HeapWordSize,
6026 r->bottom(),
6027 r->region_num(),
6028 r->rem_set()->occupied(),
6029 r->rem_set()->strong_code_roots_list_length(),
6030 next_bitmap->isMarked(r->bottom()),
6031 g1h->is_humongous_reclaim_candidate(region_idx),
6032 obj->is_typeArray()
6033 );
6034 }
6035 // Need to clear mark bit of the humongous object if already set.
6036 if (next_bitmap->isMarked(r->bottom())) {
6037 next_bitmap->clear(r->bottom());
6038 }
6039 _freed_bytes += r->used();
6040 r->set_containing_set(NULL);
6041 _humongous_regions_removed.increment(1u, r->capacity());
6042 g1h->free_humongous_region(r, _free_region_list, false);
6043
6044 return false;
6045 }
6046
6047 HeapRegionSetCount& humongous_free_count() {
6048 return _humongous_regions_removed;
6049 }
6050
6051 size_t bytes_freed() const {
6052 return _freed_bytes;
|