  SharedHeap(policy_),
  _g1_policy(policy_),
  _dirty_card_queue_set(false),
  _into_cset_dirty_card_queue_set(false),
  _is_alive_closure_cm(this),
  _is_alive_closure_stw(this),
  _ref_processor_cm(NULL),
  _ref_processor_stw(NULL),
  _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  _bot_shared(NULL),
  _evac_failure_scan_stack(NULL),
  _mark_in_progress(false),
  _cg1r(NULL), _summary_bytes_used(0),
  _g1mm(NULL),
  _refine_cte_cl(NULL),
  _full_collection(false),
  _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
  _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
  _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
  _humongous_is_live(),
  _has_humongous_reclaim_candidates(false),
  _free_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _retained_old_gc_alloc_region(NULL),
  _survivor_plab_stats(YoungPLABSize, PLABWeight),
  _old_plab_stats(OldPLABSize, PLABWeight),
  _expand_heap_after_alloc_failure(true),
  _surviving_young_words(NULL),
  _old_marking_cycles_started(0),
  _old_marking_cycles_completed(0),
  _concurrent_cycle_started(false),
  _in_cset_fast_test(),
  _dirty_cards_region_list(NULL),
  _worker_cset_start_region(NULL),
  _worker_cset_start_region_time_stamp(NULL),
  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {

  _cg1r->init();

  // 6843694 - ensure that the maximum region index can fit
  // in the remembered set structures.
  const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
  guarantee((max_regions() - 1) <= max_region_idx, "too many regions");

  size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
  guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
  guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
            "too many cards per region");

  FreeRegionList::set_unrealistically_long_length(max_regions() + 1);

  _bot_shared = new G1BlockOffsetSharedArray(_reserved,
                                             heap_word_size(init_byte_size));

  _g1h = this;

  _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
  _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
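  // Note: both tables above cover the reserved range at region granularity
  // (GrainBytes), i.e. effectively one entry per heap region. Keeping the
  // humongous liveness table parallel to the in-cset fast test lets a
  // humongous candidate be looked up as cheaply as collection set membership.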

  // Create the ConcurrentMark data structure and thread.
  // (Must do this late, so that "max_regions" is defined.)
  _cm = new ConcurrentMark(this, heap_rs);
  if (_cm == NULL || !_cm->completed_initialization()) {
    vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
    return JNI_ENOMEM;
  }
  _cmThread = _cm->cmThread();

  // Initialize the from_card cache structure of HeapRegionRemSet.
  HeapRegionRemSet::init_heap(max_regions());

  // Now expand into the initial heap size.
  if (!expand(init_byte_size)) {
    vm_shutdown_during_initialization("Failed to allocate initial heap.");
    return JNI_ENOMEM;
  }

  // Perform any initialization actions delegated to the policy.
  // Create the monitoring and management support so that
  // values in the heap have been properly initialized.
  _g1mm = new G1MonitoringSupport(this);

  G1StringDedup::initialize();

  return JNI_OK;
}

void G1CollectedHeap::stop() {
  // Stop all concurrent threads. We do this to make sure these threads
  // do not continue to execute and access resources (e.g. gclog_or_tty)
  // that are destroyed during shutdown.
  _cg1r->stop();
  _cmThread->stop();
  if (G1StringDedup::is_enabled()) {
    G1StringDedup::stop();
  }
}

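// Called from gc_prologue() below when G1ReclaimDeadHumongousObjectsAtYoungGC
// is enabled, so that humongous liveness information from the previous pause
// does not carry over into the next one.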
void G1CollectedHeap::clear_humongous_is_live_table() {
  _humongous_is_live.clear();
}

size_t G1CollectedHeap::conservative_max_heap_alignment() {
  return HeapRegion::max_region_size();
}

void G1CollectedHeap::ref_processing_init() {
  // Reference processing in G1 currently works as follows:
  //
  // * There are two reference processor instances. One is
  //   used to record and process discovered references
  //   during concurrent marking; the other is used to
  //   record and process references during STW pauses
  //   (both full and incremental).
  // * Both ref processors need to 'span' the entire heap as
  //   the regions in the collection set may be dotted around.
  //
  // * For the concurrent marking ref processor:
  //   * Reference discovery is enabled at initial marking.
  //   * Reference discovery is disabled and the discovered
  //     references processed etc during remarking.
  //   * Reference discovery is MT (see below).
}
#endif // PRODUCT

G1CollectedHeap* G1CollectedHeap::heap() {
  assert(_sh->kind() == CollectedHeap::G1CollectedHeap,
         "not a garbage-first heap");
  return _g1h;
}

void G1CollectedHeap::gc_prologue(bool full /* Ignored */) {
  // always_do_update_barrier = false;
  assert(InlineCacheBuffer::is_empty(), "should have cleaned up ICBuffer");
  // Fill TLABs and such
  accumulate_statistics_all_tlabs();
  ensure_parsability(true);

  if (G1SummarizeRSetStats && (G1SummarizeRSetStatsPeriod > 0) &&
      (total_collections() % G1SummarizeRSetStatsPeriod == 0)) {
    g1_rem_set()->print_periodic_summary_info("Before GC RS summary");
  }

  if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
    clear_humongous_is_live_table();
  }
}

void G1CollectedHeap::gc_epilogue(bool full /* Ignored */) {

  if (G1SummarizeRSetStats &&
      (G1SummarizeRSetStatsPeriod > 0) &&
      // we are at the end of the GC. Total collections has already been increased.
      ((total_collections() - 1) % G1SummarizeRSetStatsPeriod == 0)) {
    g1_rem_set()->print_periodic_summary_info("After GC RS summary");
  }

  // FIXME: what is this about?
  // I'm ignoring the "fill_newgen()" call if "alloc_event_enabled"
  // is set.
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_empty(),
                           "derived pointer present"));
  // always_do_update_barrier = true;

  resize_all_tlabs();

  JavaThread *curr = Threads::first();
  while (curr != NULL) {
    DirtyCardQueue& dcq = curr->dirty_card_queue();
    extra_cards += dcq.size();
    curr = curr->next();
  }
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  size_t buffer_size = dcqs.buffer_size();
  size_t buffer_num = dcqs.completed_buffers_num();

  // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
  // in bytes - not the number of 'entries'. We need to convert
  // into a number of cards.
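  // For example, assuming a 64-bit VM (where oopSize is 8), one completed
  // 4 KB buffer accounts for 4096 / 8 = 512 card entries.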
  return (buffer_size * buffer_num + extra_cards) / oopSize;
}

size_t G1CollectedHeap::cards_scanned() {
  return g1_rem_set()->cardsScanned();
}

bool G1CollectedHeap::humongous_region_is_always_live(HeapRegion* region) {
  assert(region->startsHumongous(), "Must start a humongous object");
  return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
}
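// Note on the predicate above: a humongous region is treated as always live
// if it holds an object array (eager reclaim of those is not supported; see
// the rationale in G1FreeHumongousRegionClosure below) or if its remembered
// set is non-empty, i.e. some other region may still reference the object.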

class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
 private:
  size_t _total_humongous;
  size_t _candidate_humongous;
 public:
  RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    if (!r->startsHumongous()) {
      return false;
    }
    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    bool is_candidate = !g1h->humongous_region_is_always_live(r);
    if (is_candidate) {
      // Only register candidates: there is no point in trying to reclaim a
      // humongous object that we already know will be treated as live. A
      // young collection will not decrease the number of remembered set
      // entries for such a region.
      g1h->register_humongous_region_with_in_cset_fast_test(r->hrs_index());
      _candidate_humongous++;
    }
    _total_humongous++;

    return false;
  }

  size_t total_humongous() const { return _total_humongous; }
  size_t candidate_humongous() const { return _candidate_humongous; }
};

void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
  RegisterHumongousWithInCSetFastTestClosure cl;
  heap_region_iterate(&cl);
  g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(),
                                                                  cl.candidate_humongous());
  _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
}
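// Note on the registration above: a candidate region is entered into the
// in-cset fast test table, so the evacuation-time closures below, which
// already consult that table for every reference, can also detect references
// into candidate humongous regions without a separate lookup.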

void
G1CollectedHeap::setup_surviving_young_words() {
  assert(_surviving_young_words == NULL, "pre-condition");
  uint array_length = g1_policy()->young_cset_region_length();
  _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
  if (_surviving_young_words == NULL) {
    vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
                          "Not enough space for young surv words summary.");
  }
  memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
#ifdef ASSERT
  for (uint i = 0; i < array_length; ++i) {
    assert(_surviving_young_words[i] == 0, "memset above");
  }
#endif // ASSERT
}

void
G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
  MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
}
      g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);

#if YOUNG_LIST_VERBOSE
      gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
      _young_list->print();
#endif // YOUNG_LIST_VERBOSE

      if (g1_policy()->during_initial_mark_pause()) {
        concurrent_mark()->checkpointRootsInitialPre();
      }

#if YOUNG_LIST_VERBOSE
      gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
      _young_list->print();
      g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE

      g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);

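      // Scan for dead-humongous-object reclaim candidates and register them
      // with the in-cset fast test, so that evacuation below notices any
      // remaining references into candidate regions (see the copy and
      // keep-alive closures further down).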
      if (G1ReclaimDeadHumongousObjectsAtYoungGC) {
        register_humongous_regions_with_in_cset_fast_test();
      }

      _cm->note_start_of_gc();
      // We should not verify the per-thread SATB buffers given that
      // we have not filtered them yet (we'll do so during the
      // GC). We also call this after finalize_cset() to
      // ensure that the CSet has been finalized.
      _cm->verify_no_cset_oops(true  /* verify_stacks */,
                               true  /* verify_enqueued_buffers */,
                               false /* verify_thread_buffers */,
                               true  /* verify_fingers */);

      if (_hr_printer.is_active()) {
        HeapRegion* hr = g1_policy()->collection_set();
        while (hr != NULL) {
          G1HRPrinter::RegionType type;
          if (!hr->is_young()) {
            type = G1HRPrinter::Old;
          } else if (hr->is_survivor()) {
            type = G1HRPrinter::Survivor;
          } else {
            type = G1HRPrinter::Eden;

      setup_surviving_young_words();

      // Initialize the GC alloc regions.
      init_gc_alloc_regions(evacuation_info);

      // Actually do the work...
      evacuate_collection_set(evacuation_info);

      // We do this mainly to verify the per-thread SATB buffers
      // (which have been filtered by now) since we didn't verify
      // them earlier. No point in re-checking the stacks / enqueued
      // buffers given that the CSet has not changed since last time
      // we checked.
      _cm->verify_no_cset_oops(false /* verify_stacks */,
                               false /* verify_enqueued_buffers */,
                               true  /* verify_thread_buffers */,
                               true  /* verify_fingers */);

      free_collection_set(g1_policy()->collection_set(), evacuation_info);
      if (G1ReclaimDeadHumongousObjectsAtYoungGC && _has_humongous_reclaim_candidates) {
        eagerly_reclaim_humongous_regions();
      }
      g1_policy()->clear_collection_set();

      cleanup_surviving_young_words();

      // Start a new incremental collection set for the next pause.
      g1_policy()->start_incremental_cset_building();

      clear_cset_fast_test();

      _young_list->reset_sampled_info();

      // Don't check the whole heap at this point as the
      // GC alloc regions from this pause have been tagged
      // as survivors and moved on to the survivor list.
      // Survivor regions will fail the !is_young() check.
      assert(check_young_list_empty(false /* check_heap */),
             "young list should be empty");

#if YOUNG_LIST_VERBOSE
      gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");

template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
    _scanned_klass->record_modified_oops();
  }
}

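// A note on the copy closure below, as extended for eager humongous reclaim:
// the in-cset fast test now also answers true for candidate humongous
// objects. Those are never copied; for them copy_to_survivor_space (not part
// of this excerpt) is expected to return NULL after recording the object as
// live, in which case the reference is left in place and the object falls
// through to the needs_marking path.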
template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

  if (oopDesc::is_null(heap_oop)) {
    return;
  }

  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->queue_num(), "sanity");
  bool needs_marking = true;

  if (_g1->is_in_cset_or_humongous(obj)) {
    oop forwardee;
    if (obj->is_forwarded()) {
      forwardee = obj->forwardee();
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(obj);
    }
    if (forwardee != NULL) {
      oopDesc::encode_store_heap_oop(p, forwardee);
      if (do_mark_object != G1MarkNone && forwardee != obj) {
        // If the object is self-forwarded we don't need to explicitly
        // mark it, the evacuation failure protocol will do so.
        mark_forwarded_object(obj, forwardee);
      }

      if (barrier == G1BarrierKlass) {
        do_klass_barrier(p, forwardee);
      }
      needs_marking = false;
    }
  }
  if (needs_marking) {
    // The object is either not in the collection set, or it is a humongous
    // object that was not copied. If we're a root scanning closure during
    // an initial mark pause then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
      mark_object(obj);
    }
  }

  if (barrier == G1BarrierEvac) {
    _par_scan_state->update_rs(_from, p, _worker_id);
  }
}

template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);

class G1ParEvacuateFollowersClosure : public VoidClosure {
 protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadState* _par_scan_state;
  RefToScanQueueSet* _queues;
  bool do_object_b(oop p) {
    return p != NULL;
  }
};

bool G1STWIsAliveClosure::do_object_b(oop p) {
  // An object is reachable if it is outside the collection set,
  // or is inside and copied.
  return !_g1->obj_in_cs(p) || p->is_forwarded();
}

// Non Copying Keep Alive closure: instead of copying, a reference into the
// collection set is updated to point at the existing forwardee, and a
// reference to a candidate humongous object marks that object as live
// (humongous objects are never moved).
class G1KeepAliveClosure: public OopClosure {
  G1CollectedHeap* _g1;
 public:
  G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
  void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
  void do_oop(oop* p) {
    oop obj = *p;

    if (obj == NULL || !_g1->is_in_cset_or_humongous(obj)) {
      return;
    }
    if (_g1->is_in_cset(obj)) {
      assert(obj->is_forwarded(), "invariant");
      *p = obj->forwardee();
    } else {
      assert(!obj->is_forwarded(), "invariant");
      _g1->set_humongous_is_live(obj);
    }
  }
};

// Copying Keep Alive closure - can be called from both
// serial and parallel code as long as different worker
// threads utilize different G1ParScanThreadState instances
// and different queues.

class G1CopyingKeepAliveClosure: public OopClosure {
  G1CollectedHeap* _g1h;
  OopClosure* _copy_non_heap_obj_cl;
  G1ParScanThreadState* _par_scan_state;

 public:
  G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
                            OopClosure* non_heap_obj_cl,
                            G1ParScanThreadState* pss):
    _g1h(g1h),
    _copy_non_heap_obj_cl(non_heap_obj_cl),
    _par_scan_state(pss)
  {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(oop* p)       { do_oop_work(p); }

  template <class T> void do_oop_work(T* p) {
    oop obj = oopDesc::load_decode_heap_oop(p);

    if (_g1h->is_in_cset_or_humongous(obj)) {
      // If the referent object has been forwarded (either copied
      // to a new location or to itself in the event of an
      // evacuation failure) then we need to update the reference
      // field and, if both reference and referent are in the G1
      // heap, update the RSet for the referent.
      //
      // If the referent has not been forwarded then we have to
      // keep it alive by policy. Therefore we have to copy the
      // referent.
      //
      // If the reference field is in the G1 heap then we can push
      // on the PSS queue. When the queue is drained (after each
      // phase of reference processing) the object and its followers
      // will be copied, the reference field set to point to the
      // new location, and the RSet updated. Otherwise we need to
      // use the non-heap or metadata closures directly to copy
      // the referent object and update the pointer, while avoiding
      // updating the RSet.

      if (_g1h->is_in_g1_reserved(p)) {
        _par_scan_state->push_on_queue(p);

  evacuation_info.set_regions_freed(local_free_list.length());
  policy->record_max_rs_lengths(rs_lengths);
  policy->cset_regions_freed();

  double end_sec = os::elapsedTime();
  double elapsed_ms = (end_sec - start_sec) * 1000.0;

  if (non_young) {
    non_young_time_ms += elapsed_ms;
  } else {
    young_time_ms += elapsed_ms;
  }

  prepend_to_freelist(&local_free_list);
  decrement_summary_bytes(pre_used);
  policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
  policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
}

class G1FreeHumongousRegionClosure : public HeapRegionClosure {
 private:
  FreeRegionList* _free_region_list;
  HeapRegionSet* _proxy_set;
  HeapRegionSetCount _humongous_regions_removed;
  size_t _freed_bytes;
 public:

  G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
    _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
  }

  virtual bool doHeapRegion(HeapRegion* r) {
    if (!r->startsHumongous()) {
      return false;
    }

    G1CollectedHeap* g1h = G1CollectedHeap::heap();

    // The following checks for whether the humongous object is live are
    // sufficient. The main additional check (in addition to having a
    // reference from the roots or the young gen) is whether the humongous
    // object has a remembered set entry.
    //
    // A humongous object cannot be live if there is no remembered set for it
    // because:
    // - there can be no references from within humongous starts regions
    //   referencing the object because we never allocate other objects into
    //   them. (I.e. there are no intra-region references that may be missed
    //   by the remembered set.)
    // - as soon as there is a remembered set entry to the humongous starts
    //   region (i.e. it has "escaped" to an old object) this remembered set
    //   entry will stay until the end of a concurrent mark.
    //
    // It is not required to check whether the object has been found dead by
    // marking or not; in fact that would prevent reclamation within a
    // concurrent cycle, as all objects allocated during that time are
    // considered live. SATB marking is even more conservative than the
    // remembered set, so if at this point in the collection there is no
    // remembered set entry, nobody has a reference to the object.
    // At the start of the collection we flush all refinement logs, so the
    // remembered sets are completely up-to-date with respect to references
    // to the humongous object.
    //
    // Other implementation considerations:
    // - never consider object arrays: while they are a valid target, they
    //   have not been observed to be used as temporary objects.
    // - they would also require considerable effort to clean up the
    //   remembered sets.
    // While this cleanup is not strictly necessary (nor does it need to be
    // done instantly), object arrays occur as reclaim candidates so rarely
    // that skipping them saves us this additional complexity.
    if (g1h->humongous_is_live(r->hrs_index()) ||
        g1h->humongous_region_is_always_live(r)) {

      if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
        gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d",
                               r->isHumongous(),
                               r->hrs_index(),
                               r->rem_set()->occupied(),
                               r->rem_set()->strong_code_roots_list_length(),
                               g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(),
                               g1h->humongous_is_live(r->hrs_index()),
                               oop(r->bottom())->is_objArray());
      }

      return false;
    }

    guarantee(!((oop)(r->bottom()))->is_objArray(),
              err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is.",
                      r->bottom()));

    if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
      gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d",
                             r->isHumongous(),
                             r->bottom(),
                             r->hrs_index(),
                             r->region_num(),
                             r->rem_set()->occupied(),
                             r->rem_set()->strong_code_roots_list_length(),
                             g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(),
                             g1h->humongous_is_live(r->hrs_index()),
                             oop(r->bottom())->is_objArray());
    }
    _freed_bytes += r->used();
    r->set_containing_set(NULL);
    _humongous_regions_removed.increment(1u, r->capacity());
    g1h->free_humongous_region(r, _free_region_list, false);

    return false;
  }

  HeapRegionSetCount& humongous_free_count() {
    return _humongous_regions_removed;
  }

  size_t bytes_freed() const {
    return _freed_bytes;
  }

  size_t humongous_reclaimed() const {
    return _humongous_regions_removed.length();
  }
};

void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
  assert_at_safepoint(true);
  guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Feature must be enabled");
  guarantee(_has_humongous_reclaim_candidates, "Should not reach here if no candidates for eager reclaim were found.");

  double start_time = os::elapsedTime();

  FreeRegionList local_cleanup_list("Local Humongous Cleanup List");

  G1FreeHumongousRegionClosure cl(&local_cleanup_list);
  heap_region_iterate(&cl);

  HeapRegionSetCount empty_set;
  remove_from_old_sets(empty_set, cl.humongous_free_count());

  G1HRPrinter* hr_printer = _g1h->hr_printer();
  if (hr_printer->is_active()) {
    FreeRegionListIterator iter(&local_cleanup_list);
    while (iter.more_available()) {
      HeapRegion* hr = iter.get_next();
      hr_printer->cleanup(hr);
    }
  }

  prepend_to_freelist(&local_cleanup_list);
  decrement_summary_bytes(cl.bytes_freed());

  g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
                                                                    cl.humongous_reclaimed());
}
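// Usage note: as wired up in the pause code above, this is invoked right
// after free_collection_set(), and only when the candidate scan at the
// start of the pause found at least one reclaim candidate.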

// This routine is similar to free_collection_set() above, but does not
// record any policy statistics or update free lists; we are abandoning
// the current incremental collection set in preparation for a
// full collection. After the full GC we will start to build up
// the incremental collection set again.
// This is only called when we're doing a full collection
// and is immediately followed by the tearing down of the young list.

void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
  HeapRegion* cur = cs_head;

  while (cur != NULL) {
    HeapRegion* next = cur->next_in_collection_set();
    assert(cur->in_collection_set(), "bad CS");
    cur->set_next_in_collection_set(NULL);
    cur->set_in_collection_set(false);
    cur->set_young_index_in_cset(-1);
    cur = next;
  }
}