1909 SharedHeap(policy_),
1910 _g1_policy(policy_),
1911 _dirty_card_queue_set(false),
1912 _into_cset_dirty_card_queue_set(false),
1913 _is_alive_closure_cm(this),
1914 _is_alive_closure_stw(this),
1915 _ref_processor_cm(NULL),
1916 _ref_processor_stw(NULL),
1917 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1918 _bot_shared(NULL),
1919 _evac_failure_scan_stack(NULL),
1920 _mark_in_progress(false),
1921 _cg1r(NULL), _summary_bytes_used(0),
1922 _g1mm(NULL),
1923 _refine_cte_cl(NULL),
1924 _full_collection(false),
1925 _free_list("Master Free List", new MasterFreeRegionListMtSafeChecker()),
1926 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1927 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1928 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1929 _humongous_is_live(),
1930 _has_humongous_reclaim_candidates(false),
1931 _free_regions_coming(false),
1932 _young_list(new YoungList(this)),
1933 _gc_time_stamp(0),
1934 _retained_old_gc_alloc_region(NULL),
1935 _survivor_plab_stats(YoungPLABSize, PLABWeight),
1936 _old_plab_stats(OldPLABSize, PLABWeight),
1937 _expand_heap_after_alloc_failure(true),
1938 _surviving_young_words(NULL),
1939 _old_marking_cycles_started(0),
1940 _old_marking_cycles_completed(0),
1941 _concurrent_cycle_started(false),
1942 _in_cset_fast_test(),
1943 _dirty_cards_region_list(NULL),
1944 _worker_cset_start_region(NULL),
1945 _worker_cset_start_region_time_stamp(NULL),
1946 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1947 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1948 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1949 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1950
2067 _cg1r->init();
2068
2069 // 6843694 - ensure that the maximum region index can fit
2070 // in the remembered set structures.
2071 const uint max_region_idx = (1U << (sizeof(RegionIdx_t)*BitsPerByte-1)) - 1;
2072 guarantee((max_regions() - 1) <= max_region_idx, "too many regions");
2073
2074 size_t max_cards_per_region = ((size_t)1 << (sizeof(CardIdx_t)*BitsPerByte-1)) - 1;
2075 guarantee(HeapRegion::CardsPerRegion > 0, "make sure it's initialized");
2076 guarantee(HeapRegion::CardsPerRegion < max_cards_per_region,
2077 "too many cards per region");
2078
2079 FreeRegionList::set_unrealistically_long_length(max_regions() + 1);
2080
2081 _bot_shared = new G1BlockOffsetSharedArray(_reserved,
2082 heap_word_size(init_byte_size));
2083
2084 _g1h = this;
2085
2086 _in_cset_fast_test.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
2087 _humongous_is_live.initialize(_g1_reserved.start(), _g1_reserved.end(), HeapRegion::GrainBytes);
2088
2089 // Create the ConcurrentMark data structure and thread.
2090 // (Must do this late, so that "max_regions" is defined.)
2091 _cm = new ConcurrentMark(this, heap_rs);
2092 if (_cm == NULL || !_cm->completed_initialization()) {
2093 vm_shutdown_during_initialization("Could not create/initialize ConcurrentMark");
2094 return JNI_ENOMEM;
2095 }
2096 _cmThread = _cm->cmThread();
2097
2098 // Initialize the from_card cache structure of HeapRegionRemSet.
2099 HeapRegionRemSet::init_heap(max_regions());
2100
2101 // Now expand into the initial heap size.
2102 if (!expand(init_byte_size)) {
2103 vm_shutdown_during_initialization("Failed to allocate initial heap.");
2104 return JNI_ENOMEM;
2105 }
2106
2107 // Perform any initialization actions delegated to the policy.
2163   // Create the monitoring and management support now that the
2164   // values in the heap have been properly initialized.
2165 _g1mm = new G1MonitoringSupport(this);
2166
2167 G1StringDedup::initialize();
2168
2169 return JNI_OK;
2170 }
2171
2172 void G1CollectedHeap::stop() {
2173 // Stop all concurrent threads. We do this to make sure these threads
2174 // do not continue to execute and access resources (e.g. gclog_or_tty)
2175 // that are destroyed during shutdown.
2176 _cg1r->stop();
2177 _cmThread->stop();
2178 if (G1StringDedup::is_enabled()) {
2179 G1StringDedup::stop();
2180 }
2181 }
2182
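// Clears the "humongous is live" table. This table records, per region, whether
// a live reference into a starts-humongous region was found during the current
// collection; it is cleared lazily, i.e. only when eager-reclaim candidates
// actually exist (see register_humongous_regions_with_in_cset_fast_test()).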
2183 void G1CollectedHeap::clear_humongous_is_live_table() {
2184 guarantee(G1ReclaimDeadHumongousObjectsAtYoungGC, "Should only be called if true");
2185 _humongous_is_live.clear();
2186 }
2187
2188 size_t G1CollectedHeap::conservative_max_heap_alignment() {
2189 return HeapRegion::max_region_size();
2190 }
2191
2192 void G1CollectedHeap::ref_processing_init() {
2193 // Reference processing in G1 currently works as follows:
2194 //
2195 // * There are two reference processor instances. One is
2196 // used to record and process discovered references
2197 // during concurrent marking; the other is used to
2198 // record and process references during STW pauses
2199 // (both full and incremental).
2200 // * Both ref processors need to 'span' the entire heap as
2201 // the regions in the collection set may be dotted around.
2202 //
2203 // * For the concurrent marking ref processor:
2204 // * Reference discovery is enabled at initial marking.
2205 // * Reference discovery is disabled and the discovered
2206 //     references are processed, etc., during remarking.
2207 // * Reference discovery is MT (see below).
3755 JavaThread *curr = Threads::first();
3756 while (curr != NULL) {
3757 DirtyCardQueue& dcq = curr->dirty_card_queue();
3758 extra_cards += dcq.size();
3759 curr = curr->next();
3760 }
3761 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
3762 size_t buffer_size = dcqs.buffer_size();
3763 size_t buffer_num = dcqs.completed_buffers_num();
3764
3765   // PtrQueueSet::buffer_size() and PtrQueue::size() return sizes
3766   // in bytes - not the number of 'entries'. We need to convert
3767   // that into a number of cards.
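  // For example, assuming 64-bit oops (oopSize == 8), a single completed 8K
  // buffer would account for 8192 / 8 == 1024 card entries.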
3768 return (buffer_size * buffer_num + extra_cards) / oopSize;
3769 }
3770
3771 size_t G1CollectedHeap::cards_scanned() {
3772 return g1_rem_set()->cardsScanned();
3773 }
3774
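// A humongous region is treated as always live when we cannot cheaply prove it
// dead: object arrays are never eagerly reclaimed, and a non-empty remembered
// set means some other object may still reference it (see the detailed
// liveness discussion in G1FreeHumongousRegionClosure below).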
3775 bool G1CollectedHeap::humongous_region_is_always_live(uint index) {
3776 HeapRegion* region = region_at(index);
3777 assert(region->startsHumongous(), "Must start a humongous object");
3778 return oop(region->bottom())->is_objArray() || !region->rem_set()->is_empty();
3779 }
3780
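// Heap region closure that registers every candidate starts-humongous region
// with the in-cset fast test table, so that the evacuation closures can detect
// references to these objects and record their liveness.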
3781 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
3782 private:
3783 size_t _total_humongous;
3784 size_t _candidate_humongous;
3785 public:
3786 RegisterHumongousWithInCSetFastTestClosure() : _total_humongous(0), _candidate_humongous(0) {
3787 }
3788
3789 virtual bool doHeapRegion(HeapRegion* r) {
3790 if (!r->startsHumongous()) {
3791 return false;
3792 }
3793 G1CollectedHeap* g1h = G1CollectedHeap::heap();
3794
3795 uint region_idx = r->hrs_index();
3796 bool is_candidate = !g1h->humongous_region_is_always_live(region_idx);
3797     // is_candidate already filters out humongous regions with some remembered set.
3798     // This cannot lead to a humongous object that we mistakenly keep alive, because
3799     // during a young collection the remembered sets will only be added to.
3800 if (is_candidate) {
3801 g1h->register_humongous_region_with_in_cset_fast_test(region_idx);
3802 _candidate_humongous++;
3803 }
3804 _total_humongous++;
3805
3806 return false;
3807 }
3808
3809 size_t total_humongous() const { return _total_humongous; }
3810 size_t candidate_humongous() const { return _candidate_humongous; }
3811 };
3812
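// Called during the evacuation pause, right after the collection set has been
// finalized (see the call site below). When eager reclaim is disabled this
// only records zero statistics.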
3813 void G1CollectedHeap::register_humongous_regions_with_in_cset_fast_test() {
3814 if (!G1ReclaimDeadHumongousObjectsAtYoungGC) {
3815 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(0, 0);
3816 return;
3817 }
3818
3819 RegisterHumongousWithInCSetFastTestClosure cl;
3820 heap_region_iterate(&cl);
3821 g1_policy()->phase_times()->record_fast_reclaim_humongous_stats(cl.total_humongous(),
3822 cl.candidate_humongous());
3823 _has_humongous_reclaim_candidates = cl.candidate_humongous() > 0;
3824
3825 if (_has_humongous_reclaim_candidates) {
3826 clear_humongous_is_live_table();
3827 }
3828 }
3829
3830 void
3831 G1CollectedHeap::setup_surviving_young_words() {
3832 assert(_surviving_young_words == NULL, "pre-condition");
3833 uint array_length = g1_policy()->young_cset_region_length();
3834 _surviving_young_words = NEW_C_HEAP_ARRAY(size_t, (size_t) array_length, mtGC);
3835 if (_surviving_young_words == NULL) {
3836 vm_exit_out_of_memory(sizeof(size_t) * array_length, OOM_MALLOC_ERROR,
3837 "Not enough space for young surv words summary.");
3838 }
3839 memset(_surviving_young_words, 0, (size_t) array_length * sizeof(size_t));
3840 #ifdef ASSERT
3841 for (uint i = 0; i < array_length; ++i) {
3842     assert(_surviving_young_words[i] == 0, "memset above");
3843   }
3844 #endif // ASSERT
3845 }
3846
3847 void
3848 G1CollectedHeap::update_surviving_young_words(size_t* surv_young_words) {
3849 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4097 }
4098 g1_policy()->phase_times()->record_root_region_scan_wait_time(wait_time_ms);
4099
4100 #if YOUNG_LIST_VERBOSE
4101 gclog_or_tty->print_cr("\nAfter recording pause start.\nYoung_list:");
4102 _young_list->print();
4103 #endif // YOUNG_LIST_VERBOSE
4104
4105 if (g1_policy()->during_initial_mark_pause()) {
4106 concurrent_mark()->checkpointRootsInitialPre();
4107 }
4108
4109 #if YOUNG_LIST_VERBOSE
4110 gclog_or_tty->print_cr("\nBefore choosing collection set.\nYoung_list:");
4111 _young_list->print();
4112 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
4113 #endif // YOUNG_LIST_VERBOSE
4114
4115 g1_policy()->finalize_cset(target_pause_time_ms, evacuation_info);
4116
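  // Register eager-reclaim candidates after finalize_cset(), presumably so
  // that their in-cset fast test entries are not disturbed while the
  // collection set is still being assembled.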
4117 register_humongous_regions_with_in_cset_fast_test();
4118
4119 _cm->note_start_of_gc();
4120 // We should not verify the per-thread SATB buffers given that
4121 // we have not filtered them yet (we'll do so during the
4122 // GC). We also call this after finalize_cset() to
4123 // ensure that the CSet has been finalized.
4124 _cm->verify_no_cset_oops(true /* verify_stacks */,
4125 true /* verify_enqueued_buffers */,
4126 false /* verify_thread_buffers */,
4127 true /* verify_fingers */);
4128
4129 if (_hr_printer.is_active()) {
4130 HeapRegion* hr = g1_policy()->collection_set();
4131 while (hr != NULL) {
4132 G1HRPrinter::RegionType type;
4133 if (!hr->is_young()) {
4134 type = G1HRPrinter::Old;
4135 } else if (hr->is_survivor()) {
4136 type = G1HRPrinter::Survivor;
4137 } else {
4138 type = G1HRPrinter::Eden;
4149
4150 setup_surviving_young_words();
4151
4152 // Initialize the GC alloc regions.
4153 init_gc_alloc_regions(evacuation_info);
4154
4155 // Actually do the work...
4156 evacuate_collection_set(evacuation_info);
4157
4158 // We do this mainly to verify the per-thread SATB buffers
4159 // (which have been filtered by now) since we didn't verify
4160 // them earlier. No point in re-checking the stacks / enqueued
4161 // buffers given that the CSet has not changed since last time
4162 // we checked.
4163 _cm->verify_no_cset_oops(false /* verify_stacks */,
4164 false /* verify_enqueued_buffers */,
4165 true /* verify_thread_buffers */,
4166 true /* verify_fingers */);
4167
4168 free_collection_set(g1_policy()->collection_set(), evacuation_info);
4169
4170 eagerly_reclaim_humongous_regions();
4171
4172 g1_policy()->clear_collection_set();
4173
4174 cleanup_surviving_young_words();
4175
4176 // Start a new incremental collection set for the next pause.
4177 g1_policy()->start_incremental_cset_building();
4178
4179 clear_cset_fast_test();
4180
4181 _young_list->reset_sampled_info();
4182
4183 // Don't check the whole heap at this point as the
4184 // GC alloc regions from this pause have been tagged
4185 // as survivors and moved on to the survivor list.
4186 // Survivor regions will fail the !is_young() check.
4187 assert(check_young_list_empty(false /* check_heap */),
4188 "young list should be empty");
4189
4190 #if YOUNG_LIST_VERBOSE
4191 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
4652 template <class T>
4653 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4654 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4655 _scanned_klass->record_modified_oops();
4656 }
4657 }
4658
4659 template <G1Barrier barrier, G1Mark do_mark_object>
4660 template <class T>
4661 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4662 T heap_oop = oopDesc::load_heap_oop(p);
4663
4664 if (oopDesc::is_null(heap_oop)) {
4665 return;
4666 }
4667
4668 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4669
4670 assert(_worker_id == _par_scan_state->queue_num(), "sanity");
4671
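  // in_cset_state() yields more than a boolean: besides collection set
  // membership it also identifies humongous objects, which are handled here
  // only to record their liveness for eager reclaim.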
4672 G1FastCSetBiasedMappedArray::in_cset_state_t state = _g1->in_cset_state(obj);
4673
4674 if (state == G1FastCSetBiasedMappedArray::InCSet) {
4675 oop forwardee;
4676 if (obj->is_forwarded()) {
4677 forwardee = obj->forwardee();
4678 } else {
4679 forwardee = _par_scan_state->copy_to_survivor_space(obj);
4680 }
4681 assert(forwardee != NULL, "forwardee should not be NULL");
4682 oopDesc::encode_store_heap_oop(p, forwardee);
4683 if (do_mark_object != G1MarkNone && forwardee != obj) {
4684 // If the object is self-forwarded we don't need to explicitly
4685 // mark it, the evacuation failure protocol will do so.
4686 mark_forwarded_object(obj, forwardee);
4687 }
4688
4689 if (barrier == G1BarrierKlass) {
4690 do_klass_barrier(p, forwardee);
4691 }
4692 } else {
4693 if (state == G1FastCSetBiasedMappedArray::IsHumongous) {
4694 _g1->set_humongous_is_live(obj);
4695 }
4696 // The object is not in collection set. If we're a root scanning
4697 // closure during an initial mark pause then attempt to mark the object.
4698 if (do_mark_object == G1MarkFromRoot) {
4699 mark_object(obj);
4700 }
4701 }
4702
4703 if (barrier == G1BarrierEvac) {
4704 _par_scan_state->update_rs(_from, p, _worker_id);
4705 }
4706 }
4707
4708 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
4709 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
4710
4711 class G1ParEvacuateFollowersClosure : public VoidClosure {
4712 protected:
4713 G1CollectedHeap* _g1h;
4714 G1ParScanThreadState* _par_scan_state;
4715 RefToScanQueueSet* _queues;
5499 bool do_object_b(oop p) {
5500 if (p != NULL) {
5501 return true;
5502 }
5503 return false;
5504 }
5505 };
5506
5507 bool G1STWIsAliveClosure::do_object_b(oop p) {
5508 // An object is reachable if it is outside the collection set,
5509 // or is inside and copied.
5510 return !_g1->obj_in_cs(p) || p->is_forwarded();
5511 }
5512
5513 // Non Copying Keep Alive closure
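// Used during STW reference processing: forwards references into the
// collection set and, for humongous referents, merely records that the
// object is live so it will not be eagerly reclaimed.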
5514 class G1KeepAliveClosure: public OopClosure {
5515 G1CollectedHeap* _g1;
5516 public:
5517 G1KeepAliveClosure(G1CollectedHeap* g1) : _g1(g1) {}
5518 void do_oop(narrowOop* p) { guarantee(false, "Not needed"); }
5519 void do_oop(oop* p) {
5520 oop obj = *p;
5521
5522 G1FastCSetBiasedMappedArray::in_cset_state_t cset_state = _g1->in_cset_state(obj);
5523 if (obj == NULL || cset_state == G1FastCSetBiasedMappedArray::InNeither) {
5524 return;
5525 }
5526 if (cset_state == G1FastCSetBiasedMappedArray::InCSet) {
5527       assert(obj->is_forwarded(), "invariant");
5528       *p = obj->forwardee();
5529     } else {
5530       assert(!obj->is_forwarded(), "invariant");
5531 assert(cset_state == G1FastCSetBiasedMappedArray::IsHumongous,
5532 err_msg("Only allowed InCSet state is IsHumongous, but is %d", cset_state));
5533 _g1->set_humongous_is_live(obj);
5534 }
5535 }
5536 };
5537
5538 // Copying Keep Alive closure - can be called from both
5539 // serial and parallel code as long as different worker
5540 // threads utilize different G1ParScanThreadState instances
5541 // and different queues.
5542
5543 class G1CopyingKeepAliveClosure: public OopClosure {
5544 G1CollectedHeap* _g1h;
5545 OopClosure* _copy_non_heap_obj_cl;
5546 G1ParScanThreadState* _par_scan_state;
5547
5548 public:
5549 G1CopyingKeepAliveClosure(G1CollectedHeap* g1h,
5550 OopClosure* non_heap_obj_cl,
5551 G1ParScanThreadState* pss):
5552 _g1h(g1h),
5553 _copy_non_heap_obj_cl(non_heap_obj_cl),
5554 _par_scan_state(pss)
5555 {}
5556
5557 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
5558 virtual void do_oop( oop* p) { do_oop_work(p); }
5559
5560 template <class T> void do_oop_work(T* p) {
5561 oop obj = oopDesc::load_decode_heap_oop(p);
5562
5563 if (_g1h->is_in_cset_or_humongous(obj)) {
5564 // If the referent object has been forwarded (either copied
5565 // to a new location or to itself in the event of an
5566 // evacuation failure) then we need to update the reference
5567 // field and, if both reference and referent are in the G1
5568 // heap, update the RSet for the referent.
5569 //
5570       // If the referent has not been forwarded then we have to keep
5571       // it alive by policy. Therefore we have to copy the referent.
5572       //
5573       // If the reference field is in the G1 heap then we can push
5574       // it on the PSS queue. When the queue is drained (after each
5575       // phase of reference processing) the object and its followers
5576       // will be copied, the reference field set to point to the
5577       // new location, and the RSet updated. Otherwise we need to
5578       // use the non-heap or metadata closures directly to copy
5579       // the referent object and update the pointer, while avoiding
5580       // updating the RSet.
5581
5582 if (_g1h->is_in_g1_reserved(p)) {
5583 _par_scan_state->push_on_queue(p);
6493
6494 evacuation_info.set_regions_freed(local_free_list.length());
6495 policy->record_max_rs_lengths(rs_lengths);
6496 policy->cset_regions_freed();
6497
6498 double end_sec = os::elapsedTime();
6499 double elapsed_ms = (end_sec - start_sec) * 1000.0;
6500
6501 if (non_young) {
6502 non_young_time_ms += elapsed_ms;
6503 } else {
6504 young_time_ms += elapsed_ms;
6505 }
6506
6507 prepend_to_freelist(&local_free_list);
6508 decrement_summary_bytes(pre_used);
6509 policy->phase_times()->record_young_free_cset_time_ms(young_time_ms);
6510 policy->phase_times()->record_non_young_free_cset_time_ms(non_young_time_ms);
6511 }
6512
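// Closure that frees starts-humongous regions whose objects were neither
// found live during evacuation nor excluded as always-live candidates (see
// the liveness discussion inside doHeapRegion()).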
6513 class G1FreeHumongousRegionClosure : public HeapRegionClosure {
6514 private:
6515 FreeRegionList* _free_region_list;
6516 HeapRegionSet* _proxy_set;
6517 HeapRegionSetCount _humongous_regions_removed;
6518 size_t _freed_bytes;
6519 public:
6520
6521 G1FreeHumongousRegionClosure(FreeRegionList* free_region_list) :
6522 _free_region_list(free_region_list), _humongous_regions_removed(), _freed_bytes(0) {
6523 }
6524
6525 virtual bool doHeapRegion(HeapRegion* r) {
6526 if (!r->startsHumongous()) {
6527 return false;
6528 }
6529
6530 G1CollectedHeap* g1h = G1CollectedHeap::heap();
6531
6532     // The following checks on whether the humongous object is live are sufficient.
6533 // The main additional check (in addition to having a reference from the roots
6534 // or the young gen) is whether the humongous object has a remembered set entry.
6535 //
6536 // A humongous object cannot be live if there is no remembered set for it
6537 // because:
6538 // - there can be no references from within humongous starts regions referencing
6539 // the object because we never allocate other objects into them.
6540 // (I.e. there are no intra-region references that may be missed by the
6541 // remembered set)
6542     // - as soon as there is a remembered set entry to the humongous starts region
6543 // (i.e. it has "escaped" to an old object) this remembered set entry will stay
6544 // until the end of a concurrent mark.
6545 //
6546 // It is not required to check whether the object has been found dead by marking
6547 // or not, in fact it would prevent reclamation within a concurrent cycle, as
6548 // all objects allocated during that time are considered live.
6549 // SATB marking is even more conservative than the remembered set.
6550 // So if at this point in the collection there is no remembered set entry,
6551 // nobody has a reference to it.
6552 // At the start of collection we flush all refinement logs, and remembered sets
6553     // are completely up-to-date with respect to references to the humongous object.
6554 //
6555 // Other implementation considerations:
6556     // - we never consider object arrays: while they are a valid target, they have
6557     // not been observed to be used as temporary objects.
6558     // - they would also require considerable effort to clean up the remembered
6559     // sets.
6560     // While this cleanup is not strictly necessary (nor needs to happen instantly),
6561     // object arrays occur rarely enough that excluding them saves us this
6562     // additional complexity.
6563 uint region_idx = r->hrs_index();
6564 if (g1h->humongous_is_live(region_idx) ||
6565 g1h->humongous_region_is_always_live(region_idx)) {
6566
6567 if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
6568 gclog_or_tty->print_cr("Live humongous %d region %d with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d",
6569 r->isHumongous(),
6570 region_idx,
6571 r->rem_set()->occupied(),
6572 r->rem_set()->strong_code_roots_list_length(),
6573 g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(),
6574 g1h->humongous_is_live(region_idx),
6575 oop(r->bottom())->is_objArray()
6576 );
6577 }
6578
6579 return false;
6580 }
6581
6582     guarantee(!((oop)(r->bottom()))->is_objArray(),
6583               err_msg("Eagerly reclaiming object arrays is not supported, but the object "PTR_FORMAT" is one.",
6584                       r->bottom()));
6585
6586 if (G1TraceReclaimDeadHumongousObjectsAtYoungGC) {
6587 gclog_or_tty->print_cr("Reclaim humongous region %d start "PTR_FORMAT" region %d length "UINT32_FORMAT" with remset "SIZE_FORMAT" code roots "SIZE_FORMAT" is dead-bitmap %d live-other %d obj array %d",
6588 r->isHumongous(),
6589 r->bottom(),
6590 region_idx,
6591 r->region_num(),
6592 r->rem_set()->occupied(),
6593 r->rem_set()->strong_code_roots_list_length(),
6594 g1h->mark_in_progress() && !g1h->g1_policy()->during_initial_mark_pause(),
6595 g1h->humongous_is_live(region_idx),
6596 oop(r->bottom())->is_objArray()
6597 );
6598 }
6599 _freed_bytes += r->used();
6600 r->set_containing_set(NULL);
6601 _humongous_regions_removed.increment(1u, r->capacity());
6602 g1h->free_humongous_region(r, _free_region_list, false);
6603
6604 return false;
6605 }
6606
6607 HeapRegionSetCount& humongous_free_count() {
6608 return _humongous_regions_removed;
6609 }
6610
6611 size_t bytes_freed() const {
6612 return _freed_bytes;
6613 }
6614
6615 size_t humongous_reclaimed() const {
6616 return _humongous_regions_removed.length();
6617 }
6618 };
6619
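// Walks all regions and eagerly frees the humongous reclaim candidates for
// which no live reference was recorded during this pause, returning them to
// the free list and updating the relevant statistics.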
6620 void G1CollectedHeap::eagerly_reclaim_humongous_regions() {
6621 assert_at_safepoint(true);
6622
6623 if (!G1ReclaimDeadHumongousObjectsAtYoungGC || !_has_humongous_reclaim_candidates) {
6624 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms(0.0, 0);
6625 return;
6626 }
6627
6628 double start_time = os::elapsedTime();
6629
6630 FreeRegionList local_cleanup_list("Local Humongous Cleanup List");
6631
6632 G1FreeHumongousRegionClosure cl(&local_cleanup_list);
6633 heap_region_iterate(&cl);
6634
6635 HeapRegionSetCount empty_set;
6636 remove_from_old_sets(empty_set, cl.humongous_free_count());
6637
6638 G1HRPrinter* hr_printer = _g1h->hr_printer();
6639 if (hr_printer->is_active()) {
6640 FreeRegionListIterator iter(&local_cleanup_list);
6641 while (iter.more_available()) {
6642 HeapRegion* hr = iter.get_next();
6643 hr_printer->cleanup(hr);
6644 }
6645 }
6646
6647 prepend_to_freelist(&local_cleanup_list);
6648 decrement_summary_bytes(cl.bytes_freed());
6649
6650 g1_policy()->phase_times()->record_fast_reclaim_humongous_time_ms((os::elapsedTime() - start_time) * 1000.0,
6651 cl.humongous_reclaimed());
6652 }
6653
6654 // This routine is similar to the above but does not record
6655 // any policy statistics or update free lists; we are abandoning
6656 // the current incremental collection set in preparation of a
6657 // full collection. After the full GC we will start to build up
6658 // the incremental collection set again.
6659 // This is only called when we're doing a full collection
6660 // and is immediately followed by the tearing down of the young list.
6661
6662 void G1CollectedHeap::abandon_collection_set(HeapRegion* cs_head) {
6663 HeapRegion* cur = cs_head;
6664
6665 while (cur != NULL) {
6666 HeapRegion* next = cur->next_in_collection_set();
6667 assert(cur->in_collection_set(), "bad CS");
6668 cur->set_next_in_collection_set(NULL);
6669 cur->set_in_collection_set(false);
6670 cur->set_young_index_in_cset(-1);
6671 cur = next;
6672 }
6673 }