
#include "precompiled.hpp"
#include "classfile/metadataOnStackMark.hpp"
#include "classfile/stringTable.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "gc_implementation/g1/bufferingOopClosure.hpp"
#include "gc_implementation/g1/concurrentG1Refine.hpp"
#include "gc_implementation/g1/concurrentG1RefineThread.hpp"
#include "gc_implementation/g1/concurrentMarkThread.inline.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1ErgoVerbose.hpp"
#include "gc_implementation/g1/g1EvacFailure.hpp"
#include "gc_implementation/g1/g1GCPhaseTimes.hpp"
#include "gc_implementation/g1/g1Log.hpp"
#include "gc_implementation/g1/g1MarkSweep.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1ParScanThreadState.inline.hpp"
#include "gc_implementation/g1/g1RegionToSpaceMapper.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1StringDedup.hpp"
#include "gc_implementation/g1/g1YCTypes.hpp"
#include "gc_implementation/g1/heapRegion.inline.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/vm_operations_g1.hpp"
#include "gc_implementation/shared/gcHeapSummary.hpp"
#include "gc_implementation/shared/gcTimer.hpp"
#include "gc_implementation/shared/gcTrace.hpp"
#include "gc_implementation/shared/gcTraceTime.hpp"
#include "gc_implementation/shared/isGCActiveMark.hpp"
#include "memory/allocation.hpp"
#include "memory/gcLocker.inline.hpp"
#include "memory/generationSpec.hpp"
#include "memory/iterator.hpp"
#include "memory/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
// ... (lines omitted) ...

      // last continues humongous region
      assert(hr->bottom() < new_top && new_top <= hr->end(),
             "new_top should fall on this region");
      hr->set_top(new_top);
      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, new_top);
    } else {
      // not last one
      assert(new_top > hr->end(), "new_top should be above this region");
      hr->set_top(hr->end());
      _hr_printer.alloc(G1HRPrinter::ContinuesHumongous, hr, hr->end());
    }
  }
  // If we have continues humongous regions (hr != NULL), then the
  // end of the last one should match new_end and its top should
  // match new_top.
  assert(hr == NULL ||
         (hr->end() == new_end && hr->top() == new_top), "sanity");
  check_bitmaps("Humongous Region Allocation", first_hr);

  assert(first_hr->used() == word_size * HeapWordSize, "invariant");
  _allocator->increase_used(first_hr->used());
  _humongous_set.add(first_hr);

  return new_obj;
}

// If the object could fit into free regions without expansion, try that.
// Otherwise, if the heap can expand, do so.
// Otherwise, if using expansion regions might help, try with them given back.
HeapWord* G1CollectedHeap::humongous_obj_allocate(size_t word_size, AllocationContext_t context) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);

  verify_region_sets_optional();

  uint first = G1_NO_HRM_INDEX;
  uint obj_regions = (uint)(align_size_up_(word_size, HeapRegion::GrainWords) / HeapRegion::GrainWords);
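  // For example (illustrative numbers only): with 1 MB regions on a 64-bit
  // VM, HeapRegion::GrainWords == 131072, so a 200000-word request rounds up
  // to 262144 words and therefore needs obj_regions == 2 contiguous regions.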

  if (obj_regions == 1) {
    // Only one region to allocate; try to use a fast path by directly
    // allocating from the free lists. Do not try to expand here; we will
    // potentially do that later.
  // ... (lines omitted) ...

  // Make sure you read the note in attempt_allocation_humongous().

  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
         "be called for humongous allocation requests");

  // We should only get here after the first-level allocation attempt
  // (attempt_allocation()) failed to allocate.

  // We will loop until a) we manage to successfully perform the
  // allocation or b) we successfully schedule a collection which
  // fails to perform the allocation. b) is the only case when we'll
  // return NULL.
  HeapWord* result = NULL;
  for (int try_count = 1; /* we'll return */; try_count += 1) {
    bool should_try_gc;
    uint gc_count_before;

    {
      MutexLockerEx x(Heap_lock);
      result = _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                    false /* bot_updates */);
      if (result != NULL) {
        return result;
      }

      // If we reach here, attempt_allocation_locked() above failed to
      // allocate a new region. So the mutator alloc region should be NULL.
      assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");

      if (GC_locker::is_active_and_needs_gc()) {
        if (g1_policy()->can_expand_young_list()) {
          // No need for an ergo verbose message here,
          // can_expand_young_list() does this when it returns true.
          result = _allocator->mutator_alloc_region(context)->attempt_allocation_force(word_size,
                                                                                       false /* bot_updates */);
          if (result != NULL) {
            return result;
          }
        }
        should_try_gc = false;
      } else {
        // The GCLocker may not be active but the GCLocker initiated
        // GC may not yet have been performed (GCLocker::needs_gc()
        // returns true). In this case we do not try this GC and
        // wait until the GCLocker initiated GC is performed, and
        // then retry the allocation.
        if (GC_locker::needs_gc()) {
          should_try_gc = false;
        } else {
          // Read the GC count while still holding the Heap_lock.
          gc_count_before = total_collections();
          should_try_gc = true;
        }
      }
    }
      // ... (lines omitted) ...

      if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
        MutexLockerEx x(Heap_lock);
        *gc_count_before_ret = total_collections();
        return NULL;
      }
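      // Worked example: with GCLockerRetryAllocationCount == 2 (a typical
      // default; the exact value is a tunable flag), a thread stalls and
      // retries at most twice below; on the next pass the check above gives
      // up, returning NULL together with an up-to-date GC count.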
      // The GCLocker is either active or the GCLocker initiated
      // GC has not yet been performed. Stall until it is and
      // then retry the allocation.
      GC_locker::stall_until_clear();
      (*gclocker_retry_count_ret) += 1;
    }

    // We can reach here if we were unsuccessful in scheduling a
    // collection (because another thread beat us to it) or if we were
    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. We do the
    // first attempt (without holding the Heap_lock) here and the
    // follow-on attempt will be at the start of the next loop
    // iteration (after taking the Heap_lock).
    result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                           false /* bot_updates */);
    if (result != NULL) {
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_slow() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}
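
// A minimal caller-side sketch (simplified; the real first-level attempt
// lives in g1CollectedHeap.inline.hpp): the inline fast path falls back to
// the slow path above only when the lock-free allocation fails.
//
//   HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
//                                                 uint* gc_count_before_ret,
//                                                 uint* gclocker_retry_count_ret) {
//     AllocationContext_t context = AllocationContext::current();
//     HeapWord* result =
//       _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
//                                                                     false /* bot_updates */);
//     if (result == NULL) {
//       result = attempt_allocation_slow(word_size, context,
//                                        gc_count_before_ret,
//                                        gclocker_retry_count_ret);
//     }
//     return result;
//   }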

HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
                                                        uint* gc_count_before_ret,
                                                        uint* gclocker_retry_count_ret) {
  // The structure of this method has a lot of similarities to
    // ... (lines omitted) ...

    // stalled due to the GC locker. In either case we should retry the
    // allocation attempt in case another thread successfully
    // performed a collection and reclaimed enough space. Give a
    // warning if we seem to be looping forever.

    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
      warning("G1CollectedHeap::attempt_allocation_humongous() "
              "retries %d times", try_count);
    }
  }

  ShouldNotReachHere();
  return NULL;
}

HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
                                                           AllocationContext_t context,
                                                           bool expect_null_mutator_alloc_region) {
  assert_at_safepoint(true /* should_be_vm_thread */);
  assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
         !expect_null_mutator_alloc_region,
         "the current alloc region was unexpectedly found to be non-NULL");

  if (!is_humongous(word_size)) {
    return _allocator->mutator_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                false /* bot_updates */);
  } else {
    HeapWord* result = humongous_obj_allocate(word_size, context);
    if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
      g1_policy()->set_initiate_conc_mark_if_possible();
    }
    return result;
  }

  ShouldNotReachHere();
}

class PostMCRemSetClearClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ModRefBarrierSet* _mr_bs;
public:
  PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
    _g1h(g1h), _mr_bs(mr_bs) {}

  bool doHeapRegion(HeapRegion* r) {
    HeapRegionRemSet* hrrs = r->rem_set();
  // ... (lines omitted) ...

  _is_alive_closure_cm(this),
  _is_alive_closure_stw(this),
  _ref_processor_cm(NULL),
  _ref_processor_stw(NULL),
  _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
  _bot_shared(NULL),
  _evac_failure_scan_stack(NULL),
  _mark_in_progress(false),
  _cg1r(NULL),
  _g1mm(NULL),
  _refine_cte_cl(NULL),
  _full_collection(false),
  _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
  _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
  _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
  _humongous_is_live(),
  _has_humongous_reclaim_candidates(false),
  _free_regions_coming(false),
  _young_list(new YoungList(this)),
  _gc_time_stamp(0),
  _survivor_plab_stats(YoungPLABSize, PLABWeight),
  _old_plab_stats(OldPLABSize, PLABWeight),
  _expand_heap_after_alloc_failure(true),
  _surviving_young_words(NULL),
  _old_marking_cycles_started(0),
  _old_marking_cycles_completed(0),
  _concurrent_cycle_started(false),
  _heap_summary_sent(false),
  _in_cset_fast_test(),
  _dirty_cards_region_list(NULL),
  _worker_cset_start_region(NULL),
  _worker_cset_start_region_time_stamp(NULL),
  _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
  _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
  _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {

  _g1h = this;
  if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
    vm_exit_during_initialization("Failed necessary allocation.");
  }

// ... (lines omitted) ...

                                               DirtyCardQueue* into_cset_dcq,
                                               bool concurrent,
                                               uint worker_i) {
  // Clean cards in the hot card cache
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);

  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  int n_completed_buffers = 0;
  while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
    n_completed_buffers++;
  }
  g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
  dcqs.clear_n_completed_buffers();
  assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
}


// Computes the sum of the storage used by the various regions.
size_t G1CollectedHeap::used() const {
  return _allocator->used();
}

size_t G1CollectedHeap::used_unlocked() const {
  return _allocator->used_unlocked();
}

class SumUsedClosure: public HeapRegionClosure {
  size_t _used;
public:
  SumUsedClosure() : _used(0) {}
  bool doHeapRegion(HeapRegion* r) {
    if (!r->is_continues_humongous()) {
      _used += r->used();
    }
    return false;
  }
  size_t result() { return _used; }
};

size_t G1CollectedHeap::recalculate_used() const {
  double recalculate_used_start = os::elapsedTime();

  SumUsedClosure blk;
  heap_region_iterate(&blk);
// ... (lines omitted) ...

bool G1CollectedHeap::supports_tlab_allocation() const {
  return true;
}

size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
  return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
}

size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
  return young_list()->eden_used_bytes();
}

// For G1 TLABs should not contain humongous objects, so the maximum TLAB size
// must be smaller than the humongous object limit.
size_t G1CollectedHeap::max_tlab_size() const {
  return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
}
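
// Worked example (illustrative sizes): with 1 MB regions and 8-byte heap
// words, GrainWords == 131072 and _humongous_object_threshold_in_words ==
// GrainWords / 2 == 65536, so the largest TLAB allowed here is 65535 words
// (just under 512 KB), rounded down to MinObjAlignment.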

size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
  // Return the remaining space in the current alloc region, but not less
  // than the min TLAB size.

  // Also, this value can be at most the humongous object threshold,
  // since we can't allow TLABs to grow big enough to accommodate
  // humongous objects.

  HeapRegion* hr = _allocator->mutator_alloc_region(AllocationContext::current())->get();
  size_t max_tlab = max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}
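
// For instance (numbers hypothetical): if the current alloc region has 12 KB
// free and max_tlab is 512 KB, this returns 12 KB; if only 1 KB were free,
// the MAX2 clamp would still report MinTLABSize, honoring the "not less than
// the min TLAB size" contract stated above.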

size_t G1CollectedHeap::max_capacity() const {
  return _hrm.reserved().byte_size();
}

jlong G1CollectedHeap::millis_since_last_gc() {
  // Not yet implemented for G1; callers currently get 0.
  // assert(false, "NYI");
  return 0;
}

void G1CollectedHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || !UseTLAB) {
    ensure_parsability(false);
  }
  g1_rem_set()->prepare_for_verify();
}

bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
                                              VerifyOption vo) {
  // ... (lines omitted) ...

  // Don't check the whole heap at this point as the
  // GC alloc regions from this pause have been tagged
  // as survivors and moved on to the survivor list.
  // Survivor regions will fail the !is_young() check.
  assert(check_young_list_empty(false /* check_heap */),
         "young list should be empty");

#if YOUNG_LIST_VERBOSE
  gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
  _young_list->print();
#endif // YOUNG_LIST_VERBOSE

  g1_policy()->record_survivor_regions(_young_list->survivor_length(),
                                       _young_list->first_survivor_region(),
                                       _young_list->last_survivor_region());

  _young_list->reset_auxilary_lists();

  if (evacuation_failed()) {
    _allocator->set_used(recalculate_used());
    uint n_queues = MAX2((int)ParallelGCThreads, 1);
    for (uint i = 0; i < n_queues; i++) {
      if (_evacuation_failed_info_array[i].has_failed()) {
        _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
      }
    }
  } else {
    // The "used" of the collection set regions has already been subtracted
    // when they were freed. Add in the bytes evacuated.
    _allocator->increase_used(g1_policy()->bytes_copied_during_gc());
  }

  if (g1_policy()->during_initial_mark_pause()) {
    // We have to do this before we notify the CM threads that
    // they can start working to make sure that all the
    // appropriate initialization is done on the CM object.
    concurrent_mark()->checkpointRootsInitialPost();
    set_marking_started();
    // Note that we don't actually trigger the CM thread at
    // this point. We do that later when we're sure that
    // the current thread has completed its logging output.
  }

  allocate_dummy_regions();

#if YOUNG_LIST_VERBOSE
  gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
  _young_list->print();
  g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
#endif // YOUNG_LIST_VERBOSE
// ... (lines omitted) ...

  while (_evac_failure_scan_stack->length() > 0) {
    oop obj = _evac_failure_scan_stack->pop();
    _evac_failure_closure->set_region(heap_region_containing(obj));
    obj->oop_iterate_backwards(_evac_failure_closure);
  }
}

oop
G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
                                               oop old) {
  assert(obj_in_cs(old),
         err_msg("obj: "PTR_FORMAT" should still be in the CSet",
                 (HeapWord*) old));
  markOop m = old->mark();
  oop forward_ptr = old->forward_to_atomic(old);
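  // forward_to_atomic() CAS-installs a forwarding pointer in old's mark word;
  // forwarding an object to itself is G1's convention for "evacuation failed".
  // Conceptually (a sketch, not the real implementation):
  //   if CAS(&old->mark, m, forwarded_mark(old)) succeeds,
  //     it returns NULL (we won the race);
  //   otherwise it returns the forwardee some other worker installed.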
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded.
    assert(_par_scan_state != NULL, "par scan state");
    OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
    uint queue_num = _par_scan_state->queue_num();

    _evacuation_failed = true;
    _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
    if (_evac_failure_closure != cl) {
      MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
      assert(!_drain_in_progress,
             "Should only be true while someone holds the lock.");
      // Set the global evac-failure closure to the current thread's.
      assert(_evac_failure_closure == NULL, "Or locking has failed.");
      set_evac_failure_closure(cl);
      // Now do the common part.
      handle_evacuation_failure_common(old, m);
      // Reset to NULL.
      set_evac_failure_closure(NULL);
    } else {
      // The lock is already held, and this is recursive.
      assert(_drain_in_progress, "This should only be the recursive case.");
      handle_evacuation_failure_common(old, m);
    }
    return old;
  // ... (lines omitted) ...
}

template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
  if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
    _scanned_klass->record_modified_oops();
  }
}

template <G1Barrier barrier, G1Mark do_mark_object>
template <class T>
void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);

  if (oopDesc::is_null(heap_oop)) {
    return;
  }

  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

  assert(_worker_id == _par_scan_state->queue_num(), "sanity");

  const InCSetState state = _g1->in_cset_state(obj);
  if (state.is_in_cset()) {
    oop forwardee;
    markOop m = obj->mark();
    if (m->is_marked()) {
      forwardee = (oop) m->decode_pointer();
    } else {
      forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    oopDesc::encode_store_heap_oop(p, forwardee);
    if (do_mark_object != G1MarkNone && forwardee != obj) {
      // If the object is self-forwarded we don't need to explicitly
      // mark it; the evacuation failure protocol will do so.
      mark_forwarded_object(obj, forwardee);
    }

    if (barrier == G1BarrierKlass) {
      do_klass_barrier(p, forwardee);
    }
  } else {
    if (state.is_humongous()) {
      _g1->set_humongous_is_live(obj);
    }
    // The object is not in the collection set. If we're a root scanning
    // closure during an initial mark pause then attempt to mark the object.
    if (do_mark_object == G1MarkFromRoot) {
      mark_object(obj);
    }
  }

  if (barrier == G1BarrierEvac) {
    _par_scan_state->update_rs(_from, p, _worker_id);
  }
}

template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);

class G1ParEvacuateFollowersClosure : public VoidClosure {
protected:
  G1CollectedHeap* _g1h;
  G1ParScanThreadState* _par_scan_state;
  RefToScanQueueSet* _queues;
  ParallelTaskTerminator* _terminator;

  G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
  RefToScanQueueSet* queues() { return _queues; }
  ParallelTaskTerminator* terminator() { return _terminator; }

public:
  G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
                                G1ParScanThreadState* par_scan_state,
                                RefToScanQueueSet* queues,
                                ParallelTaskTerminator* terminator)
    : _g1h(g1h), _par_scan_state(par_scan_state),
      _queues(queues), _terminator(terminator) {}

  void do_void();

private:
  inline bool offer_termination();
};

bool G1ParEvacuateFollowersClosure::offer_termination() {
  G1ParScanThreadState* const pss = par_scan_state();
  pss->start_term_time();
  const bool res = terminator()->offer_termination();
  pss->end_term_time();
  return res;
}

void G1ParEvacuateFollowersClosure::do_void() {
  G1ParScanThreadState* const pss = par_scan_state();
  pss->trim_queue();
  do {
    pss->steal_and_trim_queue(queues());
  } while (!offer_termination());
}
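
// The trim/steal loop above is G1's work-stealing termination pattern: each
// worker first drains its own queue, then repeatedly steals from the other
// workers' queues, and only exits once offer_termination() has observed that
// every queue is empty (while also accounting the time spent waiting).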

class G1KlassScanClosure : public KlassClosure {
  G1ParCopyHelper* _closure;
  bool _process_only_dirty;
  int _count;
public:
  G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
    : _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
  void do_klass(Klass* klass) {
    // If the klass has not been dirtied we know that there's
  // ... (lines omitted) ...

  HeapRegionGatheringOopClosure _oc;
public:
  G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}

  void do_code_blob(CodeBlob* cb) {
    nmethod* nm = cb->as_nmethod_or_null();
    if (nm != NULL) {
      if (!nm->test_set_oops_do_mark()) {
        _oc.set_nm(nm);
        nm->oops_do(&_oc);
        nm->fix_oop_relocations();
      }
    }
  }
};

class G1ParTask : public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  RefToScanQueueSet* _queues;
  ParallelTaskTerminator _terminator;
  uint _n_workers;

  Mutex _stats_lock;
  Mutex* stats_lock() { return &_stats_lock; }

public:
  G1ParTask(G1CollectedHeap* g1h, RefToScanQueueSet* task_queues)
    : AbstractGangTask("G1 collection"),
      _g1h(g1h),
      _queues(task_queues),
      _terminator(0, _queues),
      _stats_lock(Mutex::leaf, "parallel G1 stats lock", true)
  {}

  RefToScanQueueSet* queues() { return _queues; }

  RefToScanQueue* work_queue(int i) {
    return queues()->queue(i);
  }

  ParallelTaskTerminator* terminator() { return &_terminator; }

  virtual void set_for_termination(int active_workers) {
    // This task calls set_n_termination() in par_non_clean_card_iterate_work()
    // in the young space (_par_seq_tasks) in the G1 heap
    // for SequentialSubTasksDone.
    // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap,
    // both of which need setting by set_n_termination().
    _g1h->SharedHeap::set_n_termination(active_workers);
    _g1h->set_n_termination(active_workers);
    terminator()->reset_for_reuse(active_workers);
    _n_workers = active_workers;
    // ... (lines omitted) ...

    }

    void do_cld(ClassLoaderData* cld) {
      cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
    }
  };

  void work(uint worker_id) {
    if (worker_id >= _n_workers) return;  // no work needed this round

    double start_time_ms = os::elapsedTime() * 1000.0;
    _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);

    {
      ResourceMark rm;
      HandleMark hm;

      ReferenceProcessor* rp = _g1h->ref_processor_stw();

      G1ParScanThreadState pss(_g1h, worker_id, rp);
      G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, rp);

      pss.set_evac_failure_closure(&evac_failure_cl);

      bool only_young = _g1h->g1_policy()->gcs_are_young();

      // Non-IM young GC.
      G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
                                                only_young, // Only process dirty klasses.
                                                false);     // No need to claim CLDs.
      // IM young GC.
      // Strong roots closures.
      G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
                                                    false, // Process all klasses.
                                                    true); // Need to claim CLDs.
      // Weak roots closures.
      G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, &pss, rp);
      G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
                                                                 false, // Process all klasses.
                                                                 true); // Need to claim CLDs.

      G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
      G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
      // IM Weak code roots are handled later.

      OopClosure* strong_root_cl;
      OopClosure* weak_root_cl;
      CLDClosure* strong_cld_cl;
      CLDClosure* weak_cld_cl;
      CodeBlobClosure* strong_code_cl;

      if (_g1h->g1_policy()->during_initial_mark_pause()) {
        // We also need to mark copied objects.
        strong_root_cl = &scan_mark_root_cl;
        strong_cld_cl  = &scan_mark_cld_cl;
        strong_code_cl = &scan_mark_code_cl;
        if (ClassUnloadingWithConcurrentMark) {
          weak_root_cl = &scan_mark_weak_root_cl;
          weak_cld_cl  = &scan_mark_weak_cld_cl;
        } else {
          weak_root_cl = &scan_mark_root_cl;
          weak_cld_cl  = &scan_mark_cld_cl;
        }
      } else {
        strong_root_cl = &scan_only_root_cl;
        weak_root_cl   = &scan_only_root_cl;
        strong_cld_cl  = &scan_only_cld_cl;
        weak_cld_cl    = &scan_only_cld_cl;
        strong_code_cl = &scan_only_code_cl;
      }

      G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, &pss);

      pss.start_strong_roots();
      _g1h->g1_process_roots(strong_root_cl,
                             weak_root_cl,
                             &push_heap_rs_cl,
                             strong_cld_cl,
                             weak_cld_cl,
                             strong_code_cl,
                             worker_id);

      pss.end_strong_roots();

      {
        double start = os::elapsedTime();
        G1ParEvacuateFollowersClosure evac(_g1h, &pss, _queues, &_terminator);
        evac.do_void();
        double elapsed_ms = (os::elapsedTime() - start) * 1000.0;
        double term_ms = pss.term_time() * 1000.0;
        _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms - term_ms);
        _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, pss.term_attempts());
      }
      _g1h->g1_policy()->record_thread_age_table(pss.age_table());
      _g1h->update_surviving_young_words(pss.surviving_young_words()+1);

      if (PrintTerminationStats) {
        MutexLocker x(stats_lock());
        pss.print_termination_stats(worker_id);
      }

      assert(pss.queue_is_empty(), "should be empty");

      // Close the inner scope so that the ResourceMark and HandleMark
      // destructors are executed here and are included as part of the
      // "GC Worker Time".
    }

    double end_time_ms = os::elapsedTime() * 1000.0;
    _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
  }
};

// *** Common G1 Evacuation Stuff

// This method is run in a GC worker.

void
G1CollectedHeap::
g1_process_roots(OopClosure* scan_non_heap_roots,
                 OopClosure* scan_non_heap_weak_roots,
                 G1ParPushHeapRSClosure* scan_rs,
                 CLDClosure* scan_strong_clds,
                 CLDClosure* scan_weak_clds,
                 CodeBlobClosure* scan_strong_code,
                 uint worker_i) {

  // First scan the shared roots.
  double ext_roots_start = os::elapsedTime();
  double closure_app_time_sec = 0.0;

  bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
  bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
// ... (lines omitted) ...

public:
  G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
    _g1h(g1h),
    _par_scan_state(pss)
  { }

  void do_void() {
    G1ParScanThreadState* const pss = par_scan_state();
    pss->trim_queue();
  }
};

// Parallel Reference Processing closures

// Implementation of AbstractRefProcTaskExecutor for parallel reference
// processing during G1 evacuation pauses.

class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
private:
  G1CollectedHeap* _g1h;
  RefToScanQueueSet* _queues;
  FlexibleWorkGang* _workers;
  int _active_workers;

public:
  G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
                           FlexibleWorkGang* workers,
                           RefToScanQueueSet* task_queues,
                           int n_workers) :
    _g1h(g1h),
    _queues(task_queues),
    _workers(workers),
    _active_workers(n_workers)
  {
    assert(n_workers > 0, "shouldn't call this otherwise");
  }

  // Executes the given task using concurrent marking worker threads.
  virtual void execute(ProcessTask& task);
  virtual void execute(EnqueueTask& task);
};

// Gang task for possibly parallel reference processing

class G1STWRefProcTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
  ProcessTask& _proc_task;
  G1CollectedHeap* _g1h;
  RefToScanQueueSet* _task_queues;
  ParallelTaskTerminator* _terminator;

public:
  G1STWRefProcTaskProxy(ProcessTask& proc_task,
                        G1CollectedHeap* g1h,
                        RefToScanQueueSet* task_queues,
                        ParallelTaskTerminator* terminator) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _g1h(g1h),
    _task_queues(task_queues),
    _terminator(terminator)
  {}

  virtual void work(uint worker_id) {
    // The reference processing task executed by a single worker.
    ResourceMark rm;
    HandleMark hm;

    G1STWIsAliveClosure is_alive(_g1h);

    G1ParScanThreadState pss(_g1h, worker_id, NULL);
    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);

    pss.set_evac_failure_closure(&evac_failure_cl);

    G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);

    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);

    OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

    if (_g1h->g1_policy()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      copy_non_heap_cl = &copy_mark_non_heap_cl;
    }

    // Keep alive closure.
    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);

    // Complete GC closure
    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _task_queues, _terminator);

    // Call the reference processing task's work routine.
    _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);

    // Note we cannot assert that the refs array is empty here as not all
    // of the processing tasks (specifically phase2 - pp2_work) execute
    // the complete_gc closure (which ordinarily would drain the queue) so
    // the queue may not be empty.
  }
};

// Driver routine for parallel reference processing.
// Creates an instance of the ref processing gang
// task and has the worker threads execute it.
void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
  assert(_workers != NULL, "Need parallel worker threads.");

  ParallelTaskTerminator terminator(_active_workers, _queues);
  G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _queues, &terminator);

  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&proc_task_proxy);
  _g1h->set_par_threads(0);
}
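
// Note the recurring bracketing pattern here: set_par_threads(n) announces
// the number of parallel workers before run_task(), and set_par_threads(0)
// clears it afterwards so that subsequent serial phases are not treated as
// running multi-threaded.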

// Gang task for parallel reference enqueueing.

class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
  typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
  EnqueueTask& _enq_task;

public:
  G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enq_task(enq_task)
  { }

  virtual void work(uint worker_id) {
    _enq_task.work(worker_id);
  }
};

void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
  assert(_workers != NULL, "Need parallel worker threads.");

  G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);

  _g1h->set_par_threads(_active_workers);
  _workers->run_task(&enq_task_proxy);
  _g1h->set_par_threads(0);
}

// End of weak reference support closures

// Abstract task used to preserve (i.e. copy) any referent objects
// that are in the collection set and are pointed to by reference
// objects discovered by the CM ref processor.

class G1ParPreserveCMReferentsTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  RefToScanQueueSet* _queues;
  ParallelTaskTerminator _terminator;
  uint _n_workers;

public:
  G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, int workers, RefToScanQueueSet* task_queues) :
    AbstractGangTask("ParPreserveCMReferents"),
    _g1h(g1h),
    _queues(task_queues),
    _terminator(workers, _queues),
    _n_workers(workers)
  { }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;

    G1ParScanThreadState pss(_g1h, worker_id, NULL);
    G1ParScanHeapEvacFailureClosure evac_failure_cl(_g1h, &pss, NULL);

    pss.set_evac_failure_closure(&evac_failure_cl);

    assert(pss.queue_is_empty(), "both queue and overflow should be empty");

    G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, &pss, NULL);

    G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, &pss, NULL);

    OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

    if (_g1h->g1_policy()->during_initial_mark_pause()) {
      // We also need to mark copied objects.
      copy_non_heap_cl = &copy_mark_non_heap_cl;
    }

    // Is alive closure
    G1AlwaysAliveClosure always_alive(_g1h);

    // Copying keep alive closure. Applied to referent objects that need
    // to be copied.
    G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, &pss);

    ReferenceProcessor* rp = _g1h->ref_processor_cm();

    uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
    uint stride = MIN2(MAX2(_n_workers, 1U), limit);

    // limit is set using max_num_q() - which was set using ParallelGCThreads.
    // So this must be true - but assert just in case someone decides to
    // change the worker ids.
    assert(0 <= worker_id && worker_id < limit, "sanity");
    assert(!rp->discovery_is_atomic(), "check this code");

    // Select discovered lists [i, i+stride, i+2*stride,...,limit)
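    // e.g. (hypothetical sizes): with _n_workers == 4 and limit == 16,
    // stride == 4, so worker 0 scans lists 0, 4, 8, 12, worker 1 scans
    // lists 1, 5, 9, 13, and so on -- each list is visited by one worker.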
    for (uint idx = worker_id; idx < limit; idx += stride) {
      DiscoveredList& ref_list = rp->discovered_refs()[idx];

      DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
      while (iter.has_next()) {
        // Since discovery is not atomic for the CM ref processor, we
        // can see some null referent objects.
        iter.load_ptrs(DEBUG_ONLY(true));
        oop ref = iter.obj();

        // This will filter nulls.
        if (iter.is_referent_alive()) {
          iter.make_referent_alive();
        }
        iter.move_to_next();
      }
    }

    // Drain the queue - which may cause stealing
    G1ParEvacuateFollowersClosure drain_queue(_g1h, &pss, _queues, &_terminator);
    drain_queue.do_void();
    // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
    assert(pss.queue_is_empty(), "should be");
  }
};

// Weak Reference processing during an evacuation pause (part 1).
void G1CollectedHeap::process_discovered_references(uint no_of_gc_workers) {
  double ref_proc_start = os::elapsedTime();

  ReferenceProcessor* rp = _ref_processor_stw;
  assert(rp->discovery_enabled(), "should have been enabled");

  // Any reference objects, in the collection set, that were 'discovered'
  // by the CM ref processor should have already been copied (either by
  // applying the external root copy closure to the discovered lists, or
  // by following an RSet entry).
  //
  // But some of the referents, that are in the collection set, that these
  // reference objects point to may not have been copied: the STW ref
  // processor would have seen that the reference object had already
  // been 'discovered' and would have skipped discovering the reference,
  // but would not have treated the reference object as a regular oop.
  // As a result the copy closure would not have been applied to the
  // referent object.
  //
  // We need to explicitly copy these referent objects - the references
  // will be processed at the end of remarking.
  //
  // We also need to do this copying before we process the reference
  // objects discovered by the STW ref processor in case one of these
  // referents points to another object which is also referenced by an
  // object discovered by the STW ref processor.

  assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers");

  set_par_threads(no_of_gc_workers);
  G1ParPreserveCMReferentsTask keep_cm_referents(this,
                                                 no_of_gc_workers,
                                                 _task_queues);

  workers()->run_task(&keep_cm_referents);

  set_par_threads(0);

  // Closure to test whether a referent is alive.
  G1STWIsAliveClosure is_alive(this);

  // Even when parallel reference processing is enabled, the processing
  // of JNI refs is serial and performed by the current thread rather
  // than by a worker. The following PSS will be used for processing
  // JNI refs.

  // Use only a single queue for this PSS.
  G1ParScanThreadState pss(this, 0, NULL);

  // We do not embed a reference processor in the copying/scanning
  // closures while we're actually processing the discovered
  // reference objects.
  G1ParScanHeapEvacFailureClosure evac_failure_cl(this, &pss, NULL);

  pss.set_evac_failure_closure(&evac_failure_cl);

  assert(pss.queue_is_empty(), "pre-condition");

  G1ParScanExtRootClosure only_copy_non_heap_cl(this, &pss, NULL);

  G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, &pss, NULL);

  OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;

  if (_g1h->g1_policy()->during_initial_mark_pause()) {
    // We also need to mark copied objects.
    copy_non_heap_cl = &copy_mark_non_heap_cl;
  }

  // Keep alive closure.
  G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, &pss);

  // Serial Complete GC closure
  G1STWDrainQueueClosure drain_queue(this, &pss);

  // Setup the soft refs policy...
  rp->setup_policy(false);

  ReferenceProcessorStats stats;
  if (!rp->processing_is_mt()) {
    // Serial reference processing...
    stats = rp->process_discovered_references(&is_alive,
                                              &keep_alive,
                                              &drain_queue,
                                              NULL,
                                              _gc_timer_stw,
                                              _gc_tracer_stw->gc_id());
  } else {
    // Parallel reference processing
    assert(rp->num_q() == no_of_gc_workers, "sanity");
    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");

    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
    stats = rp->process_discovered_references(&is_alive,
                                              &keep_alive,
                                              &drain_queue,
                                              &par_task_executor,
                                              _gc_timer_stw,
                                              _gc_tracer_stw->gc_id());
  }

  _gc_tracer_stw->report_gc_reference_stats(stats);

  // We have completed copying any necessary live referent objects.
  assert(pss.queue_is_empty(), "both queue and overflow should be empty");

  double ref_proc_time = os::elapsedTime() - ref_proc_start;
  g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
}

// Weak Reference processing during an evacuation pause (part 2).
void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
  double ref_enq_start = os::elapsedTime();

  ReferenceProcessor* rp = _ref_processor_stw;
  assert(!rp->discovery_enabled(), "should have been disabled as part of processing");

  // Now enqueue any references remaining on the discovered lists on to
  // the pending list.
  if (!rp->processing_is_mt()) {
    // Serial reference processing...
    rp->enqueue_discovered_references();
  } else {
    // Parallel reference enqueueing

    assert(no_of_gc_workers == workers()->active_workers(),
           "Need to reset active workers");
    assert(rp->num_q() == no_of_gc_workers, "sanity");
    assert(no_of_gc_workers <= rp->max_num_q(), "sanity");

    G1STWRefProcTaskExecutor par_task_executor(this, workers(), _task_queues, no_of_gc_workers);
    rp->enqueue_discovered_references(&par_task_executor);
  }

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "should have been disabled");

  // FIXME
  // CM's reference processing also cleans up the string and symbol tables.
  // Should we do that here also? We could, but it is a serial operation
  // and could significantly increase the pause time.

  double ref_enq_time = os::elapsedTime() - ref_enq_start;
  g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
}

void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
  _expand_heap_after_alloc_failure = true;
  _evacuation_failed = false;

  // Should G1EvacuationFailureALot be in effect for this GC?
  NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)

  g1_rem_set()->prepare_for_oops_into_collection_set_do();

  // Disable the hot card cache.
  G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
  hot_card_cache->reset_hot_cache_claimed_index();
  hot_card_cache->set_use_cache(false);

  uint n_workers =
    AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
                                            workers()->active_workers(),
                                            Threads::number_of_non_daemon_threads());
  assert(UseDynamicNumberOfGCThreads ||
         n_workers == workers()->total_workers(),
         "If not dynamic should be using all the workers");
  workers()->set_active_workers(n_workers);
  set_par_threads(n_workers);

  G1ParTask g1_par_task(this, _task_queues);

  init_for_evac_failure(NULL);

  assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
  double start_par_time_sec = os::elapsedTime();
  double end_par_time_sec;

  {
    StrongRootsScope srs(this);
    // InitialMark needs claim bits to keep track of the marked-through CLDs.
    if (g1_policy()->during_initial_mark_pause()) {
      ClassLoaderDataGraph::clear_claimed_marks();
    }

    // The individual threads will set their evac-failure closures.
    if (PrintTerminationStats) G1ParScanThreadState::print_termination_stats_hdr();
    // These tasks use SharedHeap::_process_strong_tasks.
    assert(UseDynamicNumberOfGCThreads ||
           workers()->active_workers() == workers()->total_workers(),
           "If not dynamic should be using all the workers");
    workers()->run_task(&g1_par_task);
    end_par_time_sec = os::elapsedTime();

    // Closing the inner scope will execute the destructor
    // for the StrongRootsScope object. We record the current
    // elapsed time before closing the scope so that time
    // taken for the SRS destructor is NOT included in the
    // reported parallel time.
  }

  double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
  g1_policy()->phase_times()->record_par_time(par_time_ms);

  double code_root_fixup_time_ms =
    (os::elapsedTime() - end_par_time_sec) * 1000.0;
  g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);

  set_par_threads(0);

  // Process any discovered reference objects - we have
  // to do this _before_ we retire the GC alloc regions
  // as we may have to copy some 'reachable' referent
  // objects (and their reachable sub-graphs) that were
  // not copied during the pause.
  process_discovered_references(n_workers);

  if (G1StringDedup::is_enabled()) {
    G1STWIsAliveClosure is_alive(this);
    G1KeepAliveClosure keep_alive(this);
    G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
  }

  _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
  g1_rem_set()->cleanup_after_oops_into_collection_set_do();

  // Reset and re-enable the hot card cache.
  // Note the counts for the cards in the regions in the
  // collection set are reset when the collection set is freed.
  hot_card_cache->reset_hot_cache();
  hot_card_cache->set_use_cache(true);

  purge_code_root_memory();

  finalize_for_evac_failure();

  if (evacuation_failed()) {
    remove_self_forwarding_pointers();

    // Reset the G1EvacuationFailureALot counters and flags
    // Note: the values are reset only when an actual
    // evacuation failure occurs.
    NOT_PRODUCT(reset_evacuation_should_fail();)
  }

  // Enqueue any references remaining on the STW reference
  // processor's discovered lists. We need to do this after
  // the card table is cleaned (and verified) as the act of
  // enqueueing entries on to the pending list will log these
  // updates (and dirty their associated cards). We need these
  // updates logged to update any RSets.
  enqueue_discovered_references(n_workers);

  redirty_logged_cards();
  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void G1CollectedHeap::free_region(HeapRegion* hr,
                                  FreeRegionList* free_list,
                                  bool par,
                                  bool locked) {
  assert(!hr->is_free(), "the region should not be free");
  assert(!hr->is_empty(), "the region should not be empty");
  assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
  assert(free_list != NULL, "pre-condition");

  if (G1VerifyBitmaps) {
    MemRegion mr(hr->bottom(), hr->end());
    concurrent_mark()->clearRangePrevBitmap(mr);
  }

  // Clear the card counts for this region.
  // Note: we only need to do this if the region is not young
  // (since we don't refine cards in young regions).
  if (!hr->is_young()) {
    _cg1r->hot_card_cache()->reset_card_counts(hr);
  }
  // ... (lines omitted) ...

void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
                                           const HeapRegionSetCount& humongous_regions_removed) {
  if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
    MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
    _old_set.bulk_remove(old_regions_removed);
    _humongous_set.bulk_remove(humongous_regions_removed);
  }
}

void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
  assert(list != NULL, "list can't be null");
  if (!list->is_empty()) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    _hrm.insert_list_into_free_list(list);
  }
}

void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
  _allocator->decrease_used(bytes);
}

class G1ParCleanupCTTask : public AbstractGangTask {
  G1SATBCardTableModRefBS* _ct_bs;
  G1CollectedHeap* _g1h;
  HeapRegion* volatile _su_head;
public:
  G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
                     G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Par Cleanup CT Task"),
    _ct_bs(ct_bs), _g1h(g1h) { }

  void work(uint worker_id) {
    HeapRegion* r;
    while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
      clear_cards(r);
    }
  }

  void clear_cards(HeapRegion* r) {
    // ... (lines omitted) ...

    assert((cur->is_young() && cur->young_index_in_cset() > -1) ||
           (!cur->is_young() && cur->young_index_in_cset() == -1),
           "invariant");

    if (!cur->evacuation_failed()) {
      MemRegion used_mr = cur->used_region();

      // The region should not be empty.
      assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
      pre_used += cur->used();
      free_region(cur, &local_free_list, false /* par */, true /* locked */);
    } else {
      cur->uninstall_surv_rate_group();
      if (cur->is_young()) {
        cur->set_young_index_in_cset(-1);
      }
      cur->set_evacuation_failed(false);
      // The region is now considered to be old.
      cur->set_old();
      _old_set.add(cur);
      evacuation_info.increment_collectionset_used_after(cur->used());
    }
    cur = next;
  }

  evacuation_info.set_regions_freed(local_free_list.length());
  policy->record_max_rs_lengths(rs_lengths);
  policy->cset_regions_freed();

  double end_sec = os::elapsedTime();
  double elapsed_ms = (end_sec - start_sec) * 1000.0;

  if (non_young) {
    non_young_time_ms += elapsed_ms;
  } else {
    young_time_ms += elapsed_ms;
  }

  prepend_to_freelist(&local_free_list);
  // ... (lines omitted) ...

    return false;
  }

  size_t total_used() {
    return _total_used;
  }
};

void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
  assert_at_safepoint(true /* should_be_vm_thread */);

  if (!free_list_only) {
    _young_list->empty_list();
  }

  RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
  heap_region_iterate(&cl);

  if (!free_list_only) {
    _allocator->set_used(cl.total_used());
  }
  assert(_allocator->used_unlocked() == recalculate_used(),
         err_msg("inconsistent _allocator->used_unlocked(), "
                 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
                 _allocator->used_unlocked(), recalculate_used()));
}

void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
  _refine_cte_cl->set_concurrent(concurrent);
}

bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
  HeapRegion* hr = heap_region_containing(p);
  return hr->is_in(p);
}

// Methods for the mutator alloc region

HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
                                                      bool force) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  assert(!force || g1_policy()->can_expand_young_list(),
         "if force is true we should be able to expand the young list");
  bool young_list_full = g1_policy()->is_young_list_full();
  if (force || !young_list_full) {
    HeapRegion* new_alloc_region = new_region(word_size,
                                              false /* is_old */,
                                              false /* do_expand */);
    if (new_alloc_region != NULL) {
      set_region_short_lived_locked(new_alloc_region);
      _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
      check_bitmaps("Mutator Region Allocation", new_alloc_region);
      return new_alloc_region;
    }
  }
  return NULL;
}

void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
                                                  size_t allocated_bytes) {
  assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
  assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");

  g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
  _allocator->increase_used(allocated_bytes);
  _hr_printer.retire(alloc_region);
  // We update the eden sizes here, when the region is retired,
  // instead of when it's allocated, since this is the point that its
  // used space has been recorded in _summary_bytes_used.
  g1mm()->update_eden_size();
}

void G1CollectedHeap::set_par_threads() {
  // Don't change the number of workers. Use the value previously set
  // in the workgroup.
  uint n_workers = workers()->active_workers();
  assert(UseDynamicNumberOfGCThreads ||
         n_workers == workers()->total_workers(),
         "Otherwise should be using the total number of workers");
  if (n_workers == 0) {
    assert(false, "Should have been set in prior evacuation pause.");
    n_workers = ParallelGCThreads;
    workers()->set_active_workers(n_workers);
  }
  set_par_threads(n_workers);
}

// Methods for the GC alloc regions

HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
                                                 uint count,
                                                 InCSetState dest) {
  assert(FreeList_lock->owned_by_self(), "pre-condition");

  if (count < g1_policy()->max_regions(dest)) {
    const bool is_survivor = (dest.is_young());
    HeapRegion* new_alloc_region = new_region(word_size,
                                              !is_survivor,
                                              true /* do_expand */);
    if (new_alloc_region != NULL) {
      // We really only need to do this for old regions given that we
      // should never scan survivors. But it doesn't hurt to do it
      // for survivors too.
      new_alloc_region->record_timestamp();
      if (is_survivor) {
        new_alloc_region->set_survivor();
        _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
      // ... (lines omitted) ...

      bool during_im = g1_policy()->during_initial_mark_pause();
      new_alloc_region->note_start_of_copying(during_im);
      return new_alloc_region;
    }
  }
  return NULL;
}

void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
                                             size_t allocated_bytes,
                                             InCSetState dest) {
  bool during_im = g1_policy()->during_initial_mark_pause();
  alloc_region->note_end_of_copying(during_im);
  g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
  if (dest.is_young()) {
    young_list()->add_survivor_region(alloc_region);
  } else {
    _old_set.add(alloc_region);
  }
  _hr_printer.retire(alloc_region);
}

// Heap region set verification

class VerifyRegionListsClosure : public HeapRegionClosure {
private:
  HeapRegionSet* _old_set;
  HeapRegionSet* _humongous_set;
  HeapRegionManager* _hrm;

public:
  HeapRegionSetCount _old_count;
  HeapRegionSetCount _humongous_count;
  HeapRegionSetCount _free_count;

  VerifyRegionListsClosure(HeapRegionSet* old_set,
                           HeapRegionSet* humongous_set,
                           HeapRegionManager* hrm) :
    _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
    _old_count(), _humongous_count(), _free_count() { }
835 // Make sure you read the note in attempt_allocation_humongous().
836
837 assert_heap_not_locked_and_not_at_safepoint();
838 assert(!is_humongous(word_size), "attempt_allocation_slow() should not "
839 "be called for humongous allocation requests");
840
841 // We should only get here after the first-level allocation attempt
842 // (attempt_allocation()) failed to allocate.
843
844 // We will loop until a) we manage to successfully perform the
845 // allocation or b) we successfully schedule a collection which
846 // fails to perform the allocation. b) is the only case when we'll
847 // return NULL.
848 HeapWord* result = NULL;
849 for (int try_count = 1; /* we'll return */; try_count += 1) {
850 bool should_try_gc;
851 uint gc_count_before;
852
853 {
854 MutexLockerEx x(Heap_lock);
855 result = _allocator->par_allocate_during_mutator_locked(word_size, false /* bot_updates */, context);
856 if (result != NULL) {
857 return result;
858 }
859
860       // If we reach here, par_allocate_during_mutator_locked() above failed
861       // to allocate a new region. So the mutator alloc region should be NULL.
862       // assert(_allocator->mutator_alloc_region(context)->get() == NULL, "only way to get here");
863
864 if (GC_locker::is_active_and_needs_gc()) {
865 if (g1_policy()->can_expand_young_list()) {
866 // No need for an ergo verbose message here,
867 // can_expand_young_list() does this when it returns true.
868 result = _allocator->par_allocate_during_mutator_force(word_size, false /* bot_updates */, context);
869 if (result != NULL) {
870 return result;
871 }
872 }
873 should_try_gc = false;
874 } else {
875 // The GCLocker may not be active but the GCLocker initiated
876 // GC may not yet have been performed (GCLocker::needs_gc()
877 // returns true). In this case we do not try this GC and
878 // wait until the GCLocker initiated GC is performed, and
879 // then retry the allocation.
880 if (GC_locker::needs_gc()) {
881 should_try_gc = false;
882 } else {
883 // Read the GC count while still holding the Heap_lock.
884 gc_count_before = total_collections();
885 should_try_gc = true;
886 }
887 }
888 }
908 if (*gclocker_retry_count_ret > GCLockerRetryAllocationCount) {
909 MutexLockerEx x(Heap_lock);
910 *gc_count_before_ret = total_collections();
911 return NULL;
912 }
913 // The GCLocker is either active or the GCLocker initiated
914 // GC has not yet been performed. Stall until it is and
915 // then retry the allocation.
916 GC_locker::stall_until_clear();
917 (*gclocker_retry_count_ret) += 1;
918 }
919
920 // We can reach here if we were unsuccessful in scheduling a
921 // collection (because another thread beat us to it) or if we were
922     // stalled due to the GC locker. In either case we should retry the
923 // allocation attempt in case another thread successfully
924 // performed a collection and reclaimed enough space. We do the
925 // first attempt (without holding the Heap_lock) here and the
926 // follow-on attempt will be at the start of the next loop
927 // iteration (after taking the Heap_lock).
928 result = _allocator->par_allocate_during_mutator(word_size, false /* bot_updates */, context);
929 if (result != NULL) {
930 return result;
931 }
932
933 // Give a warning if we seem to be looping forever.
934 if ((QueuedAllocationWarningCount > 0) &&
935 (try_count % QueuedAllocationWarningCount == 0)) {
936 warning("G1CollectedHeap::attempt_allocation_slow() "
937 "retries %d times", try_count);
938 }
939 }
940
941 ShouldNotReachHere();
942 return NULL;
943 }
944
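// A condensed sketch of the retry protocol above (illustrative only; the
// helpers allocate_locked() and allocate_unlocked() are hypothetical
// stand-ins for the real allocator calls):
//
//   for (;;) {
//     { MutexLockerEx x(Heap_lock);
//       if (HeapWord* p = allocate_locked(word_size)) return p;  // fast path
//       gc_count_before = total_collections();                   // snapshot
//     }
//     if (should_try_gc) {
//       // schedule a pause; the pause may perform the allocation for us
//     } else {
//       GC_locker::stall_until_clear();          // wait for the JNI critical
//     }                                          // sections to drain
//     if (HeapWord* p = allocate_unlocked(word_size)) return p;  // racy retry
//   }
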
945 HeapWord* G1CollectedHeap::attempt_allocation_humongous(size_t word_size,
946 uint* gc_count_before_ret,
947 uint* gclocker_retry_count_ret) {
948 // The structure of this method has a lot of similarities to
1047   // stalled due to the GC locker. In either case we should retry the
1048 // allocation attempt in case another thread successfully
1049 // performed a collection and reclaimed enough space. Give a
1050 // warning if we seem to be looping forever.
1051
1052 if ((QueuedAllocationWarningCount > 0) &&
1053 (try_count % QueuedAllocationWarningCount == 0)) {
1054 warning("G1CollectedHeap::attempt_allocation_humongous() "
1055 "retries %d times", try_count);
1056 }
1057 }
1058
1059 ShouldNotReachHere();
1060 return NULL;
1061 }
1062
1063 HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
1064 AllocationContext_t context,
1065 bool expect_null_mutator_alloc_region) {
1066 assert_at_safepoint(true /* should_be_vm_thread */);
1067 /*
1068 assert(_allocator->mutator_alloc_region(context)->get() == NULL ||
1069 !expect_null_mutator_alloc_region,
1070 "the current alloc region was unexpectedly found to be non-NULL");
1071 */
1072 if (!is_humongous(word_size)) {
1073 return _allocator->par_allocate_during_mutator_locked(word_size, false /* bot_updates */, context);
1074 } else {
1075 HeapWord* result = humongous_obj_allocate(word_size, context);
1076 if (result != NULL && g1_policy()->need_to_start_conc_mark("STW humongous allocation")) {
1077 g1_policy()->set_initiate_conc_mark_if_possible();
1078 }
1079 return result;
1080 }
1081
1082 ShouldNotReachHere();
1083 }
1084
1085 class PostMCRemSetClearClosure: public HeapRegionClosure {
1086 G1CollectedHeap* _g1h;
1087 ModRefBarrierSet* _mr_bs;
1088 public:
1089 PostMCRemSetClearClosure(G1CollectedHeap* g1h, ModRefBarrierSet* mr_bs) :
1090 _g1h(g1h), _mr_bs(mr_bs) {}
1091
1092 bool doHeapRegion(HeapRegion* r) {
1093 HeapRegionRemSet* hrrs = r->rem_set();
1764 _is_alive_closure_cm(this),
1765 _is_alive_closure_stw(this),
1766 _ref_processor_cm(NULL),
1767 _ref_processor_stw(NULL),
1768 _process_strong_tasks(new SubTasksDone(G1H_PS_NumElements)),
1769 _bot_shared(NULL),
1770 _evac_failure_scan_stack(NULL),
1771 _mark_in_progress(false),
1772 _cg1r(NULL),
1773 _g1mm(NULL),
1774 _refine_cte_cl(NULL),
1775 _full_collection(false),
1776 _secondary_free_list("Secondary Free List", new SecondaryFreeRegionListMtSafeChecker()),
1777 _old_set("Old Set", false /* humongous */, new OldRegionSetMtSafeChecker()),
1778 _humongous_set("Master Humongous Set", true /* humongous */, new HumongousRegionSetMtSafeChecker()),
1779 _humongous_is_live(),
1780 _has_humongous_reclaim_candidates(false),
1781 _free_regions_coming(false),
1782 _young_list(new YoungList(this)),
1783 _gc_time_stamp(0),
1784 _expand_heap_after_alloc_failure(true),
1785 _surviving_young_words(NULL),
1786 _old_marking_cycles_started(0),
1787 _old_marking_cycles_completed(0),
1788 _concurrent_cycle_started(false),
1789 _heap_summary_sent(false),
1790 _in_cset_fast_test(),
1791 _dirty_cards_region_list(NULL),
1792 _worker_cset_start_region(NULL),
1793 _worker_cset_start_region_time_stamp(NULL),
1794 _gc_timer_stw(new (ResourceObj::C_HEAP, mtGC) STWGCTimer()),
1795 _gc_timer_cm(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
1796 _gc_tracer_stw(new (ResourceObj::C_HEAP, mtGC) G1NewTracer()),
1797 _gc_tracer_cm(new (ResourceObj::C_HEAP, mtGC) G1OldTracer()) {
1798
1799 _g1h = this;
1800 if (_process_strong_tasks == NULL || !_process_strong_tasks->valid()) {
1801 vm_exit_during_initialization("Failed necessary allocation.");
1802 }
1803
2197 DirtyCardQueue* into_cset_dcq,
2198 bool concurrent,
2199 uint worker_i) {
2200 // Clean cards in the hot card cache
2201 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
2202 hot_card_cache->drain(worker_i, g1_rem_set(), into_cset_dcq);
2203
2204 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
2205 int n_completed_buffers = 0;
2206 while (dcqs.apply_closure_to_completed_buffer(cl, worker_i, 0, true)) {
2207 n_completed_buffers++;
2208 }
2209 g1_policy()->phase_times()->record_update_rs_processed_buffers(worker_i, n_completed_buffers);
2210 dcqs.clear_n_completed_buffers();
2211 assert(!dcqs.completed_buffers_exist_dirty(), "Completed buffers exist!");
2212 }
2213
2214
2215 // Computes the sum of the storage used by the various regions.
2216 size_t G1CollectedHeap::used() const {
2217 return _summary_bytes_used + _allocator->used_in_alloc_regions();
2218 }
2219
2220 size_t G1CollectedHeap::used_unlocked() const {
2221 return _summary_bytes_used;
2222 }
2223
2224 class SumUsedClosure: public HeapRegionClosure {
2225 size_t _used;
2226 public:
2227 SumUsedClosure() : _used(0) {}
2228 bool doHeapRegion(HeapRegion* r) {
2229 if (!r->is_continues_humongous()) {
2230 _used += r->used();
2231 }
2232 return false;
2233 }
2234 size_t result() { return _used; }
2235 };
2236
2237 size_t G1CollectedHeap::recalculate_used() const {
2238 double recalculate_used_start = os::elapsedTime();
2239
2240 SumUsedClosure blk;
2241 heap_region_iterate(&blk);
2695
2696 bool G1CollectedHeap::supports_tlab_allocation() const {
2697 return true;
2698 }
2699
2700 size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
2701 return (_g1_policy->young_list_target_length() - young_list()->survivor_length()) * HeapRegion::GrainBytes;
2702 }
2703
2704 size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
2705 return young_list()->eden_used_bytes();
2706 }
2707
2708 // For G1, TLABs should not contain humongous objects, so the maximum TLAB size
2709 // must be smaller than the humongous object limit.
2710 size_t G1CollectedHeap::max_tlab_size() const {
2711 return align_size_down(_humongous_object_threshold_in_words - 1, MinObjAlignment);
2712 }
2713
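// Worked example (illustrative numbers, assuming a 1M region, i.e.
// GrainWords == 131072, and a humongous threshold of GrainWords / 2):
//
//   const size_t threshold = 131072 / 2;                      // 65536 words
//   const size_t max_tlab  = align_size_down(threshold - 1, MinObjAlignment);
//   // with the common default MinObjAlignment of one word: 65535 words,
//   // strictly below the humongous limit as required
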
2714 size_t G1CollectedHeap::unsafe_max_tlab_alloc(Thread* ignored) const {
2715 return _allocator->unsafe_max_tlab_alloc();
2716 }
2717
2718 size_t G1CollectedHeap::max_capacity() const {
2719 return _hrm.reserved().byte_size();
2720 }
2721
2722 jlong G1CollectedHeap::millis_since_last_gc() {
2723 // assert(false, "NYI");
2724 return 0;
2725 }
2726
2727 void G1CollectedHeap::prepare_for_verify() {
2728 if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
2729 ensure_parsability(false);
2730 }
2731 g1_rem_set()->prepare_for_verify();
2732 }
2733
2734 bool G1CollectedHeap::allocated_since_marking(oop obj, HeapRegion* hr,
2735 VerifyOption vo) {
3904
3905 // Don't check the whole heap at this point as the
3906 // GC alloc regions from this pause have been tagged
3907 // as survivors and moved on to the survivor list.
3908 // Survivor regions will fail the !is_young() check.
3909 assert(check_young_list_empty(false /* check_heap */),
3910 "young list should be empty");
3911
3912 #if YOUNG_LIST_VERBOSE
3913 gclog_or_tty->print_cr("Before recording survivors.\nYoung List:");
3914 _young_list->print();
3915 #endif // YOUNG_LIST_VERBOSE
3916
3917 g1_policy()->record_survivor_regions(_young_list->survivor_length(),
3918 _young_list->first_survivor_region(),
3919 _young_list->last_survivor_region());
3920
3921 _young_list->reset_auxilary_lists();
3922
3923 if (evacuation_failed()) {
3924 set_used(recalculate_used());
3925     uint n_queues = MAX2((uint)ParallelGCThreads, 1U);
3926 for (uint i = 0; i < n_queues; i++) {
3927 if (_evacuation_failed_info_array[i].has_failed()) {
3928 _gc_tracer_stw->report_evacuation_failed(_evacuation_failed_info_array[i]);
3929 }
3930 }
3931 } else {
3932     // The "used" of the collection set regions has already been subtracted
3933 // when they were freed. Add in the bytes evacuated.
3934 increase_used(g1_policy()->bytes_copied_during_gc());
3935 }
3936
3937 if (g1_policy()->during_initial_mark_pause()) {
3938 // We have to do this before we notify the CM threads that
3939 // they can start working to make sure that all the
3940 // appropriate initialization is done on the CM object.
3941 concurrent_mark()->checkpointRootsInitialPost();
3942 set_marking_started();
3943 // Note that we don't actually trigger the CM thread at
3944 // this point. We do that later when we're sure that
3945 // the current thread has completed its logging output.
3946 }
3947
3948 allocate_dummy_regions();
3949
3950 #if YOUNG_LIST_VERBOSE
3951 gclog_or_tty->print_cr("\nEnd of the pause.\nYoung_list:");
3952 _young_list->print();
3953 g1_policy()->print_collection_set(g1_policy()->inc_cset_head(), gclog_or_tty);
3954 #endif // YOUNG_LIST_VERBOSE
4121
4122 while (_evac_failure_scan_stack->length() > 0) {
4123 oop obj = _evac_failure_scan_stack->pop();
4124 _evac_failure_closure->set_region(heap_region_containing(obj));
4125 obj->oop_iterate_backwards(_evac_failure_closure);
4126 }
4127 }
4128
4129 oop
4130 G1CollectedHeap::handle_evacuation_failure_par(G1ParScanThreadState* _par_scan_state,
4131 oop old) {
4132 assert(obj_in_cs(old),
4133 err_msg("obj: "PTR_FORMAT" should still be in the CSet",
4134 (HeapWord*) old));
4135 markOop m = old->mark();
4136 oop forward_ptr = old->forward_to_atomic(old);
4137 if (forward_ptr == NULL) {
4138 // Forward-to-self succeeded.
4139 assert(_par_scan_state != NULL, "par scan state");
4140 OopsInHeapRegionClosure* cl = _par_scan_state->evac_failure_closure();
4141 uint queue_num = _par_scan_state->worker_queue_id();
4142
4143 _evacuation_failed = true;
4144 _evacuation_failed_info_array[queue_num].register_copy_failure(old->size());
4145 if (_evac_failure_closure != cl) {
4146 MutexLockerEx x(EvacFailureStack_lock, Mutex::_no_safepoint_check_flag);
4147 assert(!_drain_in_progress,
4148 "Should only be true while someone holds the lock.");
4149 // Set the global evac-failure closure to the current thread's.
4150 assert(_evac_failure_closure == NULL, "Or locking has failed.");
4151 set_evac_failure_closure(cl);
4152 // Now do the common part.
4153 handle_evacuation_failure_common(old, m);
4154 // Reset to NULL.
4155 set_evac_failure_closure(NULL);
4156 } else {
4157 // The lock is already held, and this is recursive.
4158 assert(_drain_in_progress, "This should only be the recursive case.");
4159 handle_evacuation_failure_common(old, m);
4160 }
4161 return old;
4223 }
4224
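// The forward_to_atomic() call above is the usual self-forwarding race:
// exactly one thread sees NULL and owns the failure handling. A minimal
// sketch of the idiom (markOop-based forwarding assumed):
//
//   oop forward_ptr = old->forward_to_atomic(old);  // CAS the mark word
//   if (forward_ptr == NULL) {
//     // we won the race: record the failure, preserve the mark if needed
//   } else {
//     // another worker already installed a forwardee; just use forward_ptr
//   }
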
4225 template <class T>
4226 void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
4227 if (_g1->heap_region_containing_raw(new_obj)->is_young()) {
4228 _scanned_klass->record_modified_oops();
4229 }
4230 }
4231
4232 template <G1Barrier barrier, G1Mark do_mark_object>
4233 template <class T>
4234 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
4235 T heap_oop = oopDesc::load_heap_oop(p);
4236
4237 if (oopDesc::is_null(heap_oop)) {
4238 return;
4239 }
4240
4241 oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
4242
4243 assert(_worker_id == _par_scan_state->worker_queue_id(), "sanity");
4244
4245 const InCSetState state = _g1->in_cset_state(obj);
4246 if (state.is_in_cset()) {
4247 oop forwardee;
4248 markOop m = obj->mark();
4249 if (m->is_marked()) {
4250 forwardee = (oop) m->decode_pointer();
4251 } else {
4252 forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
4253 }
4254 assert(forwardee != NULL, "forwardee should not be NULL");
4255 oopDesc::encode_store_heap_oop(p, forwardee);
4256 if (do_mark_object != G1MarkNone && forwardee != obj) {
4257 // If the object is self-forwarded we don't need to explicitly
4258 // mark it, the evacuation failure protocol will do so.
4259 mark_forwarded_object(obj, forwardee);
4260 }
4261
4262     if (barrier == G1BarrierKlass) {
4263       do_klass_barrier(p, forwardee);
4264     }
4265   } else {
4266 if (state.is_humongous()) {
4267 _g1->set_humongous_is_live(obj);
4268 }
4269 // The object is not in collection set. If we're a root scanning
4270 // closure during an initial mark pause then attempt to mark the object.
4271 if (do_mark_object == G1MarkFromRoot) {
4272 mark_object(obj);
4273 }
4274 }
4275
4276 if (barrier == G1BarrierEvac) {
4277 _par_scan_state->update_rs(_from, p, _worker_id);
4278 }
4279 }
4280
4281 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(oop* p);
4282 template void G1ParCopyClosure<G1BarrierEvac, G1MarkNone>::do_oop_work(narrowOop* p);
4283
4284 class G1ParEvacuateFollowersClosure : public VoidClosure {
4285 private:
4286 double _start_term;
4287 double _term_time;
4288 size_t _term_attempts;
4289
4290 void start_term_time() { _term_attempts++; _start_term = os::elapsedTime(); }
4291 void end_term_time() { _term_time += os::elapsedTime() - _start_term; }
4292 protected:
4293 G1CollectedHeap* _g1h;
4294 G1ParScanThreadState* _par_scan_state;
4295 RefToScanQueueSet* _queues;
4296 ParallelTaskTerminator* _terminator;
4297
4298 G1ParScanThreadState* par_scan_state() { return _par_scan_state; }
4299 RefToScanQueueSet* queues() { return _queues; }
4300 ParallelTaskTerminator* terminator() { return _terminator; }
4301
4302 public:
4303 G1ParEvacuateFollowersClosure(G1CollectedHeap* g1h,
4304 G1ParScanThreadState* par_scan_state,
4305 RefToScanQueueSet* queues,
4306 ParallelTaskTerminator* terminator)
4307 : _g1h(g1h), _par_scan_state(par_scan_state),
4308 _queues(queues), _terminator(terminator),
4309 _start_term(0.0), _term_time(0.0), _term_attempts(0) {}
4310
4311 void do_void();
4312
4313 double term_time() const { return _term_time; }
4314 size_t term_attempts() const { return _term_attempts; }
4315
4316 private:
4317 inline bool offer_termination();
4318 };
4319
4320 bool G1ParEvacuateFollowersClosure::offer_termination() {
4321 G1ParScanThreadState* const pss = par_scan_state();
4322 start_term_time();
4323 const bool res = terminator()->offer_termination();
4324 end_term_time();
4325 return res;
4326 }
4327
4328 void G1ParEvacuateFollowersClosure::do_void() {
4329 G1ParScanThreadState* const pss = par_scan_state();
4330 pss->trim_queue();
4331 do {
4332 pss->steal_and_trim_queue(queues());
4333 } while (!offer_termination());
4334 }
4335
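// Typical driver for this closure (a sketch mirroring its use in G1ParTask
// further below; g1h, pss, queue_set and n_workers are assumed context):
//
//   ParallelTaskTerminator term(n_workers, queue_set);
//   G1ParEvacuateFollowersClosure evac(g1h, pss, queue_set, &term);
//   evac.do_void();  // returns once all workers agree to terminate
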
4336 class G1KlassScanClosure : public KlassClosure {
4337 G1ParCopyHelper* _closure;
4338 bool _process_only_dirty;
4339 int _count;
4340 public:
4341   G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
4342       : _closure(closure), _process_only_dirty(process_only_dirty), _count(0) {}
4343 void do_klass(Klass* klass) {
4344 // If the klass has not been dirtied we know that there's
4395
4396 HeapRegionGatheringOopClosure _oc;
4397 public:
4398 G1CodeBlobClosure(OopClosure* oc) : _oc(oc) {}
4399
4400 void do_code_blob(CodeBlob* cb) {
4401 nmethod* nm = cb->as_nmethod_or_null();
4402 if (nm != NULL) {
4403 if (!nm->test_set_oops_do_mark()) {
4404 _oc.set_nm(nm);
4405 nm->oops_do(&_oc);
4406 nm->fix_oop_relocations();
4407 }
4408 }
4409 }
4410 };
4411
4412 class G1ParTask : public AbstractGangTask {
4413 protected:
4414 G1CollectedHeap* _g1h;
4415 G1ParScanThreadState** _pss;
4416 RefToScanQueueSet* _queues;
4417 ParallelTaskTerminator _terminator;
4418 uint _n_workers;
4419
4420 public:
4421 G1ParTask(G1CollectedHeap* g1h, G1ParScanThreadState** pss, RefToScanQueueSet *task_queues)
4422 : AbstractGangTask("G1 collection"),
4423 _g1h(g1h),
4424 _pss(pss),
4425 _queues(task_queues),
4426 _terminator(0, _queues)
4427 {}
4428
4429 RefToScanQueueSet* queues() { return _queues; }
4430
4431 RefToScanQueue *work_queue(int i) {
4432 return queues()->queue(i);
4433 }
4434
4435 ParallelTaskTerminator* terminator() { return &_terminator; }
4436
4437 virtual void set_for_termination(int active_workers) {
4438 // This task calls set_n_termination() in par_non_clean_card_iterate_work()
4439 // in the young space (_par_seq_tasks) in the G1 heap
4440 // for SequentialSubTasksDone.
4441 // This task also uses SubTasksDone in SharedHeap and G1CollectedHeap
4442 // both of which need setting by set_n_termination().
4443 _g1h->SharedHeap::set_n_termination(active_workers);
4444 _g1h->set_n_termination(active_workers);
4445 terminator()->reset_for_reuse(active_workers);
4446 _n_workers = active_workers;
4470
4471 }
4472
4473 void do_cld(ClassLoaderData* cld) {
4474 cld->oops_do(_oop_closure, &_klass_in_cld_closure, _claim);
4475 }
4476 };
4477
4478 void work(uint worker_id) {
4479 if (worker_id >= _n_workers) return; // no work needed this round
4480
4481 double start_time_ms = os::elapsedTime() * 1000.0;
4482 _g1h->g1_policy()->phase_times()->record_gc_worker_start_time(worker_id, start_time_ms);
4483
4484 {
4485 ResourceMark rm;
4486 HandleMark hm;
4487
4488 ReferenceProcessor* rp = _g1h->ref_processor_stw();
4489
4490 G1ParScanThreadState* pss = _pss[worker_id];
4491 pss->set_ref_processor(rp);
4492
4493 bool only_young = _g1h->g1_policy()->gcs_are_young();
4494
4495 // Non-IM young GC.
4496 G1ParCopyClosure<G1BarrierNone, G1MarkNone> scan_only_root_cl(_g1h, pss, rp);
4497 G1CLDClosure<G1MarkNone> scan_only_cld_cl(&scan_only_root_cl,
4498 only_young, // Only process dirty klasses.
4499 false); // No need to claim CLDs.
4500 // IM young GC.
4501 // Strong roots closures.
4502 G1ParCopyClosure<G1BarrierNone, G1MarkFromRoot> scan_mark_root_cl(_g1h, pss, rp);
4503 G1CLDClosure<G1MarkFromRoot> scan_mark_cld_cl(&scan_mark_root_cl,
4504 false, // Process all klasses.
4505 true); // Need to claim CLDs.
4506 // Weak roots closures.
4507 G1ParCopyClosure<G1BarrierNone, G1MarkPromotedFromRoot> scan_mark_weak_root_cl(_g1h, pss, rp);
4508 G1CLDClosure<G1MarkPromotedFromRoot> scan_mark_weak_cld_cl(&scan_mark_weak_root_cl,
4509 false, // Process all klasses.
4510 true); // Need to claim CLDs.
4511
4512 G1CodeBlobClosure scan_only_code_cl(&scan_only_root_cl);
4513 G1CodeBlobClosure scan_mark_code_cl(&scan_mark_root_cl);
4514 // IM Weak code roots are handled later.
4515
4516 OopClosure* strong_root_cl;
4517 OopClosure* weak_root_cl;
4518 CLDClosure* strong_cld_cl;
4519 CLDClosure* weak_cld_cl;
4520 CodeBlobClosure* strong_code_cl;
4521
4522 if (_g1h->g1_policy()->during_initial_mark_pause()) {
4523 // We also need to mark copied objects.
4524 strong_root_cl = &scan_mark_root_cl;
4525 strong_cld_cl = &scan_mark_cld_cl;
4526 strong_code_cl = &scan_mark_code_cl;
4527 if (ClassUnloadingWithConcurrentMark) {
4528 weak_root_cl = &scan_mark_weak_root_cl;
4529 weak_cld_cl = &scan_mark_weak_cld_cl;
4530 } else {
4531 weak_root_cl = &scan_mark_root_cl;
4532 weak_cld_cl = &scan_mark_cld_cl;
4533 }
4534 } else {
4535 strong_root_cl = &scan_only_root_cl;
4536 weak_root_cl = &scan_only_root_cl;
4537 strong_cld_cl = &scan_only_cld_cl;
4538 weak_cld_cl = &scan_only_cld_cl;
4539 strong_code_cl = &scan_only_code_cl;
4540 }
4541
4542
4543 G1ParPushHeapRSClosure push_heap_rs_cl(_g1h, pss);
4544
4545 double start_strong_roots = os::elapsedTime();
4546 _g1h->g1_process_roots(strong_root_cl,
4547 weak_root_cl,
4548 &push_heap_rs_cl,
4549 strong_cld_cl,
4550 weak_cld_cl,
4551 strong_code_cl,
4552 worker_id);
4553 double strong_roots_time = os::elapsedTime() - start_strong_roots;
4554 double evac_term_time = 0.0;
4555 size_t evac_term_attempts = 0;
4556 {
4557 double start = os::elapsedTime();
4558 G1ParEvacuateFollowersClosure evac(_g1h, pss, _queues, &_terminator);
4559 evac.do_void();
4560 double elapsed_ms = (os::elapsedTime()-start)*1000.0;
4561 evac_term_attempts = evac.term_attempts();
4562 evac_term_time = evac.term_time();
4563 double term_ms = evac_term_time * 1000.0;
4564 _g1h->g1_policy()->phase_times()->add_obj_copy_time(worker_id, elapsed_ms - term_ms);
4565 _g1h->g1_policy()->phase_times()->record_termination(worker_id, term_ms, evac.term_attempts());
4566 }
4567 _g1h->g1_policy()->record_thread_age_table(pss->age_table());
4568 _g1h->update_surviving_young_words(pss->surviving_young_words()+1);
4569 assert(pss->queue_is_empty(), "should be empty");
4570
4571 if (PrintTerminationStats) {
4572 MutexLockerEx x(ParGCRareEvent_lock, Mutex::_no_safepoint_check_flag);
4573 _g1h->print_termination_stats(gclog_or_tty,
4574 worker_id,
4575 os::elapsedTime() * 1000.0 - start_time_ms, /* elapsed time */
4576 strong_roots_time * 1000.0, /* strong roots time */
4577 evac_term_time * 1000.0, /* evac term time */
4578 evac_term_attempts, /* evac term attempts */
4579 pss->lab_waste(), /* alloc buffer waste */
4580 pss->lab_undo_waste() /* undo waste */
4581 );
4582 }
4583 // Close the inner scope so that the ResourceMark and HandleMark
4584 // destructors are executed here and are included as part of the
4585 // "GC Worker Time".
4586 }
4587
4588 double end_time_ms = os::elapsedTime() * 1000.0;
4589 _g1h->g1_policy()->phase_times()->record_gc_worker_end_time(worker_id, end_time_ms);
4590 }
4591 };
4592
4593 void G1CollectedHeap::print_termination_stats_hdr(outputStream* const st) {
4594 st->print_raw_cr("GC Termination Stats");
4595 st->print_raw_cr(" elapsed --strong roots-- -------termination------- ------waste (KiB)------");
4596 st->print_raw_cr("thr ms ms % ms % attempts total alloc undo");
4597 st->print_raw_cr("--- --------- --------- ------ --------- ------ -------- ------- ------- -------");
4598 }
4599
4600 void G1CollectedHeap::print_termination_stats(outputStream* const st,
4601 uint worker_id,
4602 double elapsed_ms,
4603 double strong_roots_ms,
4604 double term_ms,
4605 size_t term_attempts,
4606 size_t alloc_buffer_waste,
4607 size_t undo_waste) const {
4608   st->print_cr("%3u %9.2f %9.2f %6.2f "
4609 "%9.2f %6.2f " SIZE_FORMAT_W(8) " "
4610 SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7) " " SIZE_FORMAT_W(7),
4611 worker_id, elapsed_ms, strong_roots_ms, strong_roots_ms * 100 / elapsed_ms,
4612 term_ms, term_ms * 100 / elapsed_ms, term_attempts,
4613 (alloc_buffer_waste + undo_waste) * HeapWordSize / K,
4614 alloc_buffer_waste * HeapWordSize / K,
4615 undo_waste * HeapWordSize / K);
4616 }
4617
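// The waste columns above convert heap words to KiB. Worked through with
// illustrative numbers on a 64-bit VM (HeapWordSize == 8):
//
//   size_t alloc_waste = 1280, undo_waste = 256;               // words
//   (alloc_waste + undo_waste) * HeapWordSize / K;             // total: 12
//   alloc_waste * HeapWordSize / K;                            // alloc: 10
//   undo_waste  * HeapWordSize / K;                            // undo:   2
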
4618 // *** Common G1 Evacuation Stuff
4619
4620 // This method is run in a GC worker.
4621
4622 void
4623 G1CollectedHeap::
4624 g1_process_roots(OopClosure* scan_non_heap_roots,
4625 OopClosure* scan_non_heap_weak_roots,
4626 G1ParPushHeapRSClosure* scan_rs,
4627 CLDClosure* scan_strong_clds,
4628 CLDClosure* scan_weak_clds,
4629 CodeBlobClosure* scan_strong_code,
4630 uint worker_i) {
4631
4632 // First scan the shared roots.
4633 double ext_roots_start = os::elapsedTime();
4634 double closure_app_time_sec = 0.0;
4635
4636 bool during_im = _g1h->g1_policy()->during_initial_mark_pause();
4637 bool trace_metadata = during_im && ClassUnloadingWithConcurrentMark;
5245 public:
5246 G1STWDrainQueueClosure(G1CollectedHeap* g1h, G1ParScanThreadState* pss) :
5247 _g1h(g1h),
5248 _par_scan_state(pss)
5249 { }
5250
5251 void do_void() {
5252 G1ParScanThreadState* const pss = par_scan_state();
5253 pss->trim_queue();
5254 }
5255 };
5256
5257 // Parallel Reference Processing closures
5258
5259 // Implementation of AbstractRefProcTaskExecutor for parallel reference
5260 // processing during G1 evacuation pauses.
5261
5262 class G1STWRefProcTaskExecutor: public AbstractRefProcTaskExecutor {
5263 private:
5264 G1CollectedHeap* _g1h;
5265 G1ParScanThreadState** _pss;
5266 RefToScanQueueSet* _queues;
5267 FlexibleWorkGang* _workers;
5268 int _active_workers;
5269
5270 public:
5271 G1STWRefProcTaskExecutor(G1CollectedHeap* g1h,
5272 G1ParScanThreadState** pss,
5273 FlexibleWorkGang* workers,
5274 RefToScanQueueSet *task_queues,
5275 int n_workers) :
5276 _g1h(g1h),
5277 _pss(pss),
5278 _queues(task_queues),
5279 _workers(workers),
5280 _active_workers(n_workers)
5281 {
5282 assert(n_workers > 0, "shouldn't call this otherwise");
5283 }
5284
5285 // Executes the given task using concurrent marking worker threads.
5286 virtual void execute(ProcessTask& task);
5287 virtual void execute(EnqueueTask& task);
5288 };
5289
5290 // Gang task for possibly parallel reference processing
5291
5292 class G1STWRefProcTaskProxy: public AbstractGangTask {
5293 typedef AbstractRefProcTaskExecutor::ProcessTask ProcessTask;
5294 ProcessTask& _proc_task;
5295 G1CollectedHeap* _g1h;
5296 G1ParScanThreadState** _pss;
5297 RefToScanQueueSet* _task_queues;
5298 ParallelTaskTerminator* _terminator;
5299
5300 public:
5301 G1STWRefProcTaskProxy(ProcessTask& proc_task,
5302 G1CollectedHeap* g1h,
5303 G1ParScanThreadState** pss,
5304 RefToScanQueueSet *task_queues,
5305 ParallelTaskTerminator* terminator) :
5306 AbstractGangTask("Process reference objects in parallel"),
5307 _proc_task(proc_task),
5308 _g1h(g1h),
5309 _pss(pss),
5310 _task_queues(task_queues),
5311 _terminator(terminator)
5312 {}
5313
5314 virtual void work(uint worker_id) {
5315 // The reference processing task executed by a single worker.
5316 ResourceMark rm;
5317 HandleMark hm;
5318
5319 G1STWIsAliveClosure is_alive(_g1h);
5320
5321 G1ParScanThreadState* pss = _pss[worker_id];
5322 pss->set_ref_processor(NULL);
5323
5324 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL);
5325
5326 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
5327
5328 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5329
5330 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5331 // We also need to mark copied objects.
5332       copy_non_heap_cl = &copy_mark_non_heap_cl;
5333 }
5334
5335 // Keep alive closure.
5336 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, pss);
5337
5338 // Complete GC closure
5339 G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _task_queues, _terminator);
5340
5341 // Call the reference processing task's work routine.
5342 _proc_task.work(worker_id, is_alive, keep_alive, drain_queue);
5343
5344 // Note we cannot assert that the refs array is empty here as not all
5345 // of the processing tasks (specifically phase2 - pp2_work) execute
5346 // the complete_gc closure (which ordinarily would drain the queue) so
5347 // the queue may not be empty.
5348 }
5349 };
5350
5351 // Driver routine for parallel reference processing.
5352 // Creates an instance of the ref processing gang
5353 // task and has the worker threads execute it.
5354 void G1STWRefProcTaskExecutor::execute(ProcessTask& proc_task) {
5355 assert(_workers != NULL, "Need parallel worker threads.");
5356
5357 ParallelTaskTerminator terminator(_active_workers, _queues);
5358 G1STWRefProcTaskProxy proc_task_proxy(proc_task, _g1h, _pss, _queues, &terminator);
5359
5360 _g1h->set_par_threads(_active_workers);
5361 _workers->run_task(&proc_task_proxy);
5362 _g1h->set_par_threads(0);
5363 }
5364
5365 // Gang task for parallel reference enqueueing.
5366
5367 class G1STWRefEnqueueTaskProxy: public AbstractGangTask {
5368 typedef AbstractRefProcTaskExecutor::EnqueueTask EnqueueTask;
5369 EnqueueTask& _enq_task;
5370
5371 public:
5372 G1STWRefEnqueueTaskProxy(EnqueueTask& enq_task) :
5373 AbstractGangTask("Enqueue reference objects in parallel"),
5374 _enq_task(enq_task)
5375 { }
5376
5377 virtual void work(uint worker_id) {
5378 _enq_task.work(worker_id);
5385
5386 void G1STWRefProcTaskExecutor::execute(EnqueueTask& enq_task) {
5387 assert(_workers != NULL, "Need parallel worker threads.");
5388
5389 G1STWRefEnqueueTaskProxy enq_task_proxy(enq_task);
5390
5391 _g1h->set_par_threads(_active_workers);
5392 _workers->run_task(&enq_task_proxy);
5393 _g1h->set_par_threads(0);
5394 }
5395
5396 // End of weak reference support closures
5397
5398 // Abstract task used to preserve (i.e. copy) any referent objects
5399 // that are in the collection set and are pointed to by reference
5400 // objects discovered by the CM ref processor.
5401
5402 class G1ParPreserveCMReferentsTask: public AbstractGangTask {
5403 protected:
5404 G1CollectedHeap* _g1h;
5405 G1ParScanThreadState** _pss;
5406 RefToScanQueueSet *_queues;
5407 ParallelTaskTerminator _terminator;
5408 uint _n_workers;
5409
5410 public:
5411 G1ParPreserveCMReferentsTask(G1CollectedHeap* g1h, G1ParScanThreadState** pss, int workers, RefToScanQueueSet *task_queues) :
5412 AbstractGangTask("ParPreserveCMReferents"),
5413 _g1h(g1h),
5414 _pss(pss),
5415 _queues(task_queues),
5416 _terminator(workers, _queues),
5417 _n_workers(workers)
5418 { }
5419
5420 void work(uint worker_id) {
5421 ResourceMark rm;
5422 HandleMark hm;
5423
5424 G1ParScanThreadState* pss = _pss[worker_id];
5425 pss->set_ref_processor(NULL);
5426 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5427
5428 G1ParScanExtRootClosure only_copy_non_heap_cl(_g1h, pss, NULL);
5429
5430 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(_g1h, pss, NULL);
5431
5432 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5433
5434 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5435 // We also need to mark copied objects.
5436       copy_non_heap_cl = &copy_mark_non_heap_cl;
5437 }
5438
5439 // Is alive closure
5440 G1AlwaysAliveClosure always_alive(_g1h);
5441
5442 // Copying keep alive closure. Applied to referent objects that need
5443 // to be copied.
5444 G1CopyingKeepAliveClosure keep_alive(_g1h, copy_non_heap_cl, pss);
5445
5446 ReferenceProcessor* rp = _g1h->ref_processor_cm();
5447
5448 uint limit = ReferenceProcessor::number_of_subclasses_of_ref() * rp->max_num_q();
5449 uint stride = MIN2(MAX2(_n_workers, 1U), limit);
5450
5451 // limit is set using max_num_q() - which was set using ParallelGCThreads.
5452 // So this must be true - but assert just in case someone decides to
5453 // change the worker ids.
5454 assert(0 <= worker_id && worker_id < limit, "sanity");
5455 assert(!rp->discovery_is_atomic(), "check this code");
5456
5457 // Select discovered lists [i, i+stride, i+2*stride,...,limit)
5458 for (uint idx = worker_id; idx < limit; idx += stride) {
5459 DiscoveredList& ref_list = rp->discovered_refs()[idx];
5460
5461 DiscoveredListIterator iter(ref_list, &keep_alive, &always_alive);
5462 while (iter.has_next()) {
5463 // Since discovery is not atomic for the CM ref processor, we
5464 // can see some null referent objects.
5465 iter.load_ptrs(DEBUG_ONLY(true));
5466 oop ref = iter.obj();
5467
5468 // This will filter nulls.
5469 if (iter.is_referent_alive()) {
5470 iter.make_referent_alive();
5471 }
5472 iter.move_to_next();
5473 }
5474 }
5475
5476 // Drain the queue - which may cause stealing
5477 G1ParEvacuateFollowersClosure drain_queue(_g1h, pss, _queues, &_terminator);
5478 drain_queue.do_void();
5479 // Allocation buffers were retired at the end of G1ParEvacuateFollowersClosure
5480 assert(pss->queue_is_empty(), "should be");
5481 }
5482 };
5483
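// How the striding in the task above partitions the discovered lists, with
// illustrative numbers (assuming, say, 4 reference subclasses and
// max_num_q() == 8, so limit == 32; with 8 workers, stride == 8):
//
//   worker 0 walks lists 0, 8, 16, 24
//   worker 3 walks lists 3, 11, 19, 27
//   // every list index in [0, limit) is visited by exactly one worker
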
5484 // Weak Reference processing during an evacuation pause (part 1).
5485 void G1CollectedHeap::process_discovered_references(G1ParScanThreadState** pss_, uint no_of_gc_workers) {
5486 double ref_proc_start = os::elapsedTime();
5487
5488 ReferenceProcessor* rp = _ref_processor_stw;
5489 assert(rp->discovery_enabled(), "should have been enabled");
5490
5491 // Any reference objects, in the collection set, that were 'discovered'
5492 // by the CM ref processor should have already been copied (either by
5493 // applying the external root copy closure to the discovered lists, or
5494 // by following an RSet entry).
5495 //
5496 // But some of the referents, that are in the collection set, that these
5497 // reference objects point to may not have been copied: the STW ref
5498 // processor would have seen that the reference object had already
5499 // been 'discovered' and would have skipped discovering the reference,
5500 // but would not have treated the reference object as a regular oop.
5501 // As a result the copy closure would not have been applied to the
5502 // referent object.
5503 //
5504 // We need to explicitly copy these referent objects - the references
5505 // will be processed at the end of remarking.
5506 //
5507 // We also need to do this copying before we process the reference
5508 // objects discovered by the STW ref processor in case one of these
5509 // referents points to another object which is also referenced by an
5510 // object discovered by the STW ref processor.
5511
5512 assert(no_of_gc_workers == workers()->active_workers(), "Need to reset active GC workers");
5513
5514 set_par_threads(no_of_gc_workers);
5515 G1ParPreserveCMReferentsTask keep_cm_referents(this,
5516 pss_,
5517 no_of_gc_workers,
5518 _task_queues);
5519
5520 workers()->run_task(&keep_cm_referents);
5521
5522 set_par_threads(0);
5523
5524 // Closure to test whether a referent is alive.
5525 G1STWIsAliveClosure is_alive(this);
5526
5527 // Even when parallel reference processing is enabled, the processing
5528   // of JNI refs is serial and is performed by the current thread
5529 // rather than by a worker. The following PSS will be used for processing
5530 // JNI refs.
5531
5532 // Use only a single queue for this PSS.
5533 G1ParScanThreadState* pss = pss_[0];
5534 pss->set_ref_processor(NULL);
5535 assert(pss->queue_is_empty(), "pre-condition");
5536
5537 // We do not embed a reference processor in the copying/scanning
5538 // closures while we're actually processing the discovered
5539 // reference objects.
5540
5541 G1ParScanExtRootClosure only_copy_non_heap_cl(this, pss, NULL);
5542
5543 G1ParScanAndMarkExtRootClosure copy_mark_non_heap_cl(this, pss, NULL);
5544
5545 OopClosure* copy_non_heap_cl = &only_copy_non_heap_cl;
5546
5547 if (_g1h->g1_policy()->during_initial_mark_pause()) {
5548 // We also need to mark copied objects.
5549     copy_non_heap_cl = &copy_mark_non_heap_cl;
5550 }
5551
5552 // Keep alive closure.
5553 G1CopyingKeepAliveClosure keep_alive(this, copy_non_heap_cl, pss);
5554
5555 // Serial Complete GC closure
5556 G1STWDrainQueueClosure drain_queue(this, pss);
5557
5558 // Setup the soft refs policy...
5559 rp->setup_policy(false);
5560
5561 ReferenceProcessorStats stats;
5562 if (!rp->processing_is_mt()) {
5563 // Serial reference processing...
5564 stats = rp->process_discovered_references(&is_alive,
5565 &keep_alive,
5566 &drain_queue,
5567 NULL,
5568 _gc_timer_stw,
5569 _gc_tracer_stw->gc_id());
5570 } else {
5571 // Parallel reference processing
5572 assert(rp->num_q() == no_of_gc_workers, "sanity");
5573 assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5574
5575 G1STWRefProcTaskExecutor par_task_executor(this, pss_, workers(), _task_queues, no_of_gc_workers);
5576 stats = rp->process_discovered_references(&is_alive,
5577 &keep_alive,
5578 &drain_queue,
5579 &par_task_executor,
5580 _gc_timer_stw,
5581 _gc_tracer_stw->gc_id());
5582 }
5583
5584 _gc_tracer_stw->report_gc_reference_stats(stats);
5585
5586 // We have completed copying any necessary live referent objects.
5587 assert(pss->queue_is_empty(), "both queue and overflow should be empty");
5588
5589 double ref_proc_time = os::elapsedTime() - ref_proc_start;
5590 g1_policy()->phase_times()->record_ref_proc_time(ref_proc_time * 1000.0);
5591 }
5592
5593 // Weak Reference processing during an evacuation pause (part 2).
5594 void G1CollectedHeap::enqueue_discovered_references(uint no_of_gc_workers) {
5595 double ref_enq_start = os::elapsedTime();
5596
5597 ReferenceProcessor* rp = _ref_processor_stw;
5598 assert(!rp->discovery_enabled(), "should have been disabled as part of processing");
5599
5600 // Now enqueue any remaining on the discovered lists on to
5601 // the pending list.
5602 if (!rp->processing_is_mt()) {
5603 // Serial reference processing...
5604 rp->enqueue_discovered_references();
5605 } else {
5606 // Parallel reference enqueueing
5607
5608 assert(no_of_gc_workers == workers()->active_workers(),
5609 "Need to reset active workers");
5610 assert(rp->num_q() == no_of_gc_workers, "sanity");
5611 assert(no_of_gc_workers <= rp->max_num_q(), "sanity");
5612
5613 G1STWRefProcTaskExecutor par_task_executor(this, NULL, workers(), _task_queues, no_of_gc_workers);
5614 rp->enqueue_discovered_references(&par_task_executor);
5615 }
5616
5617 rp->verify_no_references_recorded();
5618 assert(!rp->discovery_enabled(), "should have been disabled");
5619
5620 // FIXME
5621 // CM's reference processing also cleans up the string and symbol tables.
5622 // Should we do that here also? We could, but it is a serial operation
5623 // and could significantly increase the pause time.
5624
5625 double ref_enq_time = os::elapsedTime() - ref_enq_start;
5626 g1_policy()->phase_times()->record_ref_enq_time(ref_enq_time * 1000.0);
5627 }
5628
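// Parts 1 and 2 are driven from evacuate_collection_set() below, in that
// order (condensed; the elided steps retire the GC alloc regions and handle
// any evacuation failure):
//
//   process_discovered_references(per_thread_states, n_workers);  // part 1
//   ...
//   enqueue_discovered_references(n_workers);                     // part 2
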
5629 void G1CollectedHeap::evacuate_collection_set(EvacuationInfo& evacuation_info) {
5630 _expand_heap_after_alloc_failure = true;
5631 _evacuation_failed = false;
5632
5633 // Should G1EvacuationFailureALot be in effect for this GC?
5634 NOT_PRODUCT(set_evacuation_failure_alot_for_current_gc();)
5635
5636 g1_rem_set()->prepare_for_oops_into_collection_set_do();
5637
5638 // Disable the hot card cache.
5639 G1HotCardCache* hot_card_cache = _cg1r->hot_card_cache();
5640 hot_card_cache->reset_hot_cache_claimed_index();
5641 hot_card_cache->set_use_cache(false);
5642
5643   uint n_workers =
5645 AdaptiveSizePolicy::calc_active_workers(workers()->total_workers(),
5646 workers()->active_workers(),
5647 Threads::number_of_non_daemon_threads());
5648 assert(UseDynamicNumberOfGCThreads ||
5649 n_workers == workers()->total_workers(),
5650 "If not dynamic should be using all the workers");
5651 workers()->set_active_workers(n_workers);
5652 set_par_threads(n_workers);
5653
5654 G1ParScanThreadState** per_thread_states = NEW_C_HEAP_ARRAY(G1ParScanThreadState*, n_workers, mtGC);
5655 for (uint i = 0; i < n_workers; i++) {
5656 per_thread_states[i] = new G1ParScanThreadState(this, i);
5657 }
5658
5659 G1ParTask g1_par_task(this, per_thread_states, _task_queues);
5660
5661 init_for_evac_failure(NULL);
5662
5663 assert(dirty_card_queue_set().completed_buffers_num() == 0, "Should be empty");
5664 double start_par_time_sec = os::elapsedTime();
5665 double end_par_time_sec;
5666
5667 {
5668 StrongRootsScope srs(this);
5669 // InitialMark needs claim bits to keep track of the marked-through CLDs.
5670 if (g1_policy()->during_initial_mark_pause()) {
5671 ClassLoaderDataGraph::clear_claimed_marks();
5672 }
5673
5674 // The individual threads will set their evac-failure closures.
5675 if (PrintTerminationStats) {
5676 print_termination_stats_hdr(gclog_or_tty);
5677 }
5678     // These tasks use SharedHeap::_process_strong_tasks
5679 assert(UseDynamicNumberOfGCThreads ||
5680 workers()->active_workers() == workers()->total_workers(),
5681 "If not dynamic should be using all the workers");
5682 workers()->run_task(&g1_par_task);
5683 end_par_time_sec = os::elapsedTime();
5684
5685 // Closing the inner scope will execute the destructor
5686 // for the StrongRootsScope object. We record the current
5687 // elapsed time before closing the scope so that time
5688 // taken for the SRS destructor is NOT included in the
5689 // reported parallel time.
5690 }
5691
5692 double par_time_ms = (end_par_time_sec - start_par_time_sec) * 1000.0;
5693 g1_policy()->phase_times()->record_par_time(par_time_ms);
5694
5695 double code_root_fixup_time_ms =
5696 (os::elapsedTime() - end_par_time_sec) * 1000.0;
5697 g1_policy()->phase_times()->record_code_root_fixup_time(code_root_fixup_time_ms);
5698
5699 set_par_threads(0);
5700
5701 // Process any discovered reference objects - we have
5702 // to do this _before_ we retire the GC alloc regions
5703 // as we may have to copy some 'reachable' referent
5704 // objects (and their reachable sub-graphs) that were
5705 // not copied during the pause.
5706 process_discovered_references(per_thread_states, n_workers);
5707
5708 if (G1StringDedup::is_enabled()) {
5709 G1STWIsAliveClosure is_alive(this);
5710 G1KeepAliveClosure keep_alive(this);
5711 G1StringDedup::unlink_or_oops_do(&is_alive, &keep_alive);
5712 }
5713
5714 for (uint i = 0; i < n_workers; i++) {
5715 delete per_thread_states[i];
5716 }
5717 FREE_C_HEAP_ARRAY(G1ParScanThreadState*, per_thread_states);
5718
5719 record_obj_copy_mem_stats();
5720
5721 _allocator->release_gc_alloc_regions(n_workers, evacuation_info);
5722 g1_rem_set()->cleanup_after_oops_into_collection_set_do();
5723
5724 // Reset and re-enable the hot card cache.
5725 // Note the counts for the cards in the regions in the
5726 // collection set are reset when the collection set is freed.
5727 hot_card_cache->reset_hot_cache();
5728 hot_card_cache->set_use_cache(true);
5729
5730 purge_code_root_memory();
5731
5732 finalize_for_evac_failure();
5733
5734 if (evacuation_failed()) {
5735 remove_self_forwarding_pointers();
5736
5737 // Reset the G1EvacuationFailureALot counters and flags
5738 // Note: the values are reset only when an actual
5739 // evacuation failure occurs.
5740 NOT_PRODUCT(reset_evacuation_should_fail();)
5741 }
5742
5743   // Enqueue any references remaining on the STW
5744 // reference processor's discovered lists. We need to do
5745 // this after the card table is cleaned (and verified) as
5746 // the act of enqueueing entries on to the pending list
5747 // will log these updates (and dirty their associated
5748 // cards). We need these updates logged to update any
5749 // RSets.
5750 enqueue_discovered_references(n_workers);
5751
5752 redirty_logged_cards();
5753 COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
5754 }
5755
5756 void G1CollectedHeap::record_obj_copy_mem_stats() {
5757 record_obj_copy_mem_stats(InCSetState::Young);
5758 record_obj_copy_mem_stats(InCSetState::Old);
5759 }
5760
5761 void G1CollectedHeap::record_obj_copy_mem_stats(InCSetState which) {
5762 G1EvacStats* stats = _allocator->evac_stats(which);
5763
5764 EventGCG1EvacuationMemoryStatistics e;
5765 if (e.should_commit()) {
5766 e.set_gcId(GCId::peek().id());
5767 e.set_gen(InCSetState::to_gen_number(which));
5768 e.set_allocated(stats->allocated() * HeapWordSize);
5769 e.set_wasted(stats->wasted() * HeapWordSize);
5770 e.set_used(stats->used() * HeapWordSize);
5771 e.set_undo_waste(stats->undo_waste() * HeapWordSize);
5772 e.set_region_end_waste(stats->region_end_waste() * HeapWordSize);
5773 e.set_regions_refilled(stats->regions_refilled());
5774 e.set_inline_allocated(stats->inline_allocated() * HeapWordSize);
5775 e.set_failure_used(stats->failure_used() * HeapWordSize);
5776 e.set_failure_waste(stats->failure_waste() * HeapWordSize);
5777 e.commit();
5778 }
5779 }
5780
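// All G1EvacStats counters are kept in heap words while the event fields are
// in bytes, hence the HeapWordSize scaling above. E.g., on a 64-bit VM:
//
//   size_t allocated_words = 4096;
//   size_t allocated_bytes = allocated_words * HeapWordSize;  // 32768
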
5781 void G1CollectedHeap::free_region(HeapRegion* hr,
5782 FreeRegionList* free_list,
5783 bool par,
5784 bool locked) {
5785 assert(!hr->is_free(), "the region should not be free");
5786 assert(!hr->is_empty(), "the region should not be empty");
5787 assert(_hrm.is_available(hr->hrm_index()), "region should be committed");
5788 assert(free_list != NULL, "pre-condition");
5789
5790 if (G1VerifyBitmaps) {
5791 MemRegion mr(hr->bottom(), hr->end());
5792 concurrent_mark()->clearRangePrevBitmap(mr);
5793 }
5794
5795 // Clear the card counts for this region.
5796 // Note: we only need to do this if the region is not young
5797 // (since we don't refine cards in young regions).
5798 if (!hr->is_young()) {
5799 _cg1r->hot_card_cache()->reset_card_counts(hr);
5800 }
5827
5828 void G1CollectedHeap::remove_from_old_sets(const HeapRegionSetCount& old_regions_removed,
5829 const HeapRegionSetCount& humongous_regions_removed) {
5830 if (old_regions_removed.length() > 0 || humongous_regions_removed.length() > 0) {
5831 MutexLockerEx x(OldSets_lock, Mutex::_no_safepoint_check_flag);
5832 _old_set.bulk_remove(old_regions_removed);
5833 _humongous_set.bulk_remove(humongous_regions_removed);
5834 }
5835
5836 }
5837
5838 void G1CollectedHeap::prepend_to_freelist(FreeRegionList* list) {
5839 assert(list != NULL, "list can't be null");
5840 if (!list->is_empty()) {
5841 MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
5842 _hrm.insert_list_into_free_list(list);
5843 }
5844 }
5845
5846 void G1CollectedHeap::decrement_summary_bytes(size_t bytes) {
5847 decrease_used(bytes);
5848 }
5849
5850 class G1ParCleanupCTTask : public AbstractGangTask {
5851 G1SATBCardTableModRefBS* _ct_bs;
5852 G1CollectedHeap* _g1h;
5853 HeapRegion* volatile _su_head;
5854 public:
5855 G1ParCleanupCTTask(G1SATBCardTableModRefBS* ct_bs,
5856 G1CollectedHeap* g1h) :
5857 AbstractGangTask("G1 Par Cleanup CT Task"),
5858 _ct_bs(ct_bs), _g1h(g1h) { }
5859
5860 void work(uint worker_id) {
5861 HeapRegion* r;
5862     while ((r = _g1h->pop_dirty_cards_region()) != NULL) {
5863 clear_cards(r);
5864 }
5865 }
5866
5867 void clear_cards(HeapRegion* r) {
6161
6162 assert( (cur->is_young() && cur->young_index_in_cset() > -1) ||
6163 (!cur->is_young() && cur->young_index_in_cset() == -1),
6164 "invariant" );
6165
6166 if (!cur->evacuation_failed()) {
6167 MemRegion used_mr = cur->used_region();
6168
6169       // The region should not be empty.
6170 assert(!used_mr.is_empty(), "Should not have empty regions in a CS.");
6171 pre_used += cur->used();
6172 free_region(cur, &local_free_list, false /* par */, true /* locked */);
6173 } else {
6174 cur->uninstall_surv_rate_group();
6175 if (cur->is_young()) {
6176 cur->set_young_index_in_cset(-1);
6177 }
6178 cur->set_evacuation_failed(false);
6179 // The region is now considered to be old.
6180 cur->set_old();
6181 // Do some allocation statistics accounting. Regions that failed evacuation
6182 // are always made old, so there is no need to update anything in the young
6183 // gen statistics.
6184 size_t used_words = cur->marked_bytes() / HeapWordSize;
6185 _allocator->evac_stats(InCSetState::Old)->add_failure_used_and_waste(used_words, HeapRegion::GrainWords - used_words);
6186 _old_set.add(cur);
6187 evacuation_info.increment_collectionset_used_after(cur->used());
6188 }
6189 cur = next;
6190 }
6191
6192 evacuation_info.set_regions_freed(local_free_list.length());
6193 policy->record_max_rs_lengths(rs_lengths);
6194 policy->cset_regions_freed();
6195
6196 double end_sec = os::elapsedTime();
6197 double elapsed_ms = (end_sec - start_sec) * 1000.0;
6198
6199 if (non_young) {
6200 non_young_time_ms += elapsed_ms;
6201 } else {
6202 young_time_ms += elapsed_ms;
6203 }
6204
6205 prepend_to_freelist(&local_free_list);
6548
6549 return false;
6550 }
6551
6552 size_t total_used() {
6553 return _total_used;
6554 }
6555 };
6556
6557 void G1CollectedHeap::rebuild_region_sets(bool free_list_only) {
6558 assert_at_safepoint(true /* should_be_vm_thread */);
6559
6560 if (!free_list_only) {
6561 _young_list->empty_list();
6562 }
6563
6564 RebuildRegionSetsClosure cl(free_list_only, &_old_set, &_hrm);
6565 heap_region_iterate(&cl);
6566
6567 if (!free_list_only) {
6568 set_used(cl.total_used());
6569 }
6570 assert(used_unlocked() == recalculate_used(),
6571          err_msg("inconsistent used_unlocked(), "
6572 "value: "SIZE_FORMAT" recalculated: "SIZE_FORMAT,
6573 used_unlocked(), recalculate_used()));
6574 }
6575
6576 void G1CollectedHeap::set_refine_cte_cl_concurrency(bool concurrent) {
6577 _refine_cte_cl->set_concurrent(concurrent);
6578 }
6579
6580 bool G1CollectedHeap::is_in_closed_subset(const void* p) const {
6581 HeapRegion* hr = heap_region_containing(p);
6582 return hr->is_in(p);
6583 }
6584
6585 // Methods for the mutator alloc region
6586
6587 HeapRegion* G1CollectedHeap::new_mutator_alloc_region(size_t word_size,
6588 bool force) {
6589 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6590 assert(!force || g1_policy()->can_expand_young_list(),
6591 "if force is true we should be able to expand the young list");
6592 bool young_list_full = g1_policy()->is_young_list_full();
6593 if (force || !young_list_full) {
6594 HeapRegion* new_alloc_region = new_region(word_size,
6595 false /* is_old */,
6596 false /* do_expand */);
6597 if (new_alloc_region != NULL) {
6598 set_region_short_lived_locked(new_alloc_region);
6599 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Eden, young_list_full);
6600 check_bitmaps("Mutator Region Allocation", new_alloc_region);
6601 return new_alloc_region;
6602 }
6603 }
6604 return NULL;
6605 }
6606
6607 void G1CollectedHeap::retire_mutator_alloc_region(HeapRegion* alloc_region,
6608 size_t allocated_bytes) {
6609 assert_heap_locked_or_at_safepoint(true /* should_be_vm_thread */);
6610 assert(alloc_region->is_eden(), "all mutator alloc regions should be eden");
6611
6612 g1_policy()->add_region_to_incremental_cset_lhs(alloc_region);
6613 increase_used(allocated_bytes);
6614 _hr_printer.retire(alloc_region);
6615 // We update the eden sizes here, when the region is retired,
6616   // instead of when it's allocated, since this is the point at which its
6617   // used space has been recorded in _summary_bytes_used.
6618 g1mm()->update_eden_size();
6619 }
6620
6621 // Methods for the GC alloc regions
6622
6623 HeapRegion* G1CollectedHeap::new_gc_alloc_region(size_t word_size,
6624 uint count,
6625 InCSetState dest) {
6626 assert(FreeList_lock->owned_by_self(), "pre-condition");
6627
6628 if (count < g1_policy()->max_regions(dest)) {
6629 const bool is_survivor = (dest.is_young());
6630 HeapRegion* new_alloc_region = new_region(word_size,
6631                                               !is_survivor /* is_old */,
6632 true /* do_expand */);
6633 if (new_alloc_region != NULL) {
6634 // We really only need to do this for old regions given that we
6635 // should never scan survivors. But it doesn't hurt to do it
6636 // for survivors too.
6637 new_alloc_region->record_timestamp();
6638 if (is_survivor) {
6639 new_alloc_region->set_survivor();
6640 _hr_printer.alloc(new_alloc_region, G1HRPrinter::Survivor);
6647 bool during_im = g1_policy()->during_initial_mark_pause();
6648 new_alloc_region->note_start_of_copying(during_im);
6649 return new_alloc_region;
6650 }
6651 }
6652 return NULL;
6653 }
6654
6655 void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
6656 size_t allocated_bytes,
6657 InCSetState dest) {
6658 bool during_im = g1_policy()->during_initial_mark_pause();
6659 alloc_region->note_end_of_copying(during_im);
6660 g1_policy()->record_bytes_copied_during_gc(allocated_bytes);
6661 if (dest.is_young()) {
6662 young_list()->add_survivor_region(alloc_region);
6663 } else {
6664 _old_set.add(alloc_region);
6665 }
6666 _hr_printer.retire(alloc_region);
6667 }
6668
6669 void G1CollectedHeap::set_par_threads() {
6670 // Don't change the number of workers. Use the value previously set
6671 // in the workgroup.
6672 uint n_workers = workers()->active_workers();
6673 assert(UseDynamicNumberOfGCThreads ||
6674 n_workers == workers()->total_workers(),
6675 "Otherwise should be using the total number of workers");
6676 if (n_workers == 0) {
6677 assert(false, "Should have been set in prior evacuation pause.");
6678 n_workers = ParallelGCThreads;
6679 workers()->set_active_workers(n_workers);
6680 }
6681 set_par_threads(n_workers);
6682 }
6683
6684 // Heap region set verification
6685
6686 class VerifyRegionListsClosure : public HeapRegionClosure {
6687 private:
6688 HeapRegionSet* _old_set;
6689 HeapRegionSet* _humongous_set;
6690 HeapRegionManager* _hrm;
6691
6692 public:
6693 HeapRegionSetCount _old_count;
6694 HeapRegionSetCount _humongous_count;
6695 HeapRegionSetCount _free_count;
6696
6697 VerifyRegionListsClosure(HeapRegionSet* old_set,
6698 HeapRegionSet* humongous_set,
6699 HeapRegionManager* hrm) :
6700 _old_set(old_set), _humongous_set(humongous_set), _hrm(hrm),
6701 _old_count(), _humongous_count(), _free_count(){ }
|