20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderDataGraph.hpp"
27 #include "classfile/metadataOnStackMark.hpp"
28 #include "classfile/stringTable.hpp"
29 #include "code/codeCache.hpp"
30 #include "code/icBuffer.hpp"
31 #include "gc/g1/g1Allocator.inline.hpp"
32 #include "gc/g1/g1BarrierSet.hpp"
33 #include "gc/g1/g1CollectedHeap.inline.hpp"
34 #include "gc/g1/g1CollectionSet.hpp"
35 #include "gc/g1/g1CollectorPolicy.hpp"
36 #include "gc/g1/g1CollectorState.hpp"
37 #include "gc/g1/g1ConcurrentRefine.hpp"
38 #include "gc/g1/g1ConcurrentRefineThread.hpp"
39 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
40 #include "gc/g1/g1EvacStats.inline.hpp"
41 #include "gc/g1/g1FullCollector.hpp"
42 #include "gc/g1/g1GCPhaseTimes.hpp"
43 #include "gc/g1/g1HeapSizingPolicy.hpp"
44 #include "gc/g1/g1HeapTransition.hpp"
45 #include "gc/g1/g1HeapVerifier.hpp"
46 #include "gc/g1/g1HotCardCache.hpp"
47 #include "gc/g1/g1MemoryPool.hpp"
48 #include "gc/g1/g1OopClosures.inline.hpp"
49 #include "gc/g1/g1ParScanThreadState.inline.hpp"
50 #include "gc/g1/g1Policy.hpp"
51 #include "gc/g1/g1RegionToSpaceMapper.hpp"
52 #include "gc/g1/g1RemSet.hpp"
53 #include "gc/g1/g1RootClosures.hpp"
54 #include "gc/g1/g1RootProcessor.hpp"
55 #include "gc/g1/g1SATBMarkQueueSet.hpp"
56 #include "gc/g1/g1StringDedup.hpp"
57 #include "gc/g1/g1ThreadLocalData.hpp"
58 #include "gc/g1/g1YCTypes.hpp"
59 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
90 #include "runtime/handles.inline.hpp"
91 #include "runtime/init.hpp"
92 #include "runtime/orderAccess.hpp"
93 #include "runtime/threadSMR.hpp"
94 #include "runtime/vmThread.hpp"
95 #include "utilities/align.hpp"
96 #include "utilities/globalDefinitions.hpp"
97 #include "utilities/stack.inline.hpp"
98
// Object size threshold (in words) above which an object is treated as
// humongous. NOTE(review): initialized to 0 here and presumably set during
// heap initialization — the assignment is not visible in this chunk.
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
100
101 // INVARIANTS/NOTES
102 //
103 // All allocation activity covered by the G1CollectedHeap interface is
104 // serialized by acquiring the HeapLock. This happens in mem_allocate
105 // and allocate_new_tlab, which are the "entry" points to the
106 // allocation code from the rest of the JVM. (Note that this does not
107 // apply to TLAB allocation, which is not part of this interface: it
108 // is done by clients of this interface.)
109
110 class RedirtyLoggedCardTableEntryClosure : public CardTableEntryClosure {
111 private:
112 size_t _num_dirtied;
113 G1CollectedHeap* _g1h;
114 G1CardTable* _g1_ct;
115
116 HeapRegion* region_for_card(jbyte* card_ptr) const {
117 return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
118 }
119
120 bool will_become_free(HeapRegion* hr) const {
121 // A region will be freed by free_collection_set if the region is in the
122 // collection set and has not had an evacuation failure.
123 return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
124 }
125
126 public:
127 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : CardTableEntryClosure(),
128 _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
129
130 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
131 HeapRegion* hr = region_for_card(card_ptr);
132
133 // Should only dirty cards in regions that won't be freed.
134 if (!will_become_free(hr)) {
135 *card_ptr = G1CardTable::dirty_card_val();
136 _num_dirtied++;
137 }
138
139 return true;
140 }
141
142 size_t num_dirtied() const { return _num_dirtied; }
143 };
144
145
146 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
147 HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
1794 // Now expand into the initial heap size.
1795 if (!expand(init_byte_size, _workers)) {
1796 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1797 return JNI_ENOMEM;
1798 }
1799
1800 // Perform any initialization actions delegated to the policy.
1801 g1_policy()->init(this, &_collection_set);
1802
1803 jint ecode = initialize_concurrent_refinement();
1804 if (ecode != JNI_OK) {
1805 return ecode;
1806 }
1807
1808 ecode = initialize_young_gen_sampling_thread();
1809 if (ecode != JNI_OK) {
1810 return ecode;
1811 }
1812
1813 {
1814 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1815 dcqs.set_process_completed_buffers_threshold(concurrent_refine()->yellow_zone());
1816 dcqs.set_max_completed_buffers(concurrent_refine()->red_zone());
1817 }
1818
1819 // Here we allocate the dummy HeapRegion that is required by the
1820 // G1AllocRegion class.
1821 HeapRegion* dummy_region = _hrm->get_dummy_region();
1822
1823 // We'll re-use the same region whether the alloc region will
1824 // require BOT updates or not and, if it doesn't, then a non-young
1825 // region will complain that it cannot support allocations without
1826 // BOT updates. So we'll tag the dummy region as eden to avoid that.
1827 dummy_region->set_eden();
1828 // Make sure it's full.
1829 dummy_region->set_top(dummy_region->end());
1830 G1AllocRegion::setup(this, dummy_region);
1831
1832 _allocator->init_mutator_alloc_region();
1833
1834 // Do create of the monitoring and management support so that
// Accessor for the collector policy, typed as the generic CollectorPolicy.
CollectorPolicy* G1CollectedHeap::collector_policy() const {
  return _collector_policy;
}
1940
// Same policy object as collector_policy(), returned with its concrete
// G1CollectorPolicy type.
G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
  return _collector_policy;
}
1944
// Accessor for the heap's embedded SoftRefPolicy instance.
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}
1948
1949 size_t G1CollectedHeap::capacity() const {
1950 return _hrm->length() * HeapRegion::GrainBytes;
1951 }
1952
// Bytes in committed-but-unused regions; delegates to the region
// manager's free-byte accounting.
size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
  return _hrm->total_free_bytes();
}
1956
// Applies cl to the entries of the hot card cache, draining it, on behalf
// of worker worker_i.
void G1CollectedHeap::iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i) {
  _hot_card_cache->drain(cl, worker_i);
}
1960
// Applies cl to the completed buffers of the global dirty card queue set,
// then records the number of buffers processed as UpdateRS work for
// worker_i.
void G1CollectedHeap::iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i) {
  DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  size_t n_completed_buffers = 0;
  // Loop until apply_closure_during_gc reports no more work to do.
  while (dcqs.apply_closure_during_gc(cl, worker_i)) {
    n_completed_buffers++;
  }
  // Every completed buffer must have been consumed by the loop above.
  assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!");
  g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
}
1970
1971 // Computes the sum of the storage used by the various regions.
1972 size_t G1CollectedHeap::used() const {
1973 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1974 if (_archive_allocator != NULL) {
1975 result += _archive_allocator->used();
1976 }
1977 return result;
1978 }
1979
// Returns only the summarized used bytes; unlike used(), does not consult
// the allocator's active regions or the archive allocator.
size_t G1CollectedHeap::used_unlocked() const {
  return _summary_bytes_used;
}
2592 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2593 assert(result == NULL || ret_succeeded,
2594 "the result should be NULL if the VM did not succeed");
2595 *succeeded = ret_succeeded;
2596
2597 assert_heap_not_locked();
2598 return result;
2599 }
2600
// Signals the concurrent mark thread to start a cycle, if one is not
// already in progress.
void G1CollectedHeap::do_concurrent_mark() {
  // CGC_lock guards the marking thread's started/in-progress state; it is
  // taken without a safepoint check.
  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  if (!_cm_thread->in_progress()) {
    _cm_thread->set_started();
    // Wake up the marking thread waiting on CGC_lock.
    CGC_lock->notify();
  }
}
2608
2609 size_t G1CollectedHeap::pending_card_num() {
2610 size_t extra_cards = 0;
2611 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2612 DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(curr);
2613 extra_cards += dcq.size();
2614 }
2615 DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2616 size_t buffer_size = dcqs.buffer_size();
2617 size_t buffer_num = dcqs.completed_buffers_num();
2618
2619 return buffer_size * buffer_num + extra_cards;
2620 }
2621
2622 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2623 // We don't nominate objects with many remembered set entries, on
2624 // the assumption that such objects are likely still live.
2625 HeapRegionRemSet* rem_set = r->rem_set();
2626
2627 return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2628 rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2629 G1EagerReclaimHumongousObjects && rem_set->is_empty();
2630 }
2631
2632 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2633 private:
2634 size_t _total_humongous;
2635 size_t _candidate_humongous;
2636
2637 DirtyCardQueue _dcq;
2638
2639 bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
2640 assert(region->is_starts_humongous(), "Must start a humongous object");
2641
2642 oop obj = oop(region->bottom());
2643
2644 // Dead objects cannot be eager reclaim candidates. Due to class
2645 // unloading it is unsafe to query their classes so we return early.
2646 if (g1h->is_obj_dead(obj, region)) {
2647 return false;
2648 }
2649
2650 // If we do not have a complete remembered set for the region, then we can
2651 // not be sure that we have all references to it.
2652 if (!region->rem_set()->is_complete()) {
2653 return false;
2654 }
2655 // Candidate selection must satisfy the following constraints
2656 // while concurrent marking is in progress:
2657 //
3397 {
3398 G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupQueueFixup, worker_id);
3399 StringDedupQueue::unlink_or_oops_do(&cl);
3400 }
3401 {
3402 G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
3403 StringDedupTable::unlink_or_oops_do(&cl, worker_id);
3404 }
3405 }
3406 };
3407
// Runs the string deduplication cleaning task (queue and table fixup)
// across all GC workers.
void G1CollectedHeap::string_dedup_cleaning(BoolObjectClosure* is_alive,
                                            OopClosure* keep_alive,
                                            G1GCPhaseTimes* phase_times) {
  G1StringDedupCleaningTask cl(is_alive, keep_alive, phase_times);
  workers()->run_task(&cl);
}
3414
// Gang task that, in parallel, re-dirties the cards in the completed
// buffers of the given queue set, tracking per-worker time and the number
// of cards dirtied under the RedirtyCards phase.
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
 private:
  DirtyCardQueueSet* _queue;   // Queue set whose completed buffers are processed.
  G1CollectedHeap* _g1h;
 public:
  G1RedirtyLoggedCardsTask(DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
    _queue(queue), _g1h(g1h) { }

  virtual void work(uint worker_id) {
    G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
    // Scoped tracker: records this worker's elapsed time for the phase.
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);

    RedirtyLoggedCardTableEntryClosure cl(_g1h);
    _queue->par_apply_closure_to_all_completed_buffers(&cl);

    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
  }
};
3433
// Re-dirties the cards logged during GC (via G1RedirtyLoggedCardsTask) and
// merges the resulting buffers into the global dirty card queue set so
// concurrent refinement will process them. Elapsed time is recorded in the
// phase times.
void G1CollectedHeap::redirty_logged_cards() {
  double redirty_logged_cards_start = os::elapsedTime();

  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
  dirty_card_queue_set().reset_for_par_iteration();
  workers()->run_task(&redirty_task);

  // Hand the redirtied buffers over to the global queue set.
  DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
  dcq.merge_bufferlists(&dirty_card_queue_set());
  assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");

  g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
}
3447
3448 // Weak Reference Processing support
3449
3450 bool G1STWIsAliveClosure::do_object_b(oop p) {
3451 // An object is reachable if it is outside the collection set,
3452 // or is inside and copied.
3453 return !_g1h->is_in_cset(p) || p->is_forwarded();
3454 }
3455
3456 bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
3457 assert(obj != NULL, "must not be NULL");
3458 assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
3459 // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
3460 // may falsely indicate that this is not the case here: however the collection set only
3461 // contains old regions when concurrent mark is not running.
|
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "classfile/classLoaderDataGraph.hpp"
27 #include "classfile/metadataOnStackMark.hpp"
28 #include "classfile/stringTable.hpp"
29 #include "code/codeCache.hpp"
30 #include "code/icBuffer.hpp"
31 #include "gc/g1/g1Allocator.inline.hpp"
32 #include "gc/g1/g1BarrierSet.hpp"
33 #include "gc/g1/g1CollectedHeap.inline.hpp"
34 #include "gc/g1/g1CollectionSet.hpp"
35 #include "gc/g1/g1CollectorPolicy.hpp"
36 #include "gc/g1/g1CollectorState.hpp"
37 #include "gc/g1/g1ConcurrentRefine.hpp"
38 #include "gc/g1/g1ConcurrentRefineThread.hpp"
39 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
40 #include "gc/g1/g1DirtyCardQueue.hpp"
41 #include "gc/g1/g1EvacStats.inline.hpp"
42 #include "gc/g1/g1FullCollector.hpp"
43 #include "gc/g1/g1GCPhaseTimes.hpp"
44 #include "gc/g1/g1HeapSizingPolicy.hpp"
45 #include "gc/g1/g1HeapTransition.hpp"
46 #include "gc/g1/g1HeapVerifier.hpp"
47 #include "gc/g1/g1HotCardCache.hpp"
48 #include "gc/g1/g1MemoryPool.hpp"
49 #include "gc/g1/g1OopClosures.inline.hpp"
50 #include "gc/g1/g1ParScanThreadState.inline.hpp"
51 #include "gc/g1/g1Policy.hpp"
52 #include "gc/g1/g1RegionToSpaceMapper.hpp"
53 #include "gc/g1/g1RemSet.hpp"
54 #include "gc/g1/g1RootClosures.hpp"
55 #include "gc/g1/g1RootProcessor.hpp"
56 #include "gc/g1/g1SATBMarkQueueSet.hpp"
57 #include "gc/g1/g1StringDedup.hpp"
58 #include "gc/g1/g1ThreadLocalData.hpp"
59 #include "gc/g1/g1YCTypes.hpp"
60 #include "gc/g1/g1YoungRemSetSamplingThread.hpp"
91 #include "runtime/handles.inline.hpp"
92 #include "runtime/init.hpp"
93 #include "runtime/orderAccess.hpp"
94 #include "runtime/threadSMR.hpp"
95 #include "runtime/vmThread.hpp"
96 #include "utilities/align.hpp"
97 #include "utilities/globalDefinitions.hpp"
98 #include "utilities/stack.inline.hpp"
99
// Object size threshold (in words) above which an object is treated as
// humongous. NOTE(review): initialized to 0 here and presumably set during
// heap initialization — the assignment is not visible in this chunk.
size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
101
102 // INVARIANTS/NOTES
103 //
104 // All allocation activity covered by the G1CollectedHeap interface is
105 // serialized by acquiring the HeapLock. This happens in mem_allocate
106 // and allocate_new_tlab, which are the "entry" points to the
107 // allocation code from the rest of the JVM. (Note that this does not
108 // apply to TLAB allocation, which is not part of this interface: it
109 // is done by clients of this interface.)
110
111 class RedirtyLoggedCardTableEntryClosure : public G1CardTableEntryClosure {
112 private:
113 size_t _num_dirtied;
114 G1CollectedHeap* _g1h;
115 G1CardTable* _g1_ct;
116
117 HeapRegion* region_for_card(jbyte* card_ptr) const {
118 return _g1h->heap_region_containing(_g1_ct->addr_for(card_ptr));
119 }
120
121 bool will_become_free(HeapRegion* hr) const {
122 // A region will be freed by free_collection_set if the region is in the
123 // collection set and has not had an evacuation failure.
124 return _g1h->is_in_cset(hr) && !hr->evacuation_failed();
125 }
126
127 public:
128 RedirtyLoggedCardTableEntryClosure(G1CollectedHeap* g1h) : G1CardTableEntryClosure(),
129 _num_dirtied(0), _g1h(g1h), _g1_ct(g1h->card_table()) { }
130
131 bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
132 HeapRegion* hr = region_for_card(card_ptr);
133
134 // Should only dirty cards in regions that won't be freed.
135 if (!will_become_free(hr)) {
136 *card_ptr = G1CardTable::dirty_card_val();
137 _num_dirtied++;
138 }
139
140 return true;
141 }
142
143 size_t num_dirtied() const { return _num_dirtied; }
144 };
145
146
147 void G1RegionMappingChangedListener::reset_from_card_cache(uint start_idx, size_t num_regions) {
148 HeapRegionRemSet::invalidate_from_card_cache(start_idx, num_regions);
1795 // Now expand into the initial heap size.
1796 if (!expand(init_byte_size, _workers)) {
1797 vm_shutdown_during_initialization("Failed to allocate initial heap.");
1798 return JNI_ENOMEM;
1799 }
1800
1801 // Perform any initialization actions delegated to the policy.
1802 g1_policy()->init(this, &_collection_set);
1803
1804 jint ecode = initialize_concurrent_refinement();
1805 if (ecode != JNI_OK) {
1806 return ecode;
1807 }
1808
1809 ecode = initialize_young_gen_sampling_thread();
1810 if (ecode != JNI_OK) {
1811 return ecode;
1812 }
1813
1814 {
1815 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
1816 dcqs.set_process_completed_buffers_threshold(concurrent_refine()->yellow_zone());
1817 dcqs.set_max_completed_buffers(concurrent_refine()->red_zone());
1818 }
1819
1820 // Here we allocate the dummy HeapRegion that is required by the
1821 // G1AllocRegion class.
1822 HeapRegion* dummy_region = _hrm->get_dummy_region();
1823
1824 // We'll re-use the same region whether the alloc region will
1825 // require BOT updates or not and, if it doesn't, then a non-young
1826 // region will complain that it cannot support allocations without
1827 // BOT updates. So we'll tag the dummy region as eden to avoid that.
1828 dummy_region->set_eden();
1829 // Make sure it's full.
1830 dummy_region->set_top(dummy_region->end());
1831 G1AllocRegion::setup(this, dummy_region);
1832
1833 _allocator->init_mutator_alloc_region();
1834
1835 // Do create of the monitoring and management support so that
// Accessor for the collector policy, typed as the generic CollectorPolicy.
CollectorPolicy* G1CollectedHeap::collector_policy() const {
  return _collector_policy;
}
1941
// Same policy object as collector_policy(), returned with its concrete
// G1CollectorPolicy type.
G1CollectorPolicy* G1CollectedHeap::g1_collector_policy() const {
  return _collector_policy;
}
1945
// Accessor for the heap's embedded SoftRefPolicy instance.
SoftRefPolicy* G1CollectedHeap::soft_ref_policy() {
  return &_soft_ref_policy;
}
1949
1950 size_t G1CollectedHeap::capacity() const {
1951 return _hrm->length() * HeapRegion::GrainBytes;
1952 }
1953
// Bytes in committed-but-unused regions; delegates to the region
// manager's free-byte accounting.
size_t G1CollectedHeap::unused_committed_regions_in_bytes() const {
  return _hrm->total_free_bytes();
}
1957
// Applies cl to the entries of the hot card cache, draining it, on behalf
// of worker worker_i.
void G1CollectedHeap::iterate_hcc_closure(G1CardTableEntryClosure* cl, uint worker_i) {
  _hot_card_cache->drain(cl, worker_i);
}
1961
// Applies cl to the completed buffers of the global dirty card queue set,
// then records the number of buffers processed as UpdateRS work for
// worker_i.
void G1CollectedHeap::iterate_dirty_card_closure(G1CardTableEntryClosure* cl, uint worker_i) {
  G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
  size_t n_completed_buffers = 0;
  // Loop until apply_closure_during_gc reports no more work to do.
  while (dcqs.apply_closure_during_gc(cl, worker_i)) {
    n_completed_buffers++;
  }
  // Every completed buffer must have been consumed by the loop above.
  assert(dcqs.completed_buffers_num() == 0, "Completed buffers exist!");
  g1_policy()->phase_times()->record_thread_work_item(G1GCPhaseTimes::UpdateRS, worker_i, n_completed_buffers, G1GCPhaseTimes::UpdateRSProcessedBuffers);
}
1971
1972 // Computes the sum of the storage used by the various regions.
1973 size_t G1CollectedHeap::used() const {
1974 size_t result = _summary_bytes_used + _allocator->used_in_alloc_regions();
1975 if (_archive_allocator != NULL) {
1976 result += _archive_allocator->used();
1977 }
1978 return result;
1979 }
1980
// Returns only the summarized used bytes; unlike used(), does not consult
// the allocator's active regions or the archive allocator.
size_t G1CollectedHeap::used_unlocked() const {
  return _summary_bytes_used;
}
2593 bool ret_succeeded = op.prologue_succeeded() && op.pause_succeeded();
2594 assert(result == NULL || ret_succeeded,
2595 "the result should be NULL if the VM did not succeed");
2596 *succeeded = ret_succeeded;
2597
2598 assert_heap_not_locked();
2599 return result;
2600 }
2601
// Signals the concurrent mark thread to start a cycle, if one is not
// already in progress.
void G1CollectedHeap::do_concurrent_mark() {
  // CGC_lock guards the marking thread's started/in-progress state; it is
  // taken without a safepoint check.
  MutexLockerEx x(CGC_lock, Mutex::_no_safepoint_check_flag);
  if (!_cm_thread->in_progress()) {
    _cm_thread->set_started();
    // Wake up the marking thread waiting on CGC_lock.
    CGC_lock->notify();
  }
}
2609
2610 size_t G1CollectedHeap::pending_card_num() {
2611 size_t extra_cards = 0;
2612 for (JavaThreadIteratorWithHandle jtiwh; JavaThread *curr = jtiwh.next(); ) {
2613 G1DirtyCardQueue& dcq = G1ThreadLocalData::dirty_card_queue(curr);
2614 extra_cards += dcq.size();
2615 }
2616 G1DirtyCardQueueSet& dcqs = G1BarrierSet::dirty_card_queue_set();
2617 size_t buffer_size = dcqs.buffer_size();
2618 size_t buffer_num = dcqs.completed_buffers_num();
2619
2620 return buffer_size * buffer_num + extra_cards;
2621 }
2622
2623 bool G1CollectedHeap::is_potential_eager_reclaim_candidate(HeapRegion* r) const {
2624 // We don't nominate objects with many remembered set entries, on
2625 // the assumption that such objects are likely still live.
2626 HeapRegionRemSet* rem_set = r->rem_set();
2627
2628 return G1EagerReclaimHumongousObjectsWithStaleRefs ?
2629 rem_set->occupancy_less_or_equal_than(G1RSetSparseRegionEntries) :
2630 G1EagerReclaimHumongousObjects && rem_set->is_empty();
2631 }
2632
2633 class RegisterHumongousWithInCSetFastTestClosure : public HeapRegionClosure {
2634 private:
2635 size_t _total_humongous;
2636 size_t _candidate_humongous;
2637
2638 G1DirtyCardQueue _dcq;
2639
2640 bool humongous_region_is_candidate(G1CollectedHeap* g1h, HeapRegion* region) const {
2641 assert(region->is_starts_humongous(), "Must start a humongous object");
2642
2643 oop obj = oop(region->bottom());
2644
2645 // Dead objects cannot be eager reclaim candidates. Due to class
2646 // unloading it is unsafe to query their classes so we return early.
2647 if (g1h->is_obj_dead(obj, region)) {
2648 return false;
2649 }
2650
2651 // If we do not have a complete remembered set for the region, then we can
2652 // not be sure that we have all references to it.
2653 if (!region->rem_set()->is_complete()) {
2654 return false;
2655 }
2656 // Candidate selection must satisfy the following constraints
2657 // while concurrent marking is in progress:
2658 //
3398 {
3399 G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupQueueFixup, worker_id);
3400 StringDedupQueue::unlink_or_oops_do(&cl);
3401 }
3402 {
3403 G1GCParPhaseTimesTracker x(_phase_times, G1GCPhaseTimes::StringDedupTableFixup, worker_id);
3404 StringDedupTable::unlink_or_oops_do(&cl, worker_id);
3405 }
3406 }
3407 };
3408
// Runs the string deduplication cleaning task (queue and table fixup)
// across all GC workers.
void G1CollectedHeap::string_dedup_cleaning(BoolObjectClosure* is_alive,
                                            OopClosure* keep_alive,
                                            G1GCPhaseTimes* phase_times) {
  G1StringDedupCleaningTask cl(is_alive, keep_alive, phase_times);
  workers()->run_task(&cl);
}
3415
// Gang task that, in parallel, re-dirties the cards in the completed
// buffers of the given queue set, tracking per-worker time and the number
// of cards dirtied under the RedirtyCards phase.
class G1RedirtyLoggedCardsTask : public AbstractGangTask {
 private:
  G1DirtyCardQueueSet* _queue;   // Queue set whose completed buffers are processed.
  G1CollectedHeap* _g1h;
 public:
  G1RedirtyLoggedCardsTask(G1DirtyCardQueueSet* queue, G1CollectedHeap* g1h) : AbstractGangTask("Redirty Cards"),
    _queue(queue), _g1h(g1h) { }

  virtual void work(uint worker_id) {
    G1GCPhaseTimes* phase_times = _g1h->g1_policy()->phase_times();
    // Scoped tracker: records this worker's elapsed time for the phase.
    G1GCParPhaseTimesTracker x(phase_times, G1GCPhaseTimes::RedirtyCards, worker_id);

    RedirtyLoggedCardTableEntryClosure cl(_g1h);
    _queue->par_apply_closure_to_all_completed_buffers(&cl);

    phase_times->record_thread_work_item(G1GCPhaseTimes::RedirtyCards, worker_id, cl.num_dirtied());
  }
};
3434
// Re-dirties the cards logged during GC (via G1RedirtyLoggedCardsTask) and
// merges the resulting buffers into the global dirty card queue set so
// concurrent refinement will process them. Elapsed time is recorded in the
// phase times.
void G1CollectedHeap::redirty_logged_cards() {
  double redirty_logged_cards_start = os::elapsedTime();

  G1RedirtyLoggedCardsTask redirty_task(&dirty_card_queue_set(), this);
  dirty_card_queue_set().reset_for_par_iteration();
  workers()->run_task(&redirty_task);

  // Hand the redirtied buffers over to the global queue set.
  G1DirtyCardQueueSet& dcq = G1BarrierSet::dirty_card_queue_set();
  dcq.merge_bufferlists(&dirty_card_queue_set());
  assert(dirty_card_queue_set().completed_buffers_num() == 0, "All should be consumed");

  g1_policy()->phase_times()->record_redirty_logged_cards_time_ms((os::elapsedTime() - redirty_logged_cards_start) * 1000.0);
}
3448
3449 // Weak Reference Processing support
3450
3451 bool G1STWIsAliveClosure::do_object_b(oop p) {
3452 // An object is reachable if it is outside the collection set,
3453 // or is inside and copied.
3454 return !_g1h->is_in_cset(p) || p->is_forwarded();
3455 }
3456
3457 bool G1STWSubjectToDiscoveryClosure::do_object_b(oop obj) {
3458 assert(obj != NULL, "must not be NULL");
3459 assert(_g1h->is_in_reserved(obj), "Trying to discover obj " PTR_FORMAT " not in heap", p2i(obj));
3460 // The areas the CM and STW ref processor manage must be disjoint. The is_in_cset() below
3461 // may falsely indicate that this is not the case here: however the collection set only
3462 // contains old regions when concurrent mark is not running.
|