18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
27
28 #include "gc/g1/dirtyCardQueue.hpp"
29 #include "gc/g1/g1CardTable.hpp"
30 #include "gc/g1/g1CollectedHeap.hpp"
31 #include "gc/g1/g1OopClosures.hpp"
32 #include "gc/g1/g1Policy.hpp"
33 #include "gc/g1/g1RemSet.hpp"
34 #include "gc/g1/heapRegionRemSet.hpp"
35 #include "gc/shared/ageTable.hpp"
36 #include "memory/allocation.hpp"
37 #include "oops/oop.hpp"
38
39 class G1PLABAllocator;
40 class G1EvacuationRootClosures;
41 class HeapRegion;
42 class outputStream;
43
// Per-worker-thread state for a G1 evacuation (copying) pause. Each GC worker
// owns one instance holding its task queue, PLAB allocator, dirty card queue
// and the bookkeeping needed while copying live objects out of the collection
// set (see copy_to_survivor_space()).
44 class G1ParScanThreadState : public CHeapObj<mtGC> {
45 private:
46 G1CollectedHeap* _g1h;
// This worker's queue of oop/narrowOop/partial-array scan tasks.
47 RefToScanQueue* _refs;
// Thread-local dirty card queue; cards are enqueued by update_rs() below.
48 DirtyCardQueue _dcq;
// Card table used to translate field addresses to card entries in update_rs().
49 G1CardTable* _ct;
50 G1EvacuationRootClosures* _closures;
51
// Per-thread promotion-LAB allocator used when copying objects.
52 G1PLABAllocator* _plab_allocator;
53
// Per-worker survivor age table.
54 AgeTable _age_table;
// For each source in-cset state, the destination state objects in that state
// are copied to; read (with validity asserts) via dest() below.
55 InCSetState _dest[InCSetState::Num];
56 // Local tenuring threshold.
57 uint _tenuring_threshold;
58 G1ScanEvacuatedObjClosure _scanner;
59
// Seed for randomizing work stealing — presumably consumed by the task queue
// steal operations; confirm against steal_and_trim_queue()'s implementation.
60 int _hash_seed;
61 uint _worker_id;
62
63 // Map from young-age-index (0 == not young, 1 is youngest) to
64 // surviving words. base is what we get back from the malloc call
65 size_t* _surviving_young_words_base;
66 // this points into the array, as we use the first few entries for padding
67 size_t* _surviving_young_words;
68
69 // Indicates whether in the last generation (old) there is no more space
70 // available for allocation.
71 bool _old_gen_is_full;
72
// Number of size_t entries that fill one cache line; used to size the
// padding entries mentioned above.
73 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
74
75 DirtyCardQueue& dirty_card_queue() { return _dcq; }
76 G1CardTable* ct() { return _ct; }
77
// Returns the destination state for an object whose source cset state is
// 'original'; asserts both the source state and the mapped destination
// generation are valid.
78 InCSetState dest(InCSetState original) const {
79 assert(original.is_valid(),
80 "Original state invalid: " CSETSTATE_FORMAT, original.value());
81 assert(_dest[original.value()].is_valid_gen(),
82 "Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value());
83 return _dest[original.value()];
84 }
85
86 public:
87 G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
88 virtual ~G1ParScanThreadState();
89
90 void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); }
91
92 #ifdef ASSERT
93 bool queue_is_empty() const { return _refs->is_empty(); }
94
95 bool verify_ref(narrowOop* ref) const;
96 bool verify_ref(oop* ref) const;
97 bool verify_task(StarTask ref) const;
98 #endif // ASSERT
99
100 template <class T> void do_oop_ext(T* ref);
101 template <class T> void push_on_queue(T* ref);
102
// Records the cross-region reference at field p (in region 'from', pointing
// at object o) for remembered set update by enqueueing the corresponding
// card on this thread's dirty card queue. Caller must already have filtered
// out same-region references (see the assert).
103 template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
104 assert(!HeapRegion::is_in_same_region(p, o), "Caller should have filtered out cross-region references already.");
105 // If the field originates from the to-space, we don't need to include it
106 // in the remembered set updates. Also, if we are not tracking the remembered
112 dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
113 }
114 }
115 }
116
117 G1EvacuationRootClosures* closures() { return _closures; }
118 uint worker_id() { return _worker_id; }
119
120 // Returns the current amount of waste due to alignment or not being able to fit
121 // objects within LABs and the undo waste.
122 virtual void waste(size_t& wasted, size_t& undo_wasted);
123
124 size_t* surviving_young_words() {
125 // We add one to hide entry 0 which accumulates surviving words for
126 // age -1 regions (i.e. non-young ones)
127 return _surviving_young_words + 1;
128 }
129
// Accumulates this worker's surviving-words counts into the given array.
130 void flush(size_t* surviving_young_words);
131
132 private:
// Tag bit set on oop* task pointers to mark them as partial object array
// scan tasks; tested/applied by the has_/set_partial_array_mask() helpers.
133 #define G1_PARTIAL_ARRAY_MASK 0x2
134
// Returns true if ref carries the partial-array tag bit.
135 inline bool has_partial_array_mask(oop* ref) const {
136 return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
137 }
138
139 // We never encode partial array oops as narrowOop*, so return false immediately.
140 // This allows the compiler to create optimized code when popping references from
141 // the work queue.
142 inline bool has_partial_array_mask(narrowOop* ref) const {
143 assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
144 return false;
145 }
146
147 // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
148 // We always encode partial arrays as regular oop, to allow the
149 // specialization for has_partial_array_mask() for narrowOops above.
150 // This means that unintentional use of this method with narrowOops are caught
151 // by the compiler.
152 inline oop* set_partial_array_mask(oop obj) const {
168
// Routes the given task to the appropriate scan/copy handling.
169 inline void dispatch_reference(StarTask ref);
170
171 // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
172 // allocate into dest. State is the original (source) cset state for the object
173 // that is allocated for. Previous_plab_refill_failed indicates whether previously
174 // a PLAB refill into "state" failed.
175 // Returns a non-NULL pointer if successful, and updates dest if required.
176 // Also determines whether we should continue to try to allocate into the various
177 // generations or just end trying to allocate.
178 HeapWord* allocate_in_next_plab(InCSetState const state,
179 InCSetState* dest,
180 size_t word_sz,
181 bool previous_plab_refill_failed);
182
// Determines the destination state (and, via 'age', the object's new age)
// for an object in 'state' with mark word m.
183 inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
184
// Reports a JFR/tracing promotion event for the object copied to obj_ptr —
// presumably only when event reporting is enabled; confirm in the .cpp.
185 void report_promotion_event(InCSetState const dest_state,
186 oop const old, size_t word_sz, uint age,
187 HeapWord * const obj_ptr) const;
188 public:
189
// Copies obj (in source cset state 'state', with original mark word
// old_mark) to its destination and returns the new (forwarded) oop.
190 oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
191
// Fully drains this worker's task queue.
192 void trim_queue();
193
// Steals tasks from other workers' queues and processes them.
194 inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
195
196 // An attempt to evacuate "obj" has failed; take necessary steps.
197 oop handle_evacuation_failure_par(oop obj, markOop m);
198 };
199
// Owns the collection of per-worker G1ParScanThreadState objects for one
// evacuation pause, plus the combined surviving-young-words totals.
200 class G1ParScanThreadStateSet : public StackObj {
201 G1CollectedHeap* _g1h;
// One state per worker thread (_n_workers entries).
202 G1ParScanThreadState** _states;
// Accumulated surviving-words counts across all workers.
203 size_t* _surviving_young_words_total;
204 size_t _young_cset_length;
205 uint _n_workers;
// Presumably set once flush() has run; confirm in the .cpp.
206 bool _flushed;
207
208 public:
209 G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length);
210 ~G1ParScanThreadStateSet();
211
// Flushes every per-worker state into the combined totals.
212 void flush();
|
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
27
28 #include "gc/g1/dirtyCardQueue.hpp"
29 #include "gc/g1/g1CardTable.hpp"
30 #include "gc/g1/g1CollectedHeap.hpp"
31 #include "gc/g1/g1OopClosures.hpp"
32 #include "gc/g1/g1Policy.hpp"
33 #include "gc/g1/g1RemSet.hpp"
34 #include "gc/g1/heapRegionRemSet.hpp"
35 #include "gc/shared/ageTable.hpp"
36 #include "memory/allocation.hpp"
37 #include "oops/oop.hpp"
38 #include "utilities/ticks.hpp"
39
40 class G1PLABAllocator;
41 class G1EvacuationRootClosures;
42 class HeapRegion;
43 class outputStream;
44
// Per-worker-thread state for a G1 evacuation (copying) pause. Each GC worker
// owns one instance holding its task queue, PLAB allocator, dirty card queue
// and the bookkeeping needed while copying live objects out of the collection
// set (see copy_to_survivor_space()).
45 class G1ParScanThreadState : public CHeapObj<mtGC> {
46 G1CollectedHeap* _g1h;
// This worker's queue of oop/narrowOop/partial-array scan tasks.
47 RefToScanQueue* _refs;
// Thread-local dirty card queue; cards are enqueued by update_rs() below.
48 DirtyCardQueue _dcq;
// Card table used to translate field addresses to card entries in update_rs().
49 G1CardTable* _ct;
50 G1EvacuationRootClosures* _closures;
51
// Per-thread promotion-LAB allocator used when copying objects.
52 G1PLABAllocator* _plab_allocator;
53
// Per-worker survivor age table.
54 AgeTable _age_table;
// For each source in-cset state, the destination state objects in that state
// are copied to; read (with validity asserts) via dest() below.
55 InCSetState _dest[InCSetState::Num];
56 // Local tenuring threshold.
57 uint _tenuring_threshold;
58 G1ScanEvacuatedObjClosure _scanner;
59
// Seed for randomizing work stealing — presumably consumed by the task queue
// steal operations; confirm against steal_and_trim_queue()'s implementation.
60 int _hash_seed;
61 uint _worker_id;
62
63 // Upper and lower threshold to start and end work queue draining.
64 uint const _stack_drain_upper_threshold;
65 uint const _stack_drain_lower_threshold;
66
// Accumulated time spent in partial queue trimming; reported and cleared by
// trim_ticks_and_reset().
67 Tickspan _trim_ticks;
68 // Map from young-age-index (0 == not young, 1 is youngest) to
69 // surviving words. base is what we get back from the malloc call
70 size_t* _surviving_young_words_base;
71 // this points into the array, as we use the first few entries for padding
72 size_t* _surviving_young_words;
73
74 // Indicates whether in the last generation (old) there is no more space
75 // available for allocation.
76 bool _old_gen_is_full;
77
// Number of size_t entries that fill one cache line; used to size the
// padding entries mentioned above.
78 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
79
80 DirtyCardQueue& dirty_card_queue() { return _dcq; }
81 G1CardTable* ct() { return _ct; }
82
// Returns the destination state for an object whose source cset state is
// 'original'; asserts both the source state and the mapped destination
// generation are valid.
83 InCSetState dest(InCSetState original) const {
84 assert(original.is_valid(),
85 "Original state invalid: " CSETSTATE_FORMAT, original.value());
86 assert(_dest[original.value()].is_valid_gen(),
87 "Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value());
88 return _dest[original.value()];
89 }
90
91 public:
92 G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
93 virtual ~G1ParScanThreadState();
94
95 void set_ref_processor(ReferenceProcessor* rp) { _scanner.set_ref_processor(rp); }
96
97 #ifdef ASSERT
98 bool queue_is_empty() const { return _refs->is_empty(); }
99
100 bool verify_ref(narrowOop* ref) const;
101 bool verify_ref(oop* ref) const;
102 bool verify_task(StarTask ref) const;
103 #endif // ASSERT
104
105 template <class T> void do_oop_ext(T* ref);
106 template <class T> void push_on_queue(T* ref);
107
// Records the cross-region reference at field p (in region 'from', pointing
// at object o) for remembered set update by enqueueing the corresponding
// card on this thread's dirty card queue. Caller must already have filtered
// out same-region references (see the assert).
108 template <class T> void update_rs(HeapRegion* from, T* p, oop o) {
109 assert(!HeapRegion::is_in_same_region(p, o), "Caller should have filtered out cross-region references already.");
110 // If the field originates from the to-space, we don't need to include it
111 // in the remembered set updates. Also, if we are not tracking the remembered
117 dirty_card_queue().enqueue((jbyte*)ct()->byte_for_index(card_index));
118 }
119 }
120 }
121
122 G1EvacuationRootClosures* closures() { return _closures; }
123 uint worker_id() { return _worker_id; }
124
125 // Returns the current amount of waste due to alignment or not being able to fit
126 // objects within LABs and the undo waste.
127 virtual void waste(size_t& wasted, size_t& undo_wasted);
128
129 size_t* surviving_young_words() {
130 // We add one to hide entry 0 which accumulates surviving words for
131 // age -1 regions (i.e. non-young ones)
132 return _surviving_young_words + 1;
133 }
134
// Accumulates this worker's surviving-words counts into the given array.
135 void flush(size_t* surviving_young_words);
136
137 private:
// Tag bit set on oop* task pointers to mark them as partial object array
// scan tasks; tested/applied by the has_/set_partial_array_mask() helpers.
138 #define G1_PARTIAL_ARRAY_MASK 0x2
139
// Returns true if ref carries the partial-array tag bit.
140 inline bool has_partial_array_mask(oop* ref) const {
141 return ((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) == G1_PARTIAL_ARRAY_MASK;
142 }
143
144 // We never encode partial array oops as narrowOop*, so return false immediately.
145 // This allows the compiler to create optimized code when popping references from
146 // the work queue.
147 inline bool has_partial_array_mask(narrowOop* ref) const {
148 assert(((uintptr_t)ref & G1_PARTIAL_ARRAY_MASK) != G1_PARTIAL_ARRAY_MASK, "Partial array oop reference encoded as narrowOop*");
149 return false;
150 }
151
152 // Only implement set_partial_array_mask() for regular oops, not for narrowOops.
153 // We always encode partial arrays as regular oop, to allow the
154 // specialization for has_partial_array_mask() for narrowOops above.
155 // This means that unintentional use of this method with narrowOops are caught
156 // by the compiler.
157 inline oop* set_partial_array_mask(oop obj) const {
173
// Routes the given task to the appropriate scan/copy handling.
174 inline void dispatch_reference(StarTask ref);
175
176 // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
177 // allocate into dest. State is the original (source) cset state for the object
178 // that is allocated for. Previous_plab_refill_failed indicates whether previously
179 // a PLAB refill into "state" failed.
180 // Returns a non-NULL pointer if successful, and updates dest if required.
181 // Also determines whether we should continue to try to allocate into the various
182 // generations or just end trying to allocate.
183 HeapWord* allocate_in_next_plab(InCSetState const state,
184 InCSetState* dest,
185 size_t word_sz,
186 bool previous_plab_refill_failed);
187
// Determines the destination state (and, via 'age', the object's new age)
// for an object in 'state' with mark word m.
188 inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
189
// Reports a JFR/tracing promotion event for the object copied to obj_ptr —
// presumably only when event reporting is enabled; confirm in the .cpp.
190 void report_promotion_event(InCSetState const dest_state,
191 oop const old, size_t word_sz, uint age,
192 HeapWord * const obj_ptr) const;
193
// Predicates comparing the queue against the upper/lower drain thresholds
// declared above — presumably used by trim_queue_partially(); confirm in
// the .inline.hpp.
194 inline bool should_start_trim_queue_partially() const;
195 inline bool should_end_trim_queue_partially() const;
196
197 inline void trim_queue_partially_internal();
198 public:
// Copies obj (in source cset state 'state', with original mark word
// old_mark) to its destination and returns the new (forwarded) oop.
199 oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
200
// Fully drains this worker's task queue.
201 void trim_queue();
// Partially drains the queue, bounded by the drain thresholds above.
202 void trim_queue_partially();
203
// Returns the time accumulated in _trim_ticks and resets it.
204 Tickspan trim_ticks_and_reset();
205
// Steals tasks from other workers' queues and processes them.
206 inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
207
208 // An attempt to evacuate "obj" has failed; take necessary steps.
209 oop handle_evacuation_failure_par(oop obj, markOop m);
210 };
211
// Owns the collection of per-worker G1ParScanThreadState objects for one
// evacuation pause, plus the combined surviving-young-words totals.
212 class G1ParScanThreadStateSet : public StackObj {
213 G1CollectedHeap* _g1h;
// One state per worker thread (_n_workers entries).
214 G1ParScanThreadState** _states;
// Accumulated surviving-words counts across all workers.
215 size_t* _surviving_young_words_total;
216 size_t _young_cset_length;
217 uint _n_workers;
// Presumably set once flush() has run; confirm in the .cpp.
218 bool _flushed;
219
220 public:
221 G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length);
222 ~G1ParScanThreadStateSet();
223
// Flushes every per-worker state into the combined totals.
224 void flush();
|