20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
27
28 #include "gc/g1/dirtyCardQueue.hpp"
29 #include "gc/g1/g1CardTable.hpp"
30 #include "gc/g1/g1CollectedHeap.hpp"
31 #include "gc/g1/g1OopClosures.hpp"
32 #include "gc/g1/g1Policy.hpp"
33 #include "gc/g1/g1RemSet.hpp"
34 #include "gc/g1/heapRegionRemSet.hpp"
35 #include "gc/shared/ageTable.hpp"
36 #include "memory/allocation.hpp"
37 #include "oops/oop.hpp"
38 #include "utilities/ticks.hpp"
39
40 class G1PLABAllocator;
41 class G1EvacuationRootClosures;
42 class HeapRegion;
43 class outputStream;
44
// Per-GC-worker state for a G1 evacuation pause.  Each parallel worker owns
// one instance; it bundles the worker-local scan queue, dirty card queue and
// PLAB allocator so objects can be evacuated without synchronizing on shared
// structures.
45 class G1ParScanThreadState : public CHeapObj<mtGC> {
46 G1CollectedHeap* _g1h;
47 RefToScanQueue* _refs;   // Worker-local queue of references still to be scanned.
48 DirtyCardQueue _dcq;     // Cards dirtied by this worker.
49 G1CardTable* _ct;
50 G1EvacuationRootClosures* _closures;
51 
52 G1PLABAllocator* _plab_allocator;   // Per-worker PLAB allocator used during copying.
53 
54 AgeTable _age_table;                // Object age statistics gathered while evacuating.
55 InCSetState _dest[InCSetState::Num];   // Destination state, indexed by source region's in-cset state.
56 // Local tenuring threshold.
57 uint _tenuring_threshold;
58 G1ScanEvacuatedObjClosure _scanner;
59 
70 // this points into the array, as we use the first few entries for padding
71 size_t* _surviving_young_words;
72 
73 // Indicates whether in the last generation (old) there is no more space
74 // available for allocation.
75 bool _old_gen_is_full;
76 
77 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
78 
79 DirtyCardQueue& dirty_card_queue() { return _dcq; }
80 G1CardTable* ct() { return _ct; }
81 
// Maps a source region's in-collection-set state to the destination state
// objects copied out of it should get.  Both source and destination are
// assert-checked for validity.
82 InCSetState dest(InCSetState original) const {
83 assert(original.is_valid(),
84 "Original state invalid: " CSETSTATE_FORMAT, original.value());
85 assert(_dest[original.value()].is_valid_gen(),
86 "Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value());
87 return _dest[original.value()];
88 }
89 
90 public:
91 G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length);
92 virtual ~G1ParScanThreadState();
93 
94 void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }
95 
96 #ifdef ASSERT
97 bool queue_is_empty() const { return _refs->is_empty(); }
98 
// Verification helpers (debug builds only) for references/tasks taken from
// the scan queues.
99 bool verify_ref(narrowOop* ref) const;
100 bool verify_ref(oop* ref) const;
101 bool verify_task(StarTask ref) const;
102 #endif // ASSERT
103 
104 template <class T> void do_oop_ext(T* ref);
105 template <class T> void push_on_queue(T* ref);
106 
// Enqueues a card for the reference at p (pointing to object o) unless the
// target region's remembered set is not being tracked.  Callers must already
// have filtered same-region and from-young references (asserted below).
107 template <class T> void enqueue_card_if_tracked(T* p, oop o) {
108 assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
109 assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
110 if (!_g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {
111 return;
189 oop const old, size_t word_sz, uint age,
190 HeapWord * const obj_ptr) const;
191 
192 inline bool needs_partial_trimming() const;
193 inline bool is_partially_trimmed() const;
194 
195 inline void trim_queue_to_threshold(uint threshold);
196 public:
// Evacuates obj (whose source region has the given in-cset state) and
// returns the resulting oop.
197 oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
198 
199 void trim_queue();
200 void trim_queue_partially();
201 
// Time spent trimming the queue, accumulated for this worker.
202 Tickspan trim_ticks() const;
203 void reset_trim_ticks();
204 
205 inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
206 
207 // An attempt to evacuate "obj" has failed; take necessary steps.
208 oop handle_evacuation_failure_par(oop obj, markOop m);
209 };
210
// Owns the per-worker G1ParScanThreadState instances for one evacuation
// pause, and aggregates their results (surviving young words) when flushed.
211 class G1ParScanThreadStateSet : public StackObj {
212 G1CollectedHeap* _g1h;
213 G1ParScanThreadState** _states;   // One entry per worker; see state_for_worker().
214 size_t* _surviving_young_words_total;   // Set-wide totals; exposed via surviving_young_words().
215 size_t _young_cset_length;
216 uint _n_workers;
217 bool _flushed;   // NOTE(review): presumably set by flush() — verify in the .cpp.
218 
219 public:
220 G1ParScanThreadStateSet(G1CollectedHeap* g1h, uint n_workers, size_t young_cset_length);
221 ~G1ParScanThreadStateSet();
222 
// Merges per-worker data into the set-wide totals (implementation not
// visible in this header).
223 void flush();
224 
225 G1ParScanThreadState* state_for_worker(uint worker_id);
226 
227 const size_t* surviving_young_words() const;
228 
229 private:
// Factory for a single worker's state; used internally by the set.
230 G1ParScanThreadState* new_par_scan_state(uint worker_id, size_t young_cset_length);
231 };
232
233 #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
|
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
27
28 #include "gc/g1/dirtyCardQueue.hpp"
29 #include "gc/g1/g1CardTable.hpp"
30 #include "gc/g1/g1CollectedHeap.hpp"
31 #include "gc/g1/g1OopClosures.hpp"
32 #include "gc/g1/g1Policy.hpp"
33 #include "gc/g1/g1RemSet.hpp"
34 #include "gc/g1/heapRegionRemSet.hpp"
35 #include "gc/shared/ageTable.hpp"
36 #include "memory/allocation.hpp"
37 #include "oops/oop.hpp"
38 #include "utilities/ticks.hpp"
39
40 class G1OopStarChunkedList;
41 class G1PLABAllocator;
42 class G1EvacuationRootClosures;
43 class HeapRegion;
44 class outputStream;
45
// Per-GC-worker state for a G1 evacuation pause.  Each parallel worker owns
// one instance; it bundles the worker-local scan queue, dirty card queue and
// PLAB allocator so objects can be evacuated without synchronizing on shared
// structures.  This version also tracks references into "optional"
// collection set regions, which may be evacuated later in the pause.
46 class G1ParScanThreadState : public CHeapObj<mtGC> {
47 G1CollectedHeap* _g1h;
48 RefToScanQueue* _refs;   // Worker-local queue of references still to be scanned.
49 DirtyCardQueue _dcq;     // Cards dirtied by this worker.
50 G1CardTable* _ct;
51 G1EvacuationRootClosures* _closures;
52 
53 G1PLABAllocator* _plab_allocator;   // Per-worker PLAB allocator used during copying.
54 
55 AgeTable _age_table;                // Object age statistics gathered while evacuating.
56 InCSetState _dest[InCSetState::Num];   // Destination state, indexed by source region's in-cset state.
57 // Local tenuring threshold.
58 uint _tenuring_threshold;
59 G1ScanEvacuatedObjClosure _scanner;
60 
71 // this points into the array, as we use the first few entries for padding
72 size_t* _surviving_young_words;
73 
74 // Indicates whether in the last generation (old) there is no more space
75 // available for allocation.
76 bool _old_gen_is_full;
77 
78 #define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))
79 
80 DirtyCardQueue& dirty_card_queue() { return _dcq; }
81 G1CardTable* ct() { return _ct; }
82 
// Maps a source region's in-collection-set state to the destination state
// objects copied out of it should get.  Both source and destination are
// assert-checked for validity.
83 InCSetState dest(InCSetState original) const {
84 assert(original.is_valid(),
85 "Original state invalid: " CSETSTATE_FORMAT, original.value());
86 assert(_dest[original.value()].is_valid_gen(),
87 "Dest state is invalid: " CSETSTATE_FORMAT, _dest[original.value()].value());
88 return _dest[original.value()];
89 }
90 
// References into optional collection set regions, recorded for possible
// later evacuation.  NOTE(review): presumably one chunked list per optional
// region (indexed via oops_into_optional_region()) — verify in the .cpp.
91 size_t _num_optional_regions;
92 G1OopStarChunkedList* _oops_into_optional_regions;
93 
94 public:
95 G1ParScanThreadState(G1CollectedHeap* g1h,
96 uint worker_id,
97 size_t young_cset_length,
98 size_t optional_cset_length);
99 virtual ~G1ParScanThreadState();
100 
101 void set_ref_discoverer(ReferenceDiscoverer* rd) { _scanner.set_ref_discoverer(rd); }
102 
103 #ifdef ASSERT
104 bool queue_is_empty() const { return _refs->is_empty(); }
105 
// Verification helpers (debug builds only) for references/tasks taken from
// the scan queues.
106 bool verify_ref(narrowOop* ref) const;
107 bool verify_ref(oop* ref) const;
108 bool verify_task(StarTask ref) const;
109 #endif // ASSERT
110 
111 template <class T> void do_oop_ext(T* ref);
112 template <class T> void push_on_queue(T* ref);
113 
// Enqueues a card for the reference at p (pointing to object o) unless the
// target region's remembered set is not being tracked.  Callers must already
// have filtered same-region and from-young references (asserted below).
114 template <class T> void enqueue_card_if_tracked(T* p, oop o) {
115 assert(!HeapRegion::is_in_same_region(p, o), "Should have filtered out cross-region references already.");
116 assert(!_g1h->heap_region_containing(p)->is_young(), "Should have filtered out from-young references already.");
117 if (!_g1h->heap_region_containing((HeapWord*)o)->rem_set()->is_tracked()) {
118 return;
196 oop const old, size_t word_sz, uint age,
197 HeapWord * const obj_ptr) const;
198 
199 inline bool needs_partial_trimming() const;
200 inline bool is_partially_trimmed() const;
201 
202 inline void trim_queue_to_threshold(uint threshold);
203 public:
// Evacuates obj (whose source region has the given in-cset state) and
// returns the resulting oop.
204 oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);
205 
206 void trim_queue();
207 void trim_queue_partially();
208 
// Time spent trimming the queue, accumulated for this worker.
209 Tickspan trim_ticks() const;
210 void reset_trim_ticks();
211 
212 inline void steal_and_trim_queue(RefToScanQueueSet *task_queues);
213 
214 // An attempt to evacuate "obj" has failed; take necessary steps.
215 oop handle_evacuation_failure_par(oop obj, markOop m);
216 
// Record roots/references that point into optional collection set regions
// so they can be processed if those regions are evacuated later.
217 template <typename T>
218 inline void remember_root_into_optional_region(T* p);
219 template <typename T>
220 inline void remember_reference_into_optional_region(T* p);
221 
// Returns the list of recorded oop* locations pointing into the given
// optional region.
222 inline G1OopStarChunkedList* oops_into_optional_region(const HeapRegion* hr);
223 };
224
// Owns the per-worker G1ParScanThreadState instances for one evacuation
// pause, and aggregates their results (surviving young words) when flushed.
// This version also carries the optional collection set length through to
// the per-worker states.
225 class G1ParScanThreadStateSet : public StackObj {
226 G1CollectedHeap* _g1h;
227 G1ParScanThreadState** _states;   // One entry per worker; see state_for_worker().
228 size_t* _surviving_young_words_total;   // Set-wide totals; exposed via surviving_young_words().
229 size_t _young_cset_length;
230 size_t _optional_cset_length;   // Number of optional collection set regions for this pause.
231 uint _n_workers;
232 bool _flushed;   // NOTE(review): presumably set by flush() — verify in the .cpp.
233 
234 public:
235 G1ParScanThreadStateSet(G1CollectedHeap* g1h,
236 uint n_workers,
237 size_t young_cset_length,
238 size_t optional_cset_length);
239 ~G1ParScanThreadStateSet();
240 
// Merges per-worker data into the set-wide totals (implementation not
// visible in this header).
241 void flush();
// Bookkeeping for an optional region that ended up not being evacuated.
242 void record_unused_optional_region(HeapRegion* hr);
243 
244 G1ParScanThreadState* state_for_worker(uint worker_id);
245 
246 const size_t* surviving_young_words() const;
247 
248 private:
// Factory for a single worker's state; used internally by the set.
// NOTE(review): does not take optional_cset_length — presumably read from
// the _optional_cset_length member; verify in the .cpp.
249 G1ParScanThreadState* new_par_scan_state(uint worker_id, size_t young_cset_length);
250 };
251
252 #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_HPP
|