#include "gc_implementation/g1/g1OopClosures.hpp"
#include "gc_implementation/g1/g1RemSet.hpp"
#include "gc_implementation/shared/ageTable.hpp"
#include "memory/allocation.hpp"
#include "oops/oop.hpp"

class HeapRegion;
class outputStream;

class G1ParScanThreadState : public StackObj {
 private:
  G1CollectedHeap* _g1h;
  RefToScanQueue* _refs;
  DirtyCardQueue _dcq;
  G1SATBCardTableModRefBS* _ct_bs;
  G1RemSet* _g1_rem;

  G1ParGCAllocator* _g1_par_allocator;

  ageTable _age_table;
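  // Map from a source InCSetState to the InCSetState that objects evacuated
  // from it should be copied to, as set up from the current tenuring policy.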
  InCSetState _dest[InCSetState::Num];
  // Local tenuring threshold.
  uint _tenuring_threshold;
  G1ParScanClosure _scanner;

  size_t _alloc_buffer_waste;
  size_t _undo_waste;

  OopsInHeapRegionClosure* _evac_failure_cl;

  int _hash_seed;
  uint _queue_num;

  size_t _term_attempts;

  double _start;
  double _start_strong_roots;
  double _strong_roots_time;
  double _start_term;
  double _term_time;

  // Map from young-age-index (0 == not young, 1 is youngest) to
  // surviving words. "base" is what we get back from the malloc call.
  size_t* _surviving_young_words_base;
  // This points into the array, as we use the first few entries for padding.
  size_t* _surviving_young_words;

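  // Number of size_t entries that fit in a cache line; the surviving-young-words
  // array is padded by this amount so that per-thread copies do not share cache
  // lines (avoids false sharing between GC worker threads).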
#define PADDING_ELEM_NUM (DEFAULT_CACHE_LINE_SIZE / sizeof(size_t))

  void add_to_alloc_buffer_waste(size_t waste) { _alloc_buffer_waste += waste; }
  void add_to_undo_waste(size_t waste) { _undo_waste += waste; }

  DirtyCardQueue& dirty_card_queue() { return _dcq; }
  G1SATBCardTableModRefBS* ctbs() { return _ct_bs; }

  InCSetState dest(InCSetState original) const {
    assert(original.is_valid(),
           err_msg("Original state invalid: %d", original.value()));
    assert(_dest[original.value()].is_valid_gen(),
           err_msg("Dest state is invalid: %d", _dest[original.value()].value()));
    return _dest[original.value()];
  }

 public:
  G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp);
  ~G1ParScanThreadState();

  ageTable* age_table() { return &_age_table; }

#ifdef ASSERT
  bool queue_is_empty() const { return _refs->is_empty(); }

  bool verify_ref(narrowOop* ref) const;
  bool verify_ref(oop* ref) const;
  bool verify_task(StarTask ref) const;
#endif // ASSERT

  template <class T> void push_on_queue(T* ref) {
    assert(verify_ref(ref), "sanity");
    _refs->push(ref);
  }

  template <class T> void update_rs(HeapRegion* from, T* p, uint tid) {
    // If the new value of the field points into the same region or
    // into to-space, we don't need to include it in the RSet updates.
    if (!from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) && !from->is_survivor()) {
      size_t card_index = ctbs()->index_for(p);
      // If the card hasn't been added to the buffer yet, do it now.
      if (ctbs()->mark_card_deferred(card_index)) {
        dirty_card_queue().enqueue((jbyte*)ctbs()->byte_for_index(card_index));
      }
    }
  }

  void set_evac_failure_closure(OopsInHeapRegionClosure* evac_failure_cl) {
    _evac_failure_cl = evac_failure_cl;
  }

  OopsInHeapRegionClosure* evac_failure_closure() { return _evac_failure_cl; }

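  // Seed for the pseudo-random queue selection used when stealing work
  // from other threads' task queues.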
  int* hash_seed() { return &_hash_seed; }
  uint queue_num() { return _queue_num; }

  size_t term_attempts() const { return _term_attempts; }
  void note_term_attempt() { _term_attempts++; }

  void start_strong_roots() {
    _start_strong_roots = os::elapsedTime();
  }
  void end_strong_roots() {
    _strong_roots_time += (os::elapsedTime() - _start_strong_roots);
  }
  double strong_roots_time() const { return _strong_roots_time; }

 private:
#define G1_PARTIAL_ARRAY_MASK 0x2

  // Only implement set_partial_array_mask() for regular oops, not for narrowOops;
  // partial arrays are always encoded as a regular oop*, to allow a
  // specialization of has_partial_array_mask() for narrowOops that rejects them.
  // This means that unintentional use of this method with narrowOops is caught
  // by the compiler.
  inline oop* set_partial_array_mask(oop obj) const {
    assert(((uintptr_t)(void *)obj & G1_PARTIAL_ARRAY_MASK) == 0, "Information loss!");
    return (oop*) ((uintptr_t)(void *)obj | G1_PARTIAL_ARRAY_MASK);
  }

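  // Strips the partial-array tag from a queue entry, recovering the original oop.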
  inline oop clear_partial_array_mask(oop* ref) const {
    return cast_to_oop((intptr_t)ref & ~G1_PARTIAL_ARRAY_MASK);
  }

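  // Processes the next chunk of a partially-scanned object array, re-pushing
  // a tagged continuation entry on the queue if further chunks remain.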
  inline void do_oop_partial_array(oop* p);

  // This method is applied to the fields of the objects that have just been copied.
  template <class T> inline void do_oop_evac(T* p, HeapRegion* from);

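  // Handles a single queue entry: either continues scanning a partial object
  // array or evacuates the object the reference points to.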
  template <class T> inline void deal_with_reference(T* ref_to_scan);

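  // Unpacks a StarTask and forwards it to deal_with_reference() as either a
  // narrowOop* or an oop*.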
  inline void dispatch_reference(StarTask ref);

  // Tries to allocate word_sz in the PLAB of the next "generation" after trying to
  // allocate into dest. "state" is the original (source) cset state of the object
  // being allocated for. Returns a non-NULL pointer if allocation succeeded, and
  // updates dest if required.
  HeapWord* allocate_in_next_plab(InCSetState const state,
                                  InCSetState* dest,
                                  size_t word_sz,
                                  AllocationContext_t const context);

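  // Determines the state an object with the given source state and mark word
  // should be evacuated to: young objects still below the local tenuring
  // threshold keep their state, everything else moves on to dest(state).
  // The object's age is returned via the age parameter.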
  inline InCSetState next_state(InCSetState const state, markOop const m, uint& age);
 public:

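  // Copies the given object to its destination space, installing a forwarding
  // pointer in the old mark word; returns the new location, or the result of
  // evacuation-failure handling if no space could be allocated.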
  oop copy_to_survivor_space(InCSetState const state, oop const obj, markOop const old_mark);

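  // Drains the local work queue until it is empty.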
  void trim_queue();

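  // Repeatedly steals work from other threads' queues and processes it,
  // trimming the local queue in between, until no more work can be found.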
  inline void steal_and_trim_queue(RefToScanQueueSet* task_queues);
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_HPP