63 // Remember the last enqueued card to avoid enqueuing the same card over and over;
64 // since we only ever scan a card once, this is sufficient.
65 size_t _last_enqueued_card;
66
67 // Upper and lower threshold to start and end work queue draining.
68 uint const _stack_trim_upper_threshold;
69 uint const _stack_trim_lower_threshold;
70
71 Tickspan _trim_ticks;
72 // Map from young-age-index (0 == not young, 1 is youngest) to
73 // surviving words. base is what we get back from the malloc call
74 size_t* _surviving_young_words_base;
75 // this points into the array, as we use the first few entries for padding
76 size_t* _surviving_young_words;
77 // Number of elements in the array above.
78 size_t _surviving_words_length;
79 // Indicates whether in the last generation (old) there is no more space
80 // available for allocation.
81 bool _old_gen_is_full;
82
83 G1RedirtyCardsQueue& redirty_cards_queue() { return _rdcq; } // Accessor for the per-thread redirty-cards queue (_rdcq).
84 G1CardTable* ct() { return _ct; } // Accessor for the card table (_ct).
85
86 // Look up, in the per-type _dest table, the region attribute that objects
// coming from a region with attribute `original` are evacuated to.
// Debug-only asserts sanity-check both the input and the table entry.
G1HeapRegionAttr dest(G1HeapRegionAttr original) const {
  assert(original.is_valid(),
         "Original region attr invalid: %s", original.get_type_str());

  const G1HeapRegionAttr result = _dest[original.type()];
  assert(result.is_valid_gen(),
         "Dest region attr is invalid: %s", result.get_type_str());
  return result;
}
93
94 size_t _num_optional_regions;
95 G1OopStarChunkedList* _oops_into_optional_regions;
96
97 G1NUMA* _numa;
98
99 // Records how many object allocations happened at each node during copy to survivor.
100 // Only starts recording when log of gc+heap+numa is enabled and its data is
101 // transferred when flushed.
102 size_t* _obj_alloc_stat;
140 size_t card_index = ct()->index_for(p);
141 // If the card hasn't been added to the buffer, do it.
142 if (_last_enqueued_card != card_index) {
143 redirty_cards_queue().enqueue(ct()->byte_for_index(card_index));
144 _last_enqueued_card = card_index;
145 }
146 }
147
148 G1EvacuationRootClosures* closures() { return _closures; } // Accessor for the evacuation root closures (_closures).
149 uint worker_id() { return _worker_id; } // Returns this thread state's worker id (_worker_id).
150
151 size_t lab_waste_words() const;
152 size_t lab_undo_waste_words() const;
153
154 // Pass locally gathered statistics to global state. Returns the total number of
155 // HeapWords copied.
156 size_t flush(size_t* surviving_young_words);
157
158 private:
159 inline void do_partial_array(PartialArrayScanTask task);
160
161 HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr,
162 oop old,
163 size_t word_sz,
164 uint age,
165 uint node_index);
166
167 void undo_allocation(G1HeapRegionAttr dest_addr,
168 HeapWord* obj_ptr,
169 size_t word_sz,
170 uint node_index);
171
172 inline oop do_copy_to_survivor_space(G1HeapRegionAttr region_attr,
173 oop obj,
174 markWord old_mark);
175
176 // This method is applied to the fields of the objects that have just been copied.
177 template <class T> inline void do_oop_evac(T* p);
178
179 inline void dispatch_task(ScannerTask task);
|
63 // Remember the last enqueued card to avoid enqueuing the same card over and over;
64 // since we only ever scan a card once, this is sufficient.
65 size_t _last_enqueued_card;
66
67 // Upper and lower threshold to start and end work queue draining.
68 uint const _stack_trim_upper_threshold;
69 uint const _stack_trim_lower_threshold;
70
71 Tickspan _trim_ticks;
72 // Map from young-age-index (0 == not young, 1 is youngest) to
73 // surviving words. base is what we get back from the malloc call
74 size_t* _surviving_young_words_base;
75 // this points into the array, as we use the first few entries for padding
76 size_t* _surviving_young_words;
77 // Number of elements in the array above.
78 size_t _surviving_words_length;
79 // Indicates whether in the last generation (old) there is no more space
80 // available for allocation.
81 bool _old_gen_is_full;
82
83 int _objarray_scan_chunk_size;
84 int _objarray_length_offset_in_bytes;
85
86 G1RedirtyCardsQueue& redirty_cards_queue() { return _rdcq; } // Accessor for the per-thread redirty-cards queue (_rdcq).
87 G1CardTable* ct() { return _ct; } // Accessor for the card table (_ct).
88
89 // Translate a source region attribute into the destination region attribute
// used when copying objects out of such a region, via the _dest lookup table.
// Both the argument and the fetched entry are verified in debug builds.
G1HeapRegionAttr dest(G1HeapRegionAttr original) const {
  assert(original.is_valid(),
         "Original region attr invalid: %s", original.get_type_str());

  const G1HeapRegionAttr target = _dest[original.type()];
  assert(target.is_valid_gen(),
         "Dest region attr is invalid: %s", target.get_type_str());
  return target;
}
96
97 size_t _num_optional_regions;
98 G1OopStarChunkedList* _oops_into_optional_regions;
99
100 G1NUMA* _numa;
101
102 // Records how many object allocations happened at each node during copy to survivor.
103 // Only starts recording when log of gc+heap+numa is enabled and its data is
104 // transferred when flushed.
105 size_t* _obj_alloc_stat;
143 size_t card_index = ct()->index_for(p);
144 // If the card hasn't been added to the buffer, do it.
145 if (_last_enqueued_card != card_index) {
146 redirty_cards_queue().enqueue(ct()->byte_for_index(card_index));
147 _last_enqueued_card = card_index;
148 }
149 }
150
151 G1EvacuationRootClosures* closures() { return _closures; } // Accessor for the evacuation root closures (_closures).
152 uint worker_id() { return _worker_id; } // Returns this thread state's worker id (_worker_id).
153
154 size_t lab_waste_words() const;
155 size_t lab_undo_waste_words() const;
156
157 // Pass locally gathered statistics to global state. Returns the total number of
158 // HeapWords copied.
159 size_t flush(size_t* surviving_young_words);
160
161 private:
162 inline void do_partial_array(PartialArrayScanTask task);
163 inline oop start_partial_objArray(G1HeapRegionAttr dest_dir, oop from, oop to);
164
165 HeapWord* allocate_copy_slow(G1HeapRegionAttr* dest_attr,
166 oop old,
167 size_t word_sz,
168 uint age,
169 uint node_index);
170
171 void undo_allocation(G1HeapRegionAttr dest_addr,
172 HeapWord* obj_ptr,
173 size_t word_sz,
174 uint node_index);
175
176 inline oop do_copy_to_survivor_space(G1HeapRegionAttr region_attr,
177 oop obj,
178 markWord old_mark);
179
180 // This method is applied to the fields of the objects that have just been copied.
181 template <class T> inline void do_oop_evac(T* p);
182
183 inline void dispatch_task(ScannerTask task);
|