  // non-young regions (where the age is -1)
  // We also add a few elements at the beginning and at the end in
  // an attempt to eliminate cache contention
  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  uint array_length = PADDING_ELEM_NUM +
                      real_length +
                      PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL)
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));
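
  // Illustrative note on the layout: _surviving_young_words[0] accumulates
  // words surviving from non-young regions (their young_index_in_cset() of -1
  // maps to index 0 via the "+1" used in copy_to_survivor_space below), while
  // indexes 1..young_cset_region_length() track the individual young CSet
  // regions. The PADDING_ELEM_NUM elements on either side are never touched;
  // they only keep adjacent threads' counters on separate cache lines.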

  _alloc_buffers[GCAllocForSurvived] = &_surviving_alloc_buffer;
  _alloc_buffers[GCAllocForTenured]  = &_tenured_alloc_buffer;

  _start = os::elapsedTime();
}

G1ParScanThreadState::~G1ParScanThreadState() {
  retire_alloc_buffers();
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base, mtGC);
}

void
G1ParScanThreadState::print_termination_stats_hdr(outputStream* const st)
{
  st->print_raw_cr("GC Termination Stats");
  st->print_raw_cr("     elapsed  --strong roots-- -------termination-------"
                   " ------waste (KiB)------");
  st->print_raw_cr("thr     ms        ms      %        ms      %    attempts"
                   "  total   alloc    undo");
  st->print_raw_cr("--- --------- --------- ------ --------- ------ --------"
                   " ------- ------- -------");
}

void
G1ParScanThreadState::print_termination_stats(int i,
                                              outputStream* const st) const
{
  const double elapsed_ms = elapsed_time() * 1000.0;
  const double s_roots_ms = strong_roots_time() * 1000.0;
  const double term_ms    = term_time() * 1000.0;
  st->print_cr("%3d %9.2f %9.2f %6.2f "
  // ... (elided: the remaining format arguments of print_cr and, under
  // #ifdef ASSERT, the body of verify_ref) ...
  }
  return true;
}

bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT
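
// verify_ref()/verify_task() exist only in debug builds (the #ifdef ASSERT
// block ending above); in product builds the assert(verify_task(...), ...)
// calls below compile away entirely.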

void G1ParScanThreadState::trim_queue() {
  assert(_evac_failure_cl != NULL, "not set");

  StarTask ref;
  do {
    // Drain the overflow stack first, so other threads can steal.
    while (_refs->pop_overflow(ref)) {
      deal_with_reference(ref);
    }

    while (_refs->pop_local(ref)) {
      deal_with_reference(ref);
    }
  } while (!_refs->is_empty());
}
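
// A note on termination: deal_with_reference() can push new entries (for
// example partial-array chunks) onto either the local deque or the overflow
// stack while we are draining, which is why the outer do/while re-checks
// is_empty() rather than trusting a single pass over both queues.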

void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
  StarTask stolen_task;
  while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
    assert(verify_task(stolen_task), "sanity");
    deal_with_reference(stolen_task);

    // We've just processed a reference and we might have made
    // available new entries on the queues. So we have to make sure
    // we drain the queues as necessary.
    trim_queue();
  }
}
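
// For context, a worker's evacuation loop elsewhere in G1 combines these two
// entry points roughly as sketched below. This is a hedged sketch, not code
// from this file; the closure and terminator names are assumptions:
//
//   do {
//     pss->trim_queue();                       // drain our own queues first
//     pss->steal_and_trim_queue(task_queues);  // then try to steal work
//   } while (!terminator->offer_termination());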

oop G1ParScanThreadState::copy_to_survivor_space(oop const old) {
  size_t word_sz = old->size();
  HeapRegion* from_region = _g1h->heap_region_containing_raw(old);
  // +1 to make the -1 indexes valid...
  int young_index = from_region->young_index_in_cset()+1;
  assert( (from_region->is_young() && young_index >  0) ||
         (!from_region->is_young() && young_index == 0), "invariant" );
  G1CollectorPolicy* g1p = _g1h->g1_policy();
  markOop m = old->mark();
  int age = m->has_displaced_mark_helper() ? m->displaced_mark_helper()->age()
                                           : m->age();
  GCAllocPurpose alloc_purpose = g1p->evacuation_destination(from_region, age,
                                                             word_sz);
  HeapWord* obj_ptr = allocate(alloc_purpose, word_sz);
#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    if (obj_ptr != NULL) {
      undo_allocation(alloc_purpose, obj_ptr, word_sz);
      obj_ptr = NULL;
      // ... (elided: the rest of the injected-failure path and the body of
      // copy_to_survivor_space) ...
  }
}

HeapWord* G1ParScanThreadState::allocate(GCAllocPurpose purpose, size_t word_sz) {
  HeapWord* obj = alloc_buffer(purpose)->allocate(word_sz);
  if (obj != NULL) {
    return obj;
  }
  return allocate_slow(purpose, word_sz);
}
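
// allocate_slow() is not shown in this section. As a rough sketch (an
// assumption about the usual PLAB slow path, not a quote of this file): when
// word_sz fits in a fresh buffer it would retire the current PLAB and carve
// the object out of a newly acquired one, and otherwise fall back to a
// direct, shared heap allocation for the single object.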

void G1ParScanThreadState::retire_alloc_buffers() {
  for (int ap = 0; ap < GCAllocPurposeCount; ++ap) {
    size_t waste = _alloc_buffers[ap]->words_remaining();
    add_to_alloc_buffer_waste(waste);
    _alloc_buffers[ap]->flush_stats_and_retire(_g1h->stats_for_purpose((GCAllocPurpose)ap),
                                               true /* end_of_gc */,
                                               false /* retain */);
  }
}
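
// words_remaining() is the unused tail of each retired PLAB; accumulating it
// via add_to_alloc_buffer_waste() is what ultimately feeds the "alloc" column
// under "waste (KiB)" in the termination-stats table printed above.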

template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
         "Reference should not be NULL here as such are never pushed to the task queue.");
  oop obj = oopDesc::load_decode_heap_oop_not_null(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning
  // more than one thread might claim the same card, so the same card may be
  // processed multiple times. Hence this in-CSet check has to be redone here.
  if (_g1h->in_cset_fast_test(obj)) {
    oop forwardee;
    if (obj->is_forwarded()) {
      forwardee = obj->forwardee();
    } else {
      forwardee = copy_to_survivor_space(obj);
    }
    assert(forwardee != NULL, "forwardee should not be NULL");
    oopDesc::encode_store_heap_oop(p, forwardee);
  }

  assert(obj != NULL, "Must be");
  update_rs(from, p, queue_num());
}
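
// Race outcome, concretely: if two workers claim the same card, the second
// may load *p after the first has already installed the forwardee. That
// forwardee lies outside the collection set, so in_cset_fast_test() fails
// and the second worker only repeats the harmless update_rs() call.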

inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does,
  // however, return the size of the object, which will be incorrect,
  // so we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}
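
// Worked example (illustrative): with ParGCArrayScanChunk = 50 and a
// 250-element array, successive visits scan [0,50), [50,100) and [100,150),
// each time pushing the remainder back with the to-space length advanced.
// At next_index == 150 the remainder is 100, which is not greater than
// 2 * ParGCArrayScanChunk, so the final visit restores the true length and
// scans [150,250) in one go, avoiding a tiny trailing chunk.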

template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use "raw" versions of "region_containing" because
    // "ref_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}
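
// The two cases above share one queue: a partial-array task is just the
// from-space oop with a tag bit folded into the pointer (see
// set_partial_array_mask() / clear_partial_array_mask() above), so
// has_partial_array_mask() can cheaply separate "scan this reference
// location" from "continue scanning this array".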

inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}