#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint queue_num, ReferenceProcessor* rp)
  : _g1h(g1h),
    _refs(g1h->task_queue(queue_num)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct_bs(g1h->g1_barrier_set()),
    _g1_rem(g1h->g1_rem_set()),
    _hash_seed(17), _queue_num(queue_num),
    _term_attempts(0),
    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
    _age_table(false), _scanner(g1h, rp),
    _strong_roots_time(0), _term_time(0),
    _no_more_space_in_old(false)
{
  _scanner.set_par_scan_thread_state(this);
  // We allocate one entry more than G1YoungSurvRateNumRegions, since we
  // "sacrifice" entry 0 to keep track of surviving bytes for non-young
  // regions (where the age is -1).
  // We also add a few elements at the beginning and at the end in an
  // attempt to eliminate cache contention.
  uint real_length = 1 + _g1h->g1_policy()->young_cset_region_length();
  uint array_length = PADDING_ELEM_NUM +
                      real_length +
                      PADDING_ELEM_NUM;
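  // Resulting layout, with only the middle real_length entries in use
  // (_surviving_young_words points at the first of them):
  //
  //   [ PADDING_ELEM_NUM pads | non-young | region 1 .. region N | PADDING_ELEM_NUM pads ]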
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL) {
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  }
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, (size_t) real_length * sizeof(size_t));

  _plab_allocator = G1PLABAllocator::create_allocator(_g1h->allocator());

  // ... (remainder of the constructor elided) ...
}

// ... (code between the constructor and allocate_in_next_plab() elided) ...
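// The excerpt resumes inside allocate_in_next_plab(). The signature below is
// reconstructed from the call site in copy_to_survivor_space() and from the
// uses of the parameters in the body: the function tries to allocate word_sz
// words in the "next" space after an allocation for *dest failed, updating
// *dest on success and returning NULL when there is no other space to try.
HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                      InCSetState* dest,
                                                      size_t word_sz,
                                                      AllocationContext_t const context,
                                                      bool previous_plab_refill_failed) {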
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                        word_sz,
                                                        context,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.
    // Only consider failed PLAB refill here: failed inline allocations are
    // typically large, so not indicative of remaining space.
    if (previous_plab_refill_failed) {
      _tenuring_threshold = 0;
    }

    if (obj_ptr != NULL) {
      dest->set_old();
    } else {
      // We just failed to allocate in old gen. The same idea as explained above
      // for making survivor gen unavailable for allocation applies for old gen.
      _no_more_space_in_old = plab_refill_in_old_failed;
    }
    return obj_ptr;
  } else {
    _no_more_space_in_old = previous_plab_refill_failed;
    assert(dest->is_old(), err_msg("Unexpected dest: " CSETSTATE_FORMAT, dest->value()));
    // No other space to try.
    return NULL;
  }
}
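
// Decide the destination state for a live object: an object whose age is
// below the current tenuring threshold keeps its young (survivor)
// destination; anything else is sent to the destination registered for its
// cset state via dest(). The age is read from the displaced mark word when
// the mark is currently displaced (e.g. because the object is locked).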
InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
  if (state.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return state;
    }
  }
  return dest(state);
}
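
// Copy the live object old, whose mark word is old_mark and whose region has
// cset state state, into survivor or old space depending on its age and the
// current tenuring threshold, and return its new location; on allocation
// failure the evacuation-failure path is taken instead.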
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
  // +1 to make the -1 indexes valid...
  const int young_index = from_region->young_index_in_cset() + 1;
  assert((from_region->is_young() && young_index > 0) ||
         (!from_region->is_young() && young_index == 0), "invariant");
  const AllocationContext_t context = from_region->allocation_context();

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  // The second clause (dest_state.is_old()) prevents a premature evacuation
  // failure when there is still space in survivor but old gen is full.
  if (_no_more_space_in_old && dest_state.is_old()) {
    return handle_evacuation_failure_par(old, old_mark);
  }
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
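  // If the fast path fails we fall back in stages: first an inline allocation
  // or a PLAB refill in the current destination, then the next space via
  // allocate_in_next_plab(), and only then the evacuation-failure path.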
  if (obj_ptr == NULL) {
    bool plab_refill_failed = false;
    obj_ptr = _plab_allocator->allocate_inline_or_new_plab(dest_state, word_sz, context, &plab_refill_failed);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context, plab_refill_failed);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");

  // ... (remainder of copy_to_survivor_space() elided) ...