26 #include "gc/g1/g1Allocator.inline.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1CollectionSet.hpp"
29 #include "gc/g1/g1OopClosures.inline.hpp"
30 #include "gc/g1/g1ParScanThreadState.inline.hpp"
31 #include "gc/g1/g1RootClosures.hpp"
32 #include "gc/g1/g1StringDedup.hpp"
33 #include "gc/shared/gcTrace.hpp"
34 #include "gc/shared/taskqueue.inline.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/prefetch.inline.hpp"
39
40 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length)
41 : _g1h(g1h),
42 _refs(g1h->task_queue(worker_id)),
43 _dcq(&g1h->dirty_card_queue_set()),
44 _ct(g1h->card_table()),
45 _closures(NULL),
46 _hash_seed(17),
47 _worker_id(worker_id),
48 _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
49 _age_table(false),
50 _scanner(g1h, this),
51 _old_gen_is_full(false)
52 {
53 // we allocate G1YoungSurvRateNumRegions plus one entries, since
54 // we "sacrifice" entry 0 to keep track of surviving bytes for
55 // non-young regions (where the age is -1)
56 // We also add a few elements at the beginning and at the end in
57 // an attempt to eliminate cache contention
58 size_t real_length = 1 + young_cset_length;
59 size_t array_length = PADDING_ELEM_NUM +
60 real_length +
61 PADDING_ELEM_NUM;
62 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
63 if (_surviving_young_words_base == NULL)
64 vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
65 "Not enough space for young surv histo.");
66 _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
67 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
68
69 _plab_allocator = new G1PLABAllocator(_g1h->allocator());
70
121 } else {
122 oop p = RawAccess<>::oop_load(ref);
123 assert(_g1h->is_in_g1_reserved(p),
124 "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
125 }
126 return true;
127 }
128
129 bool G1ParScanThreadState::verify_task(StarTask ref) const {
130 if (ref.is_narrow()) {
131 return verify_ref((narrowOop*) ref);
132 } else {
133 return verify_ref((oop*) ref);
134 }
135 }
136 #endif // ASSERT
137
138 void G1ParScanThreadState::trim_queue() {
139 StarTask ref;
140 do {
141 // Drain the overflow stack first, so other threads can steal.
142 while (_refs->pop_overflow(ref)) {
143 if (!_refs->try_push_to_taskqueue(ref)) {
144 dispatch_reference(ref);
145 }
146 }
147
148 while (_refs->pop_local(ref)) {
149 dispatch_reference(ref);
150 }
151 } while (!_refs->is_empty());
152 }
153
154 HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
155 InCSetState* dest,
156 size_t word_sz,
157 bool previous_plab_refill_failed) {
158 assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
159 assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());
160
161 // Right now we only have two types of regions (young / old) so
162 // let's keep the logic here simple. We can generalize it when necessary.
163 if (dest->is_young()) {
164 bool plab_refill_in_old_failed = false;
165 HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
166 word_sz,
167 &plab_refill_in_old_failed);
168 // Make sure that we won't attempt to copy any other objects out
169 // of a survivor region (given that apparently we cannot allocate
170 // any new ones) to avoid coming into this slow path again and again.
297 const bool is_from_young = state.is_young();
298 const bool is_to_young = dest_state.is_young();
299 assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
300 "sanity");
301 assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
302 "sanity");
303 G1StringDedup::enqueue_from_evacuation(is_from_young,
304 is_to_young,
305 _worker_id,
306 obj);
307 }
308
309 _surviving_young_words[young_index] += word_sz;
310
311 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
312 // We keep track of the next start index in the length field of
313 // the to-space object. The actual length can be found in the
314 // length field of the from-space object.
315 arrayOop(obj)->set_length(0);
316 oop* old_p = set_partial_array_mask(old);
317 push_on_queue(old_p);
318 } else {
319 HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
320 _scanner.set_region(to_region);
321 obj->oop_iterate_backwards(&_scanner);
322 }
323 return obj;
324 } else {
325 _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
326 return forward_ptr;
327 }
328 }
329
330 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
331 assert(worker_id < _n_workers, "out of bounds access");
332 if (_states[worker_id] == NULL) {
333 _states[worker_id] = new G1ParScanThreadState(_g1h, worker_id, _young_cset_length);
334 }
335 return _states[worker_id];
336 }
337
|
26 #include "gc/g1/g1Allocator.inline.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1CollectionSet.hpp"
29 #include "gc/g1/g1OopClosures.inline.hpp"
30 #include "gc/g1/g1ParScanThreadState.inline.hpp"
31 #include "gc/g1/g1RootClosures.hpp"
32 #include "gc/g1/g1StringDedup.hpp"
33 #include "gc/shared/gcTrace.hpp"
34 #include "gc/shared/taskqueue.inline.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/prefetch.inline.hpp"
39
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct(g1h->card_table()),
    _closures(NULL),
    _plab_allocator(NULL),
    _age_table(false),
    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
    _scanner(g1h, this),
    _hash_seed(17),
    _worker_id(worker_id),
    // Hysteresis for queue trimming: start trimming only above ~2x the
    // target size and trim back down to the target, so a worker does not
    // re-enter trimming after every push.
    _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
    _stack_trim_lower_threshold(GCDrainStackTargetSize),
    _trim_ticks(),
    _old_gen_is_full(false)
{
  // We allocate young_cset_length + 1 entries, since entry 0 is
  // "sacrificed" to keep track of surviving bytes for non-young
  // regions (where the age is -1).
  // A few padding elements are also added at the beginning and at the
  // end in an attempt to eliminate cache line contention.
  size_t real_length = 1 + young_cset_length;
  size_t array_length = PADDING_ELEM_NUM +
                        real_length +
                        PADDING_ELEM_NUM;
  _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
  if (_surviving_young_words_base == NULL) {
    vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
                          "Not enough space for young surv histo.");
  }
  _surviving_young_words = _surviving_young_words_base + PADDING_ELEM_NUM;
  memset(_surviving_young_words, 0, real_length * sizeof(size_t));

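  // Per-destination PLABs (promotion local allocation buffers): most
  // evacuation copies are bump-pointer allocations into a thread-local
  // buffer, avoiding synchronization with other workers on each copy.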
  _plab_allocator = new G1PLABAllocator(_g1h->allocator());

// ... (excerpt resumes inside verify_ref(oop*), in the #ifdef ASSERT block) ...

  } else {
    oop p = RawAccess<>::oop_load(ref);
    assert(_g1h->is_in_g1_reserved(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  }
  return true;
}

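// A StarTask holds either a narrowOop* or an oop* slot, tagged so the
// pointer width can be recovered; dispatch to the matching verify_ref
// overload accordingly.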
bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT

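// trim_queue_to_threshold(0) drains both the overflow stack and the local
// task queue down to empty; the loop repeats because draining evacuates
// objects, which can in turn push new tasks.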
void G1ParScanThreadState::trim_queue() {
  do {
    // Fully drain the queue.
    trim_queue_to_threshold(0);
  } while (!_refs->is_empty());
}

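// Slow path, reached when allocation for *dest failed: retry the copy in
// the "next" destination. A young (survivor) destination falls back to an
// old-generation PLAB; a full old generation has no further fallback.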
HeapWord* G1ParScanThreadState::allocate_in_next_plab(InCSetState const state,
                                                      InCSetState* dest,
                                                      size_t word_sz,
                                                      bool previous_plab_refill_failed) {
  assert(state.is_in_cset_or_humongous(), "Unexpected state: " CSETSTATE_FORMAT, state.value());
  assert(dest->is_in_cset_or_humongous(), "Unexpected dest: " CSETSTATE_FORMAT, dest->value());

  // Right now we only have two types of regions (young / old) so
  // let's keep the logic here simple. We can generalize it when necessary.
  if (dest->is_young()) {
    bool plab_refill_in_old_failed = false;
    HeapWord* const obj_ptr = _plab_allocator->allocate(InCSetState::Old,
                                                        word_sz,
                                                        &plab_refill_in_old_failed);
    // Make sure that we won't attempt to copy any other objects out
    // of a survivor region (given that apparently we cannot allocate
    // any new ones) to avoid coming into this slow path again and again.

// ... (excerpt resumes inside copy_to_survivor_space) ...

      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    _surviving_young_words[young_index] += word_sz;

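    // Big object arrays are scanned in ParGCArrayScanChunk-sized pieces:
    // the to-space length field tracks the next chunk's start index, and
    // the masked from-space pointer acts as a continuation task that
    // scans one chunk and re-enqueues itself while work remains.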
    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      do_oop_partial_array(old_p);
    } else {
      HeapRegion* const to_region = _g1h->heap_region_containing(obj_ptr);
      _scanner.set_region(to_region);
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz);
    return forward_ptr;
  }
}

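// Per-worker states are created lazily, so a worker that never runs during
// this evacuation never allocates its scan state or surviving-words array.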
G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
  assert(worker_id < _n_workers, "out of bounds access");
  if (_states[worker_id] == NULL) {
    _states[worker_id] = new G1ParScanThreadState(_g1h, worker_id, _young_cset_length);
  }
  return _states[worker_id];
}