31 #include "gc/g1/g1RootClosures.hpp"
32 #include "gc/g1/g1StringDedup.hpp"
33 #include "gc/shared/gcTrace.hpp"
34 #include "gc/shared/taskqueue.inline.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/prefetch.inline.hpp"
39
40 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
41 uint worker_id,
42 size_t young_cset_length,
43 size_t optional_cset_length)
44 : _g1h(g1h),
45 _refs(g1h->task_queue(worker_id)),
46 _dcq(&g1h->dirty_card_queue_set()),
47 _ct(g1h->card_table()),
48 _closures(NULL),
49 _plab_allocator(NULL),
50 _age_table(false),
51 _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
52 _scanner(g1h, this),
53 _worker_id(worker_id),
54 _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
55 _stack_trim_lower_threshold(GCDrainStackTargetSize),
56 _trim_ticks(),
57 _old_gen_is_full(false),
58 _num_optional_regions(optional_cset_length)
59 {
60 // we allocate G1YoungSurvRateNumRegions plus one entries, since
61 // we "sacrifice" entry 0 to keep track of surviving bytes for
62 // non-young regions (where the age is -1)
63 // We also add a few elements at the beginning and at the end in
64 // an attempt to eliminate cache contention
65 size_t real_length = 1 + young_cset_length;
66 size_t array_length = PADDING_ELEM_NUM +
67 real_length +
68 PADDING_ELEM_NUM;
69 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
70 if (_surviving_young_words_base == NULL)
71 vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
74 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
75
76 _plab_allocator = new G1PLABAllocator(_g1h->allocator());
77
78 _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
79 // The dest for Young is used when the objects are aged enough to
80 // need to be moved to the next space.
81 _dest[InCSetState::Young] = InCSetState::Old;
82 _dest[InCSetState::Old] = InCSetState::Old;
83
84 _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
85
86 _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];
87 }
88
89 // Pass locally gathered statistics to global state.
90 void G1ParScanThreadState::flush(size_t* surviving_young_words) {
91 _dcq.flush();
92 // Update allocation statistics.
93 _plab_allocator->flush_and_retire_stats();
94 _g1h->g1_policy()->record_age_table(&_age_table);
95
96 uint length = _g1h->collection_set()->young_region_length();
97 for (uint region_index = 0; region_index < length; region_index++) {
98 surviving_young_words[region_index] += _surviving_young_words[region_index];
99 }
100 }
101
// Release the per-worker resources acquired in the constructor.
G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;                                  // PLAB allocator created in the ctor
  delete _closures;                                        // closures from create_root_closures()
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);  // padded surviving-words array
  delete[] _oops_into_optional_regions;                    // per-optional-region chunked lists
}
108
109 size_t G1ParScanThreadState::lab_waste_words() const {
110 return _plab_allocator->waste();
111 }
112
113 size_t G1ParScanThreadState::lab_undo_waste_words() const {
114 return _plab_allocator->undo_waste();
355 if (pss == NULL) {
356 continue;
357 }
358
359 pss->flush(_surviving_young_words_total);
360 delete pss;
361 _states[worker_index] = NULL;
362 }
363 _flushed = true;
364 }
365
366 void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
367 for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
368 G1ParScanThreadState* pss = _states[worker_index];
369
370 if (pss == NULL) {
371 continue;
372 }
373
374 size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
375 _g1h->g1_policy()->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
376 }
377 }
378
379 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
380 assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
381
382 oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
383 if (forward_ptr == NULL) {
384 // Forward-to-self succeeded. We are the "owner" of the object.
385 HeapRegion* r = _g1h->heap_region_containing(old);
386
387 if (!r->evacuation_failed()) {
388 r->set_evacuation_failed(true);
389 _g1h->hr_printer()->evac_failure(r);
390 }
391
392 _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
393
394 G1ScanInYoungSetter x(&_scanner, r->is_young());
395 old->oop_iterate_backwards(&_scanner);
|
31 #include "gc/g1/g1RootClosures.hpp"
32 #include "gc/g1/g1StringDedup.hpp"
33 #include "gc/shared/gcTrace.hpp"
34 #include "gc/shared/taskqueue.inline.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/prefetch.inline.hpp"
39
40 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
41 uint worker_id,
42 size_t young_cset_length,
43 size_t optional_cset_length)
44 : _g1h(g1h),
45 _refs(g1h->task_queue(worker_id)),
46 _dcq(&g1h->dirty_card_queue_set()),
47 _ct(g1h->card_table()),
48 _closures(NULL),
49 _plab_allocator(NULL),
50 _age_table(false),
51 _tenuring_threshold(g1h->policy()->tenuring_threshold()),
52 _scanner(g1h, this),
53 _worker_id(worker_id),
54 _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
55 _stack_trim_lower_threshold(GCDrainStackTargetSize),
56 _trim_ticks(),
57 _old_gen_is_full(false),
58 _num_optional_regions(optional_cset_length)
59 {
60 // we allocate G1YoungSurvRateNumRegions plus one entries, since
61 // we "sacrifice" entry 0 to keep track of surviving bytes for
62 // non-young regions (where the age is -1)
63 // We also add a few elements at the beginning and at the end in
64 // an attempt to eliminate cache contention
65 size_t real_length = 1 + young_cset_length;
66 size_t array_length = PADDING_ELEM_NUM +
67 real_length +
68 PADDING_ELEM_NUM;
69 _surviving_young_words_base = NEW_C_HEAP_ARRAY(size_t, array_length, mtGC);
70 if (_surviving_young_words_base == NULL)
71 vm_exit_out_of_memory(array_length * sizeof(size_t), OOM_MALLOC_ERROR,
74 memset(_surviving_young_words, 0, real_length * sizeof(size_t));
75
76 _plab_allocator = new G1PLABAllocator(_g1h->allocator());
77
78 _dest[InCSetState::NotInCSet] = InCSetState::NotInCSet;
79 // The dest for Young is used when the objects are aged enough to
80 // need to be moved to the next space.
81 _dest[InCSetState::Young] = InCSetState::Old;
82 _dest[InCSetState::Old] = InCSetState::Old;
83
84 _closures = G1EvacuationRootClosures::create_root_closures(this, _g1h);
85
86 _oops_into_optional_regions = new G1OopStarChunkedList[_num_optional_regions];
87 }
88
89 // Pass locally gathered statistics to global state.
90 void G1ParScanThreadState::flush(size_t* surviving_young_words) {
91 _dcq.flush();
92 // Update allocation statistics.
93 _plab_allocator->flush_and_retire_stats();
94 _g1h->policy()->record_age_table(&_age_table);
95
96 uint length = _g1h->collection_set()->young_region_length();
97 for (uint region_index = 0; region_index < length; region_index++) {
98 surviving_young_words[region_index] += _surviving_young_words[region_index];
99 }
100 }
101
// Release the per-worker resources acquired in the constructor.
G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;                                  // PLAB allocator created in the ctor
  delete _closures;                                        // closures from create_root_closures()
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);  // padded surviving-words array
  delete[] _oops_into_optional_regions;                    // per-optional-region chunked lists
}
108
109 size_t G1ParScanThreadState::lab_waste_words() const {
110 return _plab_allocator->waste();
111 }
112
113 size_t G1ParScanThreadState::lab_undo_waste_words() const {
114 return _plab_allocator->undo_waste();
355 if (pss == NULL) {
356 continue;
357 }
358
359 pss->flush(_surviving_young_words_total);
360 delete pss;
361 _states[worker_index] = NULL;
362 }
363 _flushed = true;
364 }
365
366 void G1ParScanThreadStateSet::record_unused_optional_region(HeapRegion* hr) {
367 for (uint worker_index = 0; worker_index < _n_workers; ++worker_index) {
368 G1ParScanThreadState* pss = _states[worker_index];
369
370 if (pss == NULL) {
371 continue;
372 }
373
374 size_t used_memory = pss->oops_into_optional_region(hr)->used_memory();
375 _g1h->policy()->phase_times()->record_or_add_thread_work_item(G1GCPhaseTimes::OptScanRS, worker_index, used_memory, G1GCPhaseTimes::OptCSetUsedMemory);
376 }
377 }
378
379 oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
380 assert(_g1h->is_in_cset(old), "Object " PTR_FORMAT " should be in the CSet", p2i(old));
381
382 oop forward_ptr = old->forward_to_atomic(old, m, memory_order_relaxed);
383 if (forward_ptr == NULL) {
384 // Forward-to-self succeeded. We are the "owner" of the object.
385 HeapRegion* r = _g1h->heap_region_containing(old);
386
387 if (!r->evacuation_failed()) {
388 r->set_evacuation_failed(true);
389 _g1h->hr_printer()->evac_failure(r);
390 }
391
392 _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);
393
394 G1ScanInYoungSetter x(&_scanner, r->is_young());
395 old->oop_iterate_backwards(&_scanner);
|