 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.inline.hpp"
#include "gc/g1/g1CollectionSet.hpp"
#include "gc/g1/g1OopClosures.inline.hpp"
#include "gc/g1/g1ParScanThreadState.inline.hpp"
#include "gc/g1/g1RootClosures.hpp"
#include "gc/g1/g1StringDedup.hpp"
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/prefetch.inline.hpp"

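// Per-worker state for a single evacuation pause: the worker's task queue
// of references to scan, its dirty card queue, its PLAB allocator, and
// per-region statistics on surviving bytes.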
G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h, uint worker_id, size_t young_cset_length)
  : _g1h(g1h),
    _refs(g1h->task_queue(worker_id)),
    _dcq(&g1h->dirty_card_queue_set()),
    _ct(g1h->card_table()),
    _closures(NULL),
    _hash_seed(17),
    _worker_id(worker_id),
    _tenuring_threshold(g1h->g1_policy()->tenuring_threshold()),
    _age_table(false),
    _scanner(g1h, this),
    _old_gen_is_full(false)
{
  // We allocate one entry more than G1YoungSurvRateNumRegions, since we
  // "sacrifice" entry 0 to keep track of surviving bytes for non-young
  // regions (where the age is -1). We also add a few elements at the
  // beginning and at the end in order to be able to save a couple of
  // expensive checks in the loop below.

  // ... [intervening lines elided; the loop below is the tail of
  // G1ParScanThreadState::flush()] ...
  for (uint region_index = 0; region_index < length; region_index++) {
    surviving_young_words[region_index] += _surviving_young_words[region_index];
  }
}

G1ParScanThreadState::~G1ParScanThreadState() {
  delete _plab_allocator;
  delete _closures;
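  // _surviving_young_words was allocated with padding on both ends (see the
  // constructor), so free through the saved base pointer.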
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
}

void G1ParScanThreadState::waste(size_t& wasted, size_t& undo_wasted) {
  _plab_allocator->waste(wasted, undo_wasted);
}

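// Debug-only sanity checks for task queue entries. An entry is either the
// address of a (possibly compressed) oop slot, or a pointer tagged with the
// partial-array mask, which marks an object array that is being scanned in
// parts.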
#ifdef ASSERT
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
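  // Load the oop without GC barriers; verification wants the raw contents
  // of the slot.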
  oop p = RawAccess<>::oop_load(ref);
  assert(_g1h->is_in_g1_reserved(p),
         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  return true;
}

bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->is_in_cset(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  } else {
    oop p = RawAccess<>::oop_load(ref);
    assert(_g1h->is_in_g1_reserved(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  }
  return true;
}

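// A StarTask records whether it holds a narrowOop* or an oop*, so it can be
// dispatched to the matching verify_ref() overload.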
bool G1ParScanThreadState::verify_task(StarTask ref) const {
  if (ref.is_narrow()) {
    return verify_ref((narrowOop*) ref);
  } else {
    return verify_ref((oop*) ref);
  }
}
#endif // ASSERT

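// Repeatedly drain both the unbounded overflow stack and the bounded task
// queue until no work remains for this worker.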
void G1ParScanThreadState::trim_queue() {
  StarTask ref;
  do {
    // Drain the overflow stack first, so other threads can steal.
    while (_refs->pop_overflow(ref)) {