1 /*
2 * Copyright (c) 2014, 2019, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
26 #include "gc/g1/g1Allocator.inline.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1CollectionSet.hpp"
29 #include "gc/g1/g1OopClosures.inline.hpp"
30 #include "gc/g1/g1ParScanThreadState.inline.hpp"
31 #include "gc/g1/g1RootClosures.hpp"
32 #include "gc/g1/g1StringDedup.hpp"
33 #include "gc/g1/g1Trace.hpp"
34 #include "gc/shared/taskqueue.inline.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/prefetch.inline.hpp"
39
40 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
41 G1RedirtyCardsQueueSet* rdcqs,
42 uint worker_id,
43 size_t young_cset_length,
44 size_t optional_cset_length)
45 : _g1h(g1h),
46 _refs(g1h->task_queue(worker_id)),
47 _rdcq(rdcqs),
48 _ct(g1h->card_table()),
49 _closures(NULL),
50 _plab_allocator(NULL),
51 _age_table(false),
52 _tenuring_threshold(g1h->policy()->tenuring_threshold()),
53 _scanner(g1h, this),
54 _worker_id(worker_id),
55 _last_enqueued_card(SIZE_MAX),
56 _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
57 _stack_trim_lower_threshold(GCDrainStackTargetSize),
58 _trim_ticks(),
59 _surviving_young_words_base(NULL),
60 _surviving_young_words(NULL),
61 _surviving_words_length(young_cset_length + 1),
62 _old_gen_is_full(false),
63 _num_optional_regions(optional_cset_length),
64 _numa(g1h->numa()),
65 _obj_alloc_stat(NULL)
66 {
102 return sum;
103 }
104
G1ParScanThreadState::~G1ParScanThreadState() {
  // Release the per-worker scratch structures owned by this state object.
  delete _plab_allocator;
  delete _closures;
  // NOTE(review): these arrays are presumably allocated with NEW_C_HEAP_ARRAY
  // in the constructor body (outside this view) — freed with the matching macro.
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}
112
// Total PLAB waste, in words, as tracked by this worker's PLAB allocator.
size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}
116
// Total PLAB undo-waste, in words, as tracked by this worker's PLAB allocator.
size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}
120
121 #ifdef ASSERT
// Debug-only sanity check for a compressed-oop task queue entry: the pointer
// must be non-NULL, compressed oops must be enabled, and the entry must not
// carry the partial-array mask (masked entries travel as oop*, handled by the
// overload below). The referenced object must lie in the G1 reserved heap.
// Always returns true so it can be used inside assert().
bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
  assert(ref != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
  oop p = RawAccess<>::oop_load(ref);
  assert(_g1h->is_in_g1_reserved(p),
         "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  return true;
}
131
// Debug-only sanity check for an uncompressed task queue entry. A masked
// entry denotes a partial-array task: after clearing the mask, the referenced
// array must be in the collection set (it has already been copied). An
// unmasked entry must load an object inside the G1 reserved heap.
// Always returns true so it can be used inside assert().
bool G1ParScanThreadState::verify_ref(oop* ref) const {
  assert(ref != NULL, "invariant");
  if (has_partial_array_mask(ref)) {
    // Must be in the collection set--it's already been copied.
    oop p = clear_partial_array_mask(ref);
    assert(_g1h->is_in_cset(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  } else {
    oop p = RawAccess<>::oop_load(ref);
    assert(_g1h->is_in_g1_reserved(p),
           "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
  }
  return true;
}
146
147 bool G1ParScanThreadState::verify_task(StarTask ref) const {
148 if (ref.is_narrow()) {
149 return verify_ref((narrowOop*) ref);
150 } else {
151 return verify_ref((oop*) ref);
152 }
153 }
154 #endif // ASSERT
155
156 void G1ParScanThreadState::trim_queue() {
157 StarTask ref;
158 do {
159 // Fully drain the queue.
160 trim_queue_to_threshold(0);
161 } while (!_refs->is_empty());
162 }
163
164 HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
165 size_t word_sz,
166 bool previous_plab_refill_failed,
167 uint node_index) {
168
169 assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());
170
171 // Right now we only have two types of regions (young / old) so
172 // let's keep the logic here simple. We can generalize it when necessary.
173 if (dest->is_young()) {
174 bool plab_refill_in_old_failed = false;
175 HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
176 word_sz,
177 &plab_refill_in_old_failed,
178 node_index);
179 // Make sure that we won't attempt to copy any other objects out
180 // of a survivor region (given that apparently we cannot allocate
181 // any new ones) to avoid coming into this slow path again and again.
313 if (G1StringDedup::is_enabled()) {
314 const bool is_from_young = region_attr.is_young();
315 const bool is_to_young = dest_attr.is_young();
316 assert(is_from_young == from_region->is_young(),
317 "sanity");
318 assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
319 "sanity");
320 G1StringDedup::enqueue_from_evacuation(is_from_young,
321 is_to_young,
322 _worker_id,
323 obj);
324 }
325
326 _surviving_young_words[young_index] += word_sz;
327
328 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
329 // We keep track of the next start index in the length field of
330 // the to-space object. The actual length can be found in the
331 // length field of the from-space object.
332 arrayOop(obj)->set_length(0);
333 oop* old_p = set_partial_array_mask(old);
334 do_oop_partial_array(old_p);
335 } else {
336 G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
337 obj->oop_iterate_backwards(&_scanner);
338 }
339 return obj;
340 } else {
341 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
342 return forward_ptr;
343 }
344 }
345
346 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
347 assert(worker_id < _n_workers, "out of bounds access");
348 if (_states[worker_id] == NULL) {
349 _states[worker_id] =
350 new G1ParScanThreadState(_g1h, _rdcqs, worker_id, _young_cset_length, _optional_cset_length);
351 }
352 return _states[worker_id];
353 }
354
|
1 /*
2 * Copyright (c) 2014, 2020, Oracle and/or its affiliates. All rights reserved.
3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
4 *
5 * This code is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
26 #include "gc/g1/g1Allocator.inline.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1CollectionSet.hpp"
29 #include "gc/g1/g1OopClosures.inline.hpp"
30 #include "gc/g1/g1ParScanThreadState.inline.hpp"
31 #include "gc/g1/g1RootClosures.hpp"
32 #include "gc/g1/g1StringDedup.hpp"
33 #include "gc/g1/g1Trace.hpp"
34 #include "gc/shared/taskqueue.inline.hpp"
35 #include "memory/allocation.inline.hpp"
36 #include "oops/access.inline.hpp"
37 #include "oops/oop.inline.hpp"
38 #include "runtime/prefetch.inline.hpp"
39
40 G1ParScanThreadState::G1ParScanThreadState(G1CollectedHeap* g1h,
41 G1RedirtyCardsQueueSet* rdcqs,
42 uint worker_id,
43 size_t young_cset_length,
44 size_t optional_cset_length)
45 : _g1h(g1h),
46 _task_queue(g1h->task_queue(worker_id)),
47 _rdcq(rdcqs),
48 _ct(g1h->card_table()),
49 _closures(NULL),
50 _plab_allocator(NULL),
51 _age_table(false),
52 _tenuring_threshold(g1h->policy()->tenuring_threshold()),
53 _scanner(g1h, this),
54 _worker_id(worker_id),
55 _last_enqueued_card(SIZE_MAX),
56 _stack_trim_upper_threshold(GCDrainStackTargetSize * 2 + 1),
57 _stack_trim_lower_threshold(GCDrainStackTargetSize),
58 _trim_ticks(),
59 _surviving_young_words_base(NULL),
60 _surviving_young_words(NULL),
61 _surviving_words_length(young_cset_length + 1),
62 _old_gen_is_full(false),
63 _num_optional_regions(optional_cset_length),
64 _numa(g1h->numa()),
65 _obj_alloc_stat(NULL)
66 {
102 return sum;
103 }
104
G1ParScanThreadState::~G1ParScanThreadState() {
  // Release the per-worker scratch structures owned by this state object.
  delete _plab_allocator;
  delete _closures;
  // NOTE(review): these arrays are presumably allocated with NEW_C_HEAP_ARRAY
  // in the constructor body (outside this view) — freed with the matching macro.
  FREE_C_HEAP_ARRAY(size_t, _surviving_young_words_base);
  delete[] _oops_into_optional_regions;
  FREE_C_HEAP_ARRAY(size_t, _obj_alloc_stat);
}
112
// Total PLAB waste, in words, as tracked by this worker's PLAB allocator.
size_t G1ParScanThreadState::lab_waste_words() const {
  return _plab_allocator->waste();
}
116
// Total PLAB undo-waste, in words, as tracked by this worker's PLAB allocator.
size_t G1ParScanThreadState::lab_undo_waste_words() const {
  return _plab_allocator->undo_waste();
}
120
121 #ifdef ASSERT
// Debug-only: a narrow-oop task must be a non-NULL compressed reference
// (so compressed oops must be enabled) to an object inside the G1 reserved
// heap.
void G1ParScanThreadState::verify_task(narrowOop* task) const {
  assert(task != NULL, "invariant");
  assert(UseCompressedOops, "sanity");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_g1_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}
129
// Debug-only: an uncompressed-oop task must be non-NULL and load an object
// inside the G1 reserved heap.
void G1ParScanThreadState::verify_task(oop* task) const {
  assert(task != NULL, "invariant");
  oop p = RawAccess<>::oop_load(task);
  assert(_g1h->is_in_g1_reserved(p),
         "task=" PTR_FORMAT " p=" PTR_FORMAT, p2i(task), p2i(p));
}
136
// Debug-only: the source array of a partial-array scan task must be in the
// collection set.
void G1ParScanThreadState::verify_task(PartialArrayScanTask task) const {
  // Must be in the collection set--it's already been copied.
  oop p = task.to_source_array();
  assert(_g1h->is_in_cset(p), "p=" PTR_FORMAT, p2i(p));
}
142
143 void G1ParScanThreadState::verify_task(ScannerTask task) const {
144 if (task.is_narrow_oop_ptr()) {
145 verify_task(task.to_narrow_oop_ptr());
146 } else if (task.is_oop_ptr()) {
147 verify_task(task.to_oop_ptr());
148 } else if (task.is_partial_array_task()) {
149 verify_task(task.to_partial_array_task());
150 } else {
151 ShouldNotReachHere();
152 }
153 }
154 #endif // ASSERT
155
156 void G1ParScanThreadState::trim_queue() {
157 do {
158 // Fully drain the queue.
159 trim_queue_to_threshold(0);
160 } while (!_task_queue->is_empty());
161 }
162
163 HeapWord* G1ParScanThreadState::allocate_in_next_plab(G1HeapRegionAttr* dest,
164 size_t word_sz,
165 bool previous_plab_refill_failed,
166 uint node_index) {
167
168 assert(dest->is_in_cset_or_humongous(), "Unexpected dest: %s region attr", dest->get_type_str());
169
170 // Right now we only have two types of regions (young / old) so
171 // let's keep the logic here simple. We can generalize it when necessary.
172 if (dest->is_young()) {
173 bool plab_refill_in_old_failed = false;
174 HeapWord* const obj_ptr = _plab_allocator->allocate(G1HeapRegionAttr::Old,
175 word_sz,
176 &plab_refill_in_old_failed,
177 node_index);
178 // Make sure that we won't attempt to copy any other objects out
179 // of a survivor region (given that apparently we cannot allocate
180 // any new ones) to avoid coming into this slow path again and again.
312 if (G1StringDedup::is_enabled()) {
313 const bool is_from_young = region_attr.is_young();
314 const bool is_to_young = dest_attr.is_young();
315 assert(is_from_young == from_region->is_young(),
316 "sanity");
317 assert(is_to_young == _g1h->heap_region_containing(obj)->is_young(),
318 "sanity");
319 G1StringDedup::enqueue_from_evacuation(is_from_young,
320 is_to_young,
321 _worker_id,
322 obj);
323 }
324
325 _surviving_young_words[young_index] += word_sz;
326
327 if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
328 // We keep track of the next start index in the length field of
329 // the to-space object. The actual length can be found in the
330 // length field of the from-space object.
331 arrayOop(obj)->set_length(0);
332 do_partial_array(PartialArrayScanTask(old));
333 } else {
334 G1ScanInYoungSetter x(&_scanner, dest_attr.is_young());
335 obj->oop_iterate_backwards(&_scanner);
336 }
337 return obj;
338 } else {
339 _plab_allocator->undo_allocation(dest_attr, obj_ptr, word_sz, node_index);
340 return forward_ptr;
341 }
342 }
343
344 G1ParScanThreadState* G1ParScanThreadStateSet::state_for_worker(uint worker_id) {
345 assert(worker_id < _n_workers, "out of bounds access");
346 if (_states[worker_id] == NULL) {
347 _states[worker_id] =
348 new G1ParScanThreadState(_g1h, _rdcqs, worker_id, _young_cset_length, _optional_cset_length);
349 }
350 return _states[worker_id];
351 }
352
|