8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
27
28 #include "gc/g1/g1CollectedHeap.hpp"
29 #include "gc/g1/g1CollectorState.hpp"
30 #include "gc/g1/g1ConcurrentMark.inline.hpp"
31 #include "gc/g1/g1SATBCardTableModRefBS.hpp"
32 #include "gc/g1/heapRegionManager.inline.hpp"
33 #include "gc/g1/heapRegionSet.inline.hpp"
34 #include "gc/shared/taskqueue.hpp"
35 #include "runtime/orderAccess.inline.hpp"
36
37 G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
38 switch (dest.value()) {
39 case InCSetState::Young:
40 return &_survivor_evac_stats;
41 case InCSetState::Old:
42 return &_old_evac_stats;
43 default:
44 ShouldNotReachHere();
45 return NULL; // Keep some compilers happy
46 }
47 }
48
49 size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
50 size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(G1CollectedHeap::heap()->workers()->active_workers());
51 // Prevent humongous PLAB sizes for two reasons:
110 // write barrier never queues anything when updating objects on this
111 // block. It is assumed (and in fact we assert) that the block
112 // belongs to a young region.
113 inline void
114 G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
115 assert_heap_not_locked();
116
117 // Assign the containing region to containing_hr so that we don't
118 // have to keep calling heap_region_containing() in the
119 // asserts below.
120 DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
121 assert(word_size > 0, "pre-condition");
122 assert(containing_hr->is_in(start), "it should contain start");
123 assert(containing_hr->is_young(), "it should be young");
124 assert(!containing_hr->is_humongous(), "it should not be humongous");
125
126 HeapWord* end = start + word_size;
127 assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
128
129 MemRegion mr(start, end);
130 g1_barrier_set()->g1_mark_as_young(mr);
131 }
132
133 inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
134 return _task_queues->queue(i);
135 }
136
137 inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
138 return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
139 }
140
141 inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
142 return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
143 }
144
145 inline bool G1CollectedHeap::is_in_cset(oop obj) {
146 return _in_cset_fast_test.is_in_cset((HeapWord*)obj);
147 }
148
149 bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
150 return _in_cset_fast_test.is_in_cset(hr);
222 return false;
223 }
224 _evacuation_failure_alot_count = 0;
225 return true;
226 }
227
228 inline void G1CollectedHeap::reset_evacuation_should_fail() {
229 if (G1EvacuationFailureALot) {
230 _evacuation_failure_alot_gc_number = total_collections();
231 _evacuation_failure_alot_count = 0;
232 _evacuation_failure_alot_for_current_gc = false;
233 }
234 }
235 #endif // #ifndef PRODUCT
236
237 inline bool G1CollectedHeap::is_in_young(const oop obj) {
238 if (obj == NULL) {
239 return false;
240 }
241 return heap_region_containing(obj)->is_young();
242 }
243
244 // We don't need barriers for initializing stores to objects
245 // in the young gen: for the SATB pre-barrier, there is no
246 // pre-value that needs to be remembered; for the remembered-set
247 // update logging post-barrier, we don't maintain remembered set
248 // information for young gen objects.
249 inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
250 return is_in_young(new_obj);
251 }
252
253 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
254 if (obj == NULL) {
255 return false;
256 }
257 return is_obj_dead(obj, heap_region_containing(obj));
258 }
259
260 inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
261 if (obj == NULL) {
262 return false;
263 }
264 return is_obj_ill(obj, heap_region_containing(obj));
265 }
266
// Records whether the humongous object starting in the given region is a
// candidate for eager reclaim. Only valid for a starts-humongous region.
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}
|
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
26 #define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
27
28 #include "gc/g1/g1BarrierSet.hpp"
29 #include "gc/g1/g1CollectedHeap.hpp"
30 #include "gc/g1/g1CollectorState.hpp"
31 #include "gc/g1/g1ConcurrentMark.inline.hpp"
32 #include "gc/g1/heapRegionManager.inline.hpp"
33 #include "gc/g1/heapRegionSet.inline.hpp"
34 #include "gc/shared/taskqueue.hpp"
35 #include "runtime/orderAccess.inline.hpp"
36
37 G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
38 switch (dest.value()) {
39 case InCSetState::Young:
40 return &_survivor_evac_stats;
41 case InCSetState::Old:
42 return &_old_evac_stats;
43 default:
44 ShouldNotReachHere();
45 return NULL; // Keep some compilers happy
46 }
47 }
48
49 size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
50 size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(G1CollectedHeap::heap()->workers()->active_workers());
51 // Prevent humongous PLAB sizes for two reasons:
110 // write barrier never queues anything when updating objects on this
111 // block. It is assumed (and in fact we assert) that the block
112 // belongs to a young region.
113 inline void
114 G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
115 assert_heap_not_locked();
116
117 // Assign the containing region to containing_hr so that we don't
118 // have to keep calling heap_region_containing() in the
119 // asserts below.
120 DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
121 assert(word_size > 0, "pre-condition");
122 assert(containing_hr->is_in(start), "it should contain start");
123 assert(containing_hr->is_young(), "it should be young");
124 assert(!containing_hr->is_humongous(), "it should not be humongous");
125
126 HeapWord* end = start + word_size;
127 assert(containing_hr->is_in(end - 1), "it should also contain end - 1");
128
129 MemRegion mr(start, end);
130 g1_card_table()->g1_mark_as_young(mr);
131 }
132
133 inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
134 return _task_queues->queue(i);
135 }
136
137 inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
138 return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
139 }
140
141 inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
142 return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
143 }
144
145 inline bool G1CollectedHeap::is_in_cset(oop obj) {
146 return _in_cset_fast_test.is_in_cset((HeapWord*)obj);
147 }
148
149 bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
150 return _in_cset_fast_test.is_in_cset(hr);
222 return false;
223 }
224 _evacuation_failure_alot_count = 0;
225 return true;
226 }
227
228 inline void G1CollectedHeap::reset_evacuation_should_fail() {
229 if (G1EvacuationFailureALot) {
230 _evacuation_failure_alot_gc_number = total_collections();
231 _evacuation_failure_alot_count = 0;
232 _evacuation_failure_alot_for_current_gc = false;
233 }
234 }
235 #endif // #ifndef PRODUCT
236
237 inline bool G1CollectedHeap::is_in_young(const oop obj) {
238 if (obj == NULL) {
239 return false;
240 }
241 return heap_region_containing(obj)->is_young();
242 }
243
244 inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
245 if (obj == NULL) {
246 return false;
247 }
248 return is_obj_dead(obj, heap_region_containing(obj));
249 }
250
251 inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
252 if (obj == NULL) {
253 return false;
254 }
255 return is_obj_ill(obj, heap_region_containing(obj));
256 }
257
// Records whether the humongous object starting in the given region is a
// candidate for eager reclaim. Only valid for a starts-humongous region.
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}
|