#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr), p2i(_reserved.start()), p2i(_reserved.end())));
  return (uint)(pointer_delta(addr, _reserved.start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}
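
// Worked example (illustrative figures, not taken from this file): with 1 MB
// regions, HeapRegion::LogOfHRGrainBytes is 20. For an address that lies
// 5*M + 100 bytes past _reserved.start(), pointer_delta() with an element
// size of one byte yields the byte offset 5*M + 100, and
// (5*M + 100) >> 20 == 5, so addr_to_region() returns region index 5.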

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(_g1_reserved.contains((const void*) addr),
         err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
                 p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end())));
  return _hrs.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->continuesHumongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}
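
// Note: heap_region_containing_raw() returns the region that literally covers
// addr, which for an address inside the tail of a humongous object is one of
// its "continues humongous" regions; heap_region_containing() instead resolves
// such addresses to the humongous start region, which is the region callers
// usually want to work with.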

// Dirty the cards covering a newly allocated block in a young region so the
// post write barrier never queues anything when updating objects on it.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}
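
// The call above relies on the G1 SATB barrier set tagging the cards that
// cover [start, end) with the young-gen card value; the G1 post write barrier
// recognizes that value and returns early, so stores into this freshly
// allocated young block do not enqueue cards for refinement.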

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}
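
// Illustrative (hypothetical) use, sketching how an oop-scanning closure might
// consult the fast test; g1h stands for a G1CollectedHeap* and is an
// assumption of this sketch, not code from this file:
//
//   T heap_oop = oopDesc::load_heap_oop(p);
//   if (!oopDesc::is_null(heap_oop)) {
//     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
//     if (g1h->is_in_cset(obj)) {
//       // obj will be evacuated: copy it (or find its forwardee) and update *p.
//     } else {
//       // obj stays put: at most record a remembered set entry for *p.
//     }
//   }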

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

G1FastCSetBiasedMappedArray::in_cset_state_t G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_in_cset_fast_test(uint index) {
  _in_cset_fast_test.set_humongous(index);
}
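
// The queries above all read the same per-region entry of _in_cset_fast_test:
// an entry records whether the region is in the collection set, is a
// registered humongous candidate, or neither. in_cset_state() exposes the raw
// value, so callers that must distinguish the collection-set and humongous
// cases still pay for only a single table lookup.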

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  }
  return res;
}
#endif  // #ifndef PRODUCT

// We don't need barriers for initializing stores to objects in the young
// gen: for the SATB pre-barrier there is no pre-value that needs to be
// remembered, and for the remembered-set update logging post-barrier we
// don't maintain remembered set information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // We not only set the "live" flag in the humongous_is_live table, but also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever set the "live" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (!_humongous_is_live.is_live(region)) {
    _humongous_is_live.set_live(region);
    _in_cset_fast_test.clear_humongous(region);
  }
}
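
// Illustrative (hypothetical) call pattern: when a worker thread copying
// objects finds a reference into a humongous region that was previously
// registered via register_humongous_region_with_in_cset_fast_test(), it can
// simply call
//
//   g1h->set_humongous_is_live(obj);
//
// The _humongous_is_live check above makes repeated calls cheap, and clearing
// the _in_cset_fast_test entry keeps later references to the same object off
// this slow path.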

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP