26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
27
28 #include "gc_implementation/g1/g1ParScanThreadState.hpp"
29 #include "gc_implementation/g1/g1RemSet.inline.hpp"
30 #include "oops/oop.inline.hpp"
31
32 template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
33 if (!from->is_survivor()) {
34 _g1_rem->par_write_ref(from, p, tid);
35 }
36 }
37
38 template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
39 if (G1DeferredRSUpdate) {
40 deferred_rs_update(from, p, tid);
41 } else {
42 immediate_rs_update(from, p, tid);
43 }
44 }
45
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  // Scan one chunk of a partially-scanned object array. 'p' is not a plain
  // reference: it carries the partial-array mask set when the array was
  // first chunked.
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  // The array must already have been evacuated; its forwardee is the
  // to-space copy whose length field is reused as a scan cursor.
  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
  err_msg("invariant, next index: %d, length: %d", next_index, length));

  // NOTE(review): the code that claims the next chunk and computes
  // 'start'/'end' (and pushes a follow-up task if more remains) appears
  // to be elided from this excerpt — confirm against the full file.
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}
94
95 template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
96 if (!has_partial_array_mask(ref_to_scan)) {
97 // Note: we can use "raw" versions of "region_containing" because
98 // "obj_to_scan" is definitely in the heap, and is not in a
99 // humongous region.
100 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
101 do_oop_evac(ref_to_scan, r);
102 } else {
103 do_oop_partial_array((oop*)ref_to_scan);
104 }
105 }
106
107 inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
108 assert(verify_task(ref), "sanity");
109 if (ref.is_narrow()) {
110 deal_with_reference((narrowOop*)ref);
111 } else {
112 deal_with_reference((oop*)ref);
113 }
114 }
115
116 #endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */
117
|
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP
27
28 #include "gc_implementation/g1/g1ParScanThreadState.hpp"
29 #include "gc_implementation/g1/g1RemSet.inline.hpp"
30 #include "oops/oop.inline.hpp"
31
32 template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
33 if (!from->is_survivor()) {
34 _g1_rem->par_write_ref(from, p, tid);
35 }
36 }
37
38 template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
39 if (G1DeferredRSUpdate) {
40 deferred_rs_update(from, p, tid);
41 } else {
42 immediate_rs_update(from, p, tid);
43 }
44 }
45
46 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
47 assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
48 "Reference should not be NULL here as such are never pushed to the task queue.");
49 oop obj = oopDesc::load_decode_heap_oop_not_null(p);
50
51 // Although we never intentionally push references outside of the collection
52 // set, due to (benign) races in the claim mechanism during RSet scanning more
53 // than one thread might claim the same card. So the same card may be
54 // processed multiple times. So redo this check.
55 if (_g1h->in_cset_fast_test(obj)) {
56 oop forwardee;
57 if (obj->is_forwarded()) {
58 forwardee = obj->forwardee();
59 } else {
60 forwardee = copy_to_survivor_space(obj);
61 }
62 assert(forwardee != NULL, "forwardee should not be NULL");
63 oopDesc::encode_store_heap_oop(p, forwardee);
64 }
65
66 assert(obj != NULL, "Must be");
67 update_rs(from, p, queue_num());
68 }
69
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  // Scan one chunk of a partially-scanned object array. 'p' is not a plain
  // reference: it carries the partial-array mask set when the array was
  // first chunked.
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  // The array must already have been evacuated; its forwardee is the
  // to-space copy whose length field is reused as a scan cursor.
  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
  err_msg("invariant, next index: %d, length: %d", next_index, length));

  // NOTE(review): the code that claims the next chunk and computes
  // 'start'/'end' (and pushes a follow-up task if more remains) appears
  // to be elided from this excerpt — confirm against the full file.
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}
118
119 template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
120 if (!has_partial_array_mask(ref_to_scan)) {
121 // Note: we can use "raw" versions of "region_containing" because
122 // "obj_to_scan" is definitely in the heap, and is not in a
123 // humongous region.
124 HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
125 do_oop_evac(ref_to_scan, r);
126 } else {
127 do_oop_partial_array((oop*)ref_to_scan);
128 }
129 }
130
131 inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
132 assert(verify_task(ref), "sanity");
133 if (ref.is_narrow()) {
134 deal_with_reference((narrowOop*)ref);
135 } else {
136 deal_with_reference((oop*)ref);
137 }
138 }
139
140 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
141 StarTask stolen_task;
142 while (task_queues->steal(queue_num(), hash_seed(), stolen_task)) {
143 assert(verify_task(stolen_task), "sanity");
144 dispatch_reference(stolen_task);
145
146 // We've just processed a reference and we might have made
147 // available new entries on the queues. So we have to make sure
148 // we drain the queues as necessary.
149 trim_queue();
150 }
151 }
152
153 #endif /* SHARE_VM_GC_IMPLEMENTATION_G1_G1PARSCANTHREADSTATE_INLINE_HPP */
154
|