13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
27
28 #include "gc/g1/g1ParScanThreadState.hpp"
29 #include "gc/g1/g1RemSet.hpp"
30 #include "oops/access.inline.hpp"
31 #include "oops/oop.inline.hpp"
32
33 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
34 // Reference should not be NULL here as such are never pushed to the task queue.
35 oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
36
37 // Although we never intentionally push references outside of the collection
38 // set, due to (benign) races in the claim mechanism during RSet scanning more
39 // than one thread might claim the same card. So the same card may be
40 // processed multiple times. So redo this check.
41 const InCSetState in_cset_state = _g1h->in_cset_state(obj);
42 if (in_cset_state.is_in_cset()) {
43 markOop m = obj->mark();
44 if (m->is_marked()) {
45 obj = (oop) m->decode_pointer();
46 } else {
47 obj = copy_to_survivor_space(in_cset_state, obj, m);
48 }
49 RawAccess<>::oop_store(p, obj);
50 } else if (in_cset_state.is_humongous()) {
51 _g1h->set_humongous_is_live(obj);
52 } else {
53 assert(in_cset_state.is_default(),
54 "In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value());
55 }
56
57 assert(obj != NULL, "Must be");
58 if (!HeapRegion::is_in_same_region(p, obj)) {
59 update_rs(from, p, obj);
60 }
61 }
62
// Push a reference onto this thread's local scan queue.
63 template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
64 assert(verify_ref(ref), "sanity"); // Catch malformed refs before they enter the queue.
65 _refs->push(ref);
66 }
67
// Process one chunk of a large object array whose scanning has been split
// into partial-array tasks. p is a tagged pointer encoding the from-space
// array (see has_partial_array_mask / clear_partial_array_mask).
// NOTE(review): original listing lines 79-96 are missing from this copy —
// the code that derives to_obj / to_obj_array / start / end and opens the
// `if` that the `else` below closes. Recover the full body from upstream
// before relying on this function.
68 inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
69 assert(has_partial_array_mask(p), "invariant");
// Strip the tag bit to recover the real from-space array oop.
70 oop from_obj = clear_partial_array_mask(p);
71
72 assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
73 assert(from_obj->is_objArray(), "must be obj array");
74 objArrayOop from_obj_array = objArrayOop(from_obj);
75 // The from-space object contains the real length.
76 int length = from_obj_array->length();
77
78 assert(from_obj->is_forwarded(), "must be forwarded");
// More chunks remain: re-tag the from-space array and push it back on the
// queue so the next chunk can be claimed later.
97 oop* from_obj_p = set_partial_array_mask(from_obj);
98 push_on_queue(from_obj_p);
99 } else {
100 assert(length == end, "sanity");
101 // We'll process the final range for this object. Restore the length
102 // so that the heap remains parsable in case of evacuation failure.
103 to_obj_array->set_length(end);
104 }
105 _scanner.set_region(_g1h->heap_region_containing(to_obj));
106 // Process indexes [start,end). It will also process the header
107 // along with the first chunk (i.e., the chunk with start == 0).
108 // Note that at this point the length field of to_obj_array is not
109 // correct given that we are using it to keep track of the next
110 // start index. oop_iterate_range() (thankfully!) ignores the length
111 // field and only relies on the start / end parameters. It does
112 // however return the size of the object which will be incorrect. So
113 // we have to ignore it even if we wanted to use it.
114 to_obj_array->oop_iterate_range(&_scanner, start, end);
115 }
116
117 template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
118 if (!has_partial_array_mask(ref_to_scan)) {
119 HeapRegion* r = _g1h->heap_region_containing(ref_to_scan);
120 do_oop_evac(ref_to_scan, r);
121 } else {
122 do_oop_partial_array((oop*)ref_to_scan);
123 }
124 }
125
126 inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
127 assert(verify_task(ref), "sanity");
128 if (ref.is_narrow()) {
129 deal_with_reference((narrowOop*)ref);
130 } else {
131 deal_with_reference((oop*)ref);
132 }
133 }
134
135 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
136 StarTask stolen_task;
137 while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
138 assert(verify_task(stolen_task), "sanity");
139 dispatch_reference(stolen_task);
140
141 // We've just processed a reference and we might have made
142 // available new entries on the queues. So we have to make sure
143 // we drain the queues as necessary.
|
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
27
28 #include "gc/g1/g1ParScanThreadState.hpp"
29 #include "gc/g1/g1RemSet.hpp"
30 #include "oops/access.inline.hpp"
31 #include "oops/oop.inline.hpp"
32
33 template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
34 // Reference should not be NULL here as such are never pushed to the task queue.
35 oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
36
37 // Although we never intentionally push references outside of the collection
38 // set, due to (benign) races in the claim mechanism during RSet scanning more
39 // than one thread might claim the same card. So the same card may be
40 // processed multiple times, and so we might get references into old gen here.
41 // So we need to redo this check.
42 const InCSetState in_cset_state = _g1h->in_cset_state(obj);
43 if (in_cset_state.is_in_cset()) {
44 markOop m = obj->mark();
45 if (m->is_marked()) {
46 obj = (oop) m->decode_pointer();
47 } else {
48 obj = copy_to_survivor_space(in_cset_state, obj, m);
49 }
50 RawAccess<OOP_NOT_NULL>::oop_store(p, obj);
51 } else if (in_cset_state.is_humongous()) {
52 _g1h->set_humongous_is_live(obj);
53 } else {
54 assert(in_cset_state.is_default(),
55 "In_cset_state must be NotInCSet here, but is " CSETSTATE_FORMAT, in_cset_state.value());
56 }
57
58 assert(obj != NULL, "Must be");
59 if (!HeapRegion::is_in_same_region(p, obj)) {
60 HeapRegion* from = _g1h->heap_region_containing(p);
61 update_rs(from, p, obj);
62 }
63 }
64
// Push a reference onto this thread's local scan queue.
65 template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
66 assert(verify_ref(ref), "sanity"); // Reject malformed refs before enqueueing.
67 _refs->push(ref);
68 }
69
// Process one chunk of a large object array whose scanning has been split
// into partial-array tasks. p is a tagged pointer encoding the from-space
// array (see has_partial_array_mask / clear_partial_array_mask).
// NOTE(review): original listing lines 81-98 are missing from this copy —
// the code that derives to_obj / to_obj_array / start / end and opens the
// `if` that the `else` below closes. Recover the full body from upstream
// before relying on this function.
70 inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
71 assert(has_partial_array_mask(p), "invariant");
// Strip the tag bit to recover the real from-space array oop.
72 oop from_obj = clear_partial_array_mask(p);
73
74 assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
75 assert(from_obj->is_objArray(), "must be obj array");
76 objArrayOop from_obj_array = objArrayOop(from_obj);
77 // The from-space object contains the real length.
78 int length = from_obj_array->length();
79
80 assert(from_obj->is_forwarded(), "must be forwarded");
// More chunks remain: re-tag the from-space array and push it back on the
// queue so the next chunk can be claimed later.
99 oop* from_obj_p = set_partial_array_mask(from_obj);
100 push_on_queue(from_obj_p);
101 } else {
102 assert(length == end, "sanity");
103 // We'll process the final range for this object. Restore the length
104 // so that the heap remains parsable in case of evacuation failure.
105 to_obj_array->set_length(end);
106 }
107 _scanner.set_region(_g1h->heap_region_containing(to_obj));
108 // Process indexes [start,end). It will also process the header
109 // along with the first chunk (i.e., the chunk with start == 0).
110 // Note that at this point the length field of to_obj_array is not
111 // correct given that we are using it to keep track of the next
112 // start index. oop_iterate_range() (thankfully!) ignores the length
113 // field and only relies on the start / end parameters. It does
114 // however return the size of the object which will be incorrect. So
115 // we have to ignore it even if we wanted to use it.
116 to_obj_array->oop_iterate_range(&_scanner, start, end);
117 }
118
119 inline void G1ParScanThreadState::deal_with_reference(oop* ref_to_scan) {
120 if (!has_partial_array_mask(ref_to_scan)) {
121 do_oop_evac(ref_to_scan);
122 } else {
123 do_oop_partial_array(ref_to_scan);
124 }
125 }
126
// Narrow (compressed) oop slots are never used to encode partial-array
// tasks, so this overload always takes the plain evacuation path.
127 inline void G1ParScanThreadState::deal_with_reference(narrowOop* ref_to_scan) {
128 assert(!has_partial_array_mask(ref_to_scan), "NarrowOop* elements should never be partial arrays.");
129 do_oop_evac(ref_to_scan);
130 }
131
132 inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
133 assert(verify_task(ref), "sanity");
134 if (ref.is_narrow()) {
135 deal_with_reference((narrowOop*)ref);
136 } else {
137 deal_with_reference((oop*)ref);
138 }
139 }
140
141 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
142 StarTask stolen_task;
143 while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
144 assert(verify_task(stolen_task), "sanity");
145 dispatch_reference(stolen_task);
146
147 // We've just processed a reference and we might have made
148 // available new entries on the queues. So we have to make sure
149 // we drain the queues as necessary.
|