35 }
36 }
37
38 template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
39 if (G1DeferredRSUpdate) {
40 deferred_rs_update(from, p, tid);
41 } else {
42 immediate_rs_update(from, p, tid);
43 }
44 }
45
// Process one reference popped from the task queue: if the referent is in the
// collection set (or humongous), evacuate it (or reuse an existing forwarding
// pointer) and update *p to the new location, then record the update in the
// remembered set of region 'from'.
template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
         "Reference should not be NULL here as such are never pushed to the task queue.");
  oop obj = oopDesc::load_decode_heap_oop_not_null(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. So the same card may be
  // processed multiple times. So redo this check.
  if (_g1h->is_in_cset_or_humongous(obj)) {
    oop forwardee;
    if (obj->is_forwarded()) {
      // Some other thread (or an earlier pass) already copied this object;
      // use the forwarding pointer it installed.
      forwardee = obj->forwardee();
    } else {
      forwardee = copy_to_survivor_space(obj);
    }
    if (forwardee != NULL) {
      // Point the reference at the object's new location. A NULL forwardee
      // (no store performed) leaves *p unchanged.
      oopDesc::encode_store_heap_oop(p, forwardee);
    }
  }

  assert(obj != NULL, "Must be");
  // Record the (possibly updated) reference in the remembered set of 'from'.
  update_rs(from, p, queue_num());
}
70
71 inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
72 assert(has_partial_array_mask(p), "invariant");
73 oop from_obj = clear_partial_array_mask(p);
74
75 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
76 assert(from_obj->is_objArray(), "must be obj array");
77 objArrayOop from_obj_array = objArrayOop(from_obj);
78 // The from-space object contains the real length.
79 int length = from_obj_array->length();
80
81 assert(from_obj->is_forwarded(), "must be forwarded");
82 oop to_obj = from_obj->forwardee();
83 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
84 objArrayOop to_obj_array = objArrayOop(to_obj);
|
35 }
36 }
37
// Record a remembered-set update for the reference at p (located in region
// 'from'). The G1DeferredRSUpdate flag selects between the deferred and the
// immediate update path; 'tid' identifies the worker performing the update.
template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
  if (G1DeferredRSUpdate) {
    deferred_rs_update(from, p, tid);
  } else {
    immediate_rs_update(from, p, tid);
  }
}
45
// Process one reference popped from the task queue: classify the referent via
// the collection-set lookup table, evacuate in-cset objects (updating *p to
// the new location), mark live humongous objects, then record the update in
// the remembered set of region 'from'.
template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
         "Reference should not be NULL here as such are never pushed to the task queue.");
  oop obj = oopDesc::load_decode_heap_oop_not_null(p);

  // Although we never intentionally push references outside of the collection
  // set, due to (benign) races in the claim mechanism during RSet scanning more
  // than one thread might claim the same card. So the same card may be
  // processed multiple times. So redo this check.
  G1FastCSetBiasedMappedArray::in_cset_state_t in_cset_state = _g1h->in_cset_state(obj);
  if (in_cset_state == G1FastCSetBiasedMappedArray::InCSet) {
    oop forwardee;
    if (obj->is_forwarded()) {
      // Some other thread (or an earlier pass) already copied this object;
      // use the forwarding pointer it installed.
      forwardee = obj->forwardee();
    } else {
      forwardee = copy_to_survivor_space(obj);
    }
    // Point the reference at the object's new location.
    oopDesc::encode_store_heap_oop(p, forwardee);
  } else if (in_cset_state == G1FastCSetBiasedMappedArray::IsHumongous) {
    // Humongous objects are not evacuated; a reference to one just proves
    // it is live.
    _g1h->set_humongous_is_live(obj);
  } else {
    // Benign re-processing of a card may hand us a reference outside the
    // collection set; nothing to do, but the state must be InNeither.
    assert(in_cset_state == G1FastCSetBiasedMappedArray::InNeither,
           err_msg("In_cset_state must be InNeither here, but is %d", in_cset_state));
  }

  assert(obj != NULL, "Must be");
  // Record the (possibly updated) reference in the remembered set of 'from'.
  update_rs(from, p, queue_num());
}
74
75 inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
76 assert(has_partial_array_mask(p), "invariant");
77 oop from_obj = clear_partial_array_mask(p);
78
79 assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
80 assert(from_obj->is_objArray(), "must be obj array");
81 objArrayOop from_obj_array = objArrayOop(from_obj);
82 // The from-space object contains the real length.
83 int length = from_obj_array->length();
84
85 assert(from_obj->is_forwarded(), "must be forwarded");
86 oop to_obj = from_obj->forwardee();
87 assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
88 objArrayOop to_obj_array = objArrayOop(to_obj);
|