22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/dirtyCardQueue.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1CollectorState.hpp"
29 #include "gc/g1/g1ConcurrentMark.inline.hpp"
30 #include "gc/g1/g1EvacFailure.hpp"
31 #include "gc/g1/g1HeapVerifier.hpp"
32 #include "gc/g1/g1OopClosures.inline.hpp"
33 #include "gc/g1/g1_globals.hpp"
34 #include "gc/g1/heapRegion.hpp"
35 #include "gc/g1/heapRegionRemSet.hpp"
36 #include "gc/shared/preservedMarks.inline.hpp"
37 #include "oops/access.inline.hpp"
38 #include "oops/compressedOops.inline.hpp"
39
40 class UpdateRSetDeferred : public ExtendedOopClosure {
41 private:
42 G1CollectedHeap* _g1;
43 DirtyCardQueue* _dcq;
44 G1CardTable* _ct;
45
46 public:
47 UpdateRSetDeferred(DirtyCardQueue* dcq) :
48 _g1(G1CollectedHeap::heap()), _ct(_g1->card_table()), _dcq(dcq) {}
49
50 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
51 virtual void do_oop( oop* p) { do_oop_work(p); }
52 template <class T> void do_oop_work(T* p) {
53 assert(_g1->heap_region_containing(p)->is_in_reserved(p), "paranoia");
54 assert(!_g1->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");
55
56 T const o = RawAccess<>::oop_load(p);
57 if (CompressedOops::is_null(o)) {
58 return;
59 }
60
61 if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) {
62 return;
63 }
64 size_t card_index = _ct->index_for(p);
65 if (_ct->mark_card_deferred(card_index)) {
66 _dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
67 }
68 }
69 };
70
71 class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
72 private:
73 G1CollectedHeap* _g1;
74 G1ConcurrentMark* _cm;
75 HeapRegion* _hr;
76 size_t _marked_bytes;
77 UpdateRSetDeferred* _update_rset_cl;
78 bool _during_initial_mark;
79 uint _worker_id;
80 HeapWord* _last_forwarded_object_end;
81
82 public:
  // Sets up the closure for processing region 'hr'. Caches the heap and
  // concurrent-mark singletons; byte accounting starts at zero and the
  // "last forwarded object end" cursor starts at the region's bottom.
  // 'update_rset_cl' performs the deferred remembered set updates;
  // 'during_initial_mark' and 'worker_id' are stored for use by the
  // object-iteration callback (not visible in this chunk).
  RemoveSelfForwardPtrObjClosure(HeapRegion* hr,
                                 UpdateRSetDeferred* update_rset_cl,
                                 bool during_initial_mark,
                                 uint worker_id) :
    _g1(G1CollectedHeap::heap()),
    _cm(_g1->concurrent_mark()),
    _hr(hr),
    _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _worker_id(worker_id),
    _last_forwarded_object_end(hr->bottom()) { }
95
  // Returns the number of bytes accumulated in _marked_bytes while
  // iterating over the region (presumably the bytes of objects kept
  // live — confirm against do_object, which is outside this chunk).
  size_t marked_bytes() { return _marked_bytes; }
97
98 // Iterate over the live objects in the region to find self-forwarded objects
99 // that need to be kept live. We need to update the remembered sets of these
100 // objects. Further update the BOT and marks.
101 // We can coalesce and overwrite the remaining heap contents with dummy objects
102 // as they have either been dead or evacuated (which are unreferenced now, i.e.
103 // dead too) already.
104 void do_object(oop obj) {
105 HeapWord* obj_addr = (HeapWord*) obj;
106 assert(_hr->is_in(obj_addr), "sanity");
107
108 if (obj->is_forwarded() && obj->forwardee() == obj) {
|
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/dirtyCardQueue.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1CollectorState.hpp"
29 #include "gc/g1/g1ConcurrentMark.inline.hpp"
30 #include "gc/g1/g1EvacFailure.hpp"
31 #include "gc/g1/g1HeapVerifier.hpp"
32 #include "gc/g1/g1OopClosures.inline.hpp"
33 #include "gc/g1/g1_globals.hpp"
34 #include "gc/g1/heapRegion.hpp"
35 #include "gc/g1/heapRegionRemSet.hpp"
36 #include "gc/shared/preservedMarks.inline.hpp"
37 #include "oops/access.inline.hpp"
38 #include "oops/compressedOops.inline.hpp"
39
40 class UpdateRSetDeferred : public ExtendedOopClosure {
41 private:
42 G1CollectedHeap* _g1h;
43 DirtyCardQueue* _dcq;
44 G1CardTable* _ct;
45
46 public:
47 UpdateRSetDeferred(DirtyCardQueue* dcq) :
48 _g1h(G1CollectedHeap::heap()), _ct(_g1h->card_table()), _dcq(dcq) {}
49
50 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
51 virtual void do_oop( oop* p) { do_oop_work(p); }
52 template <class T> void do_oop_work(T* p) {
53 assert(_g1h->heap_region_containing(p)->is_in_reserved(p), "paranoia");
54 assert(!_g1h->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");
55
56 T const o = RawAccess<>::oop_load(p);
57 if (CompressedOops::is_null(o)) {
58 return;
59 }
60
61 if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) {
62 return;
63 }
64 size_t card_index = _ct->index_for(p);
65 if (_ct->mark_card_deferred(card_index)) {
66 _dcq->enqueue((jbyte*)_ct->byte_for_index(card_index));
67 }
68 }
69 };
70
71 class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
72 G1CollectedHeap* _g1h;
73 G1ConcurrentMark* _cm;
74 HeapRegion* _hr;
75 size_t _marked_bytes;
76 UpdateRSetDeferred* _update_rset_cl;
77 bool _during_initial_mark;
78 uint _worker_id;
79 HeapWord* _last_forwarded_object_end;
80
81 public:
  // Sets up the closure for processing region 'hr'. Caches the heap and
  // concurrent-mark singletons; byte accounting starts at zero and the
  // "last forwarded object end" cursor starts at the region's bottom.
  // 'update_rset_cl' performs the deferred remembered set updates;
  // 'during_initial_mark' and 'worker_id' are stored for use by the
  // object-iteration callback (not visible in this chunk).
  RemoveSelfForwardPtrObjClosure(HeapRegion* hr,
                                 UpdateRSetDeferred* update_rset_cl,
                                 bool during_initial_mark,
                                 uint worker_id) :
    _g1h(G1CollectedHeap::heap()),
    _cm(_g1h->concurrent_mark()),
    _hr(hr),
    _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _worker_id(worker_id),
    _last_forwarded_object_end(hr->bottom()) { }
94
  // Returns the number of bytes accumulated in _marked_bytes while
  // iterating over the region (presumably the bytes of objects kept
  // live — confirm against do_object, which is outside this chunk).
  size_t marked_bytes() { return _marked_bytes; }
96
97 // Iterate over the live objects in the region to find self-forwarded objects
98 // that need to be kept live. We need to update the remembered sets of these
99 // objects. Further update the BOT and marks.
100 // We can coalesce and overwrite the remaining heap contents with dummy objects
101 // as they have either been dead or evacuated (which are unreferenced now, i.e.
102 // dead too) already.
103 void do_object(oop obj) {
104 HeapWord* obj_addr = (HeapWord*) obj;
105 assert(_hr->is_in(obj_addr), "sanity");
106
107 if (obj->is_forwarded() && obj->forwardee() == obj) {
|