15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
27
28 #include "gc_implementation/g1/concurrentMark.inline.hpp"
29 #include "gc_implementation/g1/dirtyCardQueue.hpp"
30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
31 #include "gc_implementation/g1/g1_globals.hpp"
32 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
33 #include "gc_implementation/g1/heapRegion.hpp"
34 #include "gc_implementation/g1/heapRegionRemSet.hpp"
35 #include "utilities/workgroup.hpp"
36
37 // Closures and tasks associated with any self-forwarding pointers
38 // installed as a result of an evacuation failure.
39
40 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
41 private:
42 G1CollectedHeap* _g1;
43 DirtyCardQueue *_dcq;
44 G1SATBCardTableModRefBS* _ct_bs;
45
46 public:
47 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
48 _g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
49
50 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
51 virtual void do_oop( oop* p) { do_oop_work(p); }
52 template <class T> void do_oop_work(T* p) {
53 assert(_from->is_in_reserved(p), "paranoia");
54 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
103 // of TLAB_2 are coalesced, then the cards of the unrefined part
104 // would point into the middle of the filler object.
105 // The current approach is to not coalesce and leave the BOT contents intact.
106 // </original comment>
107 //
108 // We now reset the BOT when we start the object iteration over the
109 // region and refine its entries for every object we come across. So
110 // the above comment is not really relevant and we should be able
111 // to coalesce dead objects if we want to.
112 void do_object(oop obj) {
113 HeapWord* obj_addr = (HeapWord*) obj;
114 assert(_hr->is_in(obj_addr), "sanity");
115 size_t obj_size = obj->size();
116 HeapWord* obj_end = obj_addr + obj_size;
117
118 if (_end_of_last_gap != obj_addr) {
119 // there was a gap before obj_addr
120 _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
121 }
122
123 if (obj->is_forwarded() && obj->forwardee() == obj) {
124 // The object failed to move.
125
126 // We consider all objects that we find self-forwarded to be
127 // live. What we'll do is that we'll update the prev marking
128 // info so that they are all under PTAMS and explicitly marked.
129 if (!_cm->isPrevMarked(obj)) {
130 _cm->markPrev(obj);
131 }
132 if (_during_initial_mark) {
133 // For the next marking info we'll only mark the
134 // self-forwarded objects explicitly if we are during
135 // initial-mark (since, normally, we only mark objects pointed
136 // to by roots if we succeed in copying them). By marking all
137 // self-forwarded objects we ensure that we mark any that are
138 // still pointed to by roots. During concurrent marking, and
139 // after initial-mark, we don't need to mark any objects
140 // explicitly and all objects in the CSet are considered
141 // (implicitly) live. So, we won't mark them explicitly and
142 // we'll leave them over NTAMS.
143 _cm->grayRoot(obj, obj_size, _worker_id, _hr);
|
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
26 #define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
27
28 #include "gc_implementation/g1/concurrentMark.inline.hpp"
29 #include "gc_implementation/g1/dirtyCardQueue.hpp"
30 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
31 #include "gc_implementation/g1/g1_globals.hpp"
32 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
33 #include "gc_implementation/g1/heapRegion.hpp"
34 #include "gc_implementation/g1/heapRegionRemSet.hpp"
35 #include "oops/markOop.inline.hpp"
36 #include "utilities/workgroup.hpp"
37
38 // Closures and tasks associated with any self-forwarding pointers
39 // installed as a result of an evacuation failure.
40
41 class UpdateRSetDeferred : public OopsInHeapRegionClosure {
42 private:
43 G1CollectedHeap* _g1;
44 DirtyCardQueue *_dcq;
45 G1SATBCardTableModRefBS* _ct_bs;
46
47 public:
48 UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
49 _g1(g1), _ct_bs(_g1->g1_barrier_set()), _dcq(dcq) {}
50
51 virtual void do_oop(narrowOop* p) { do_oop_work(p); }
52 virtual void do_oop( oop* p) { do_oop_work(p); }
53 template <class T> void do_oop_work(T* p) {
54 assert(_from->is_in_reserved(p), "paranoia");
55 if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
104 // of TLAB_2 are coalesced, then the cards of the unrefined part
105 // would point into the middle of the filler object.
106 // The current approach is to not coalesce and leave the BOT contents intact.
107 // </original comment>
108 //
109 // We now reset the BOT when we start the object iteration over the
110 // region and refine its entries for every object we come across. So
111 // the above comment is not really relevant and we should be able
112 // to coalesce dead objects if we want to.
113 void do_object(oop obj) {
114 HeapWord* obj_addr = (HeapWord*) obj;
115 assert(_hr->is_in(obj_addr), "sanity");
116 size_t obj_size = obj->size();
117 HeapWord* obj_end = obj_addr + obj_size;
118
119 if (_end_of_last_gap != obj_addr) {
120 // there was a gap before obj_addr
121 _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
122 }
123
124 markOop m = obj->mark();
125 if (m->is_marked() && ((oop)m->decode_pointer() == obj)) {
126
127 // The object failed to move.
128
129 // We consider all objects that we find self-forwarded to be
130 // live. What we'll do is that we'll update the prev marking
131 // info so that they are all under PTAMS and explicitly marked.
132 if (!_cm->isPrevMarked(obj)) {
133 _cm->markPrev(obj);
134 }
135 if (_during_initial_mark) {
136 // For the next marking info we'll only mark the
137 // self-forwarded objects explicitly if we are during
138 // initial-mark (since, normally, we only mark objects pointed
139 // to by roots if we succeed in copying them). By marking all
140 // self-forwarded objects we ensure that we mark any that are
141 // still pointed to by roots. During concurrent marking, and
142 // after initial-mark, we don't need to mark any objects
143 // explicitly and all objects in the CSet are considered
144 // (implicitly) live. So, we won't mark them explicitly and
145 // we'll leave them over NTAMS.
146 _cm->grayRoot(obj, obj_size, _worker_id, _hr);
|