//   According to SATB, such objects are implicitly kept live and do
//   not need to be dealt with via SATB buffer processing.
//
// * A reference to a young generation object. Young objects are
//   handled separately and are not marked by concurrent marking.
//
// * A stale reference to a young generation object. If a young
//   generation object reference is recorded and not filtered out
//   before being moved by a young collection, the reference becomes
//   stale.
//
// * A stale reference to an eagerly reclaimed humongous object. If a
//   humongous object is recorded and then reclaimed, the reference
//   becomes stale.
//
// The stale reference cases are implicitly handled by the NTAMS
// comparison. Because of the possibility of stale references, buffer
// processing must be somewhat circumspect and not assume entries
// in an unfiltered buffer refer to valid objects.

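// Illustrative scenario (not part of the original comment): a mutator
// records a reference to a young object in its SATB buffer, and a young
// collection then moves the object before the buffer is filtered. The
// recorded address is now stale, but it compares at or above the
// containing region's NTAMS, so requires_marking() below rejects it
// without ever treating it as a valid object.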
inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
  // Includes rejection of NULL pointers.
  assert(heap->is_in_reserved(entry),
         err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)));

  HeapRegion* region = heap->heap_region_containing_raw(entry);
  assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry)));
  // Entries at or above the region's NTAMS were either allocated since
  // marking started (and are implicitly live) or are stale; neither
  // needs to be marked (see the discussion above).
  if (entry >= region->next_top_at_mark_start()) {
    return false;
  }

  assert(((oop)entry)->is_oop(true /* ignore mark word */),
         err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)));

  return true;
}

// This method removes entries from a SATB buffer that will not be
// useful to the concurrent marking threads. Entries are retained if
// they require marking and are not already marked. Retained entries
// are compacted toward the top of the buffer.

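// Illustrative example (not part of the original comment): _index and _sz
// are byte offsets, and the active entries occupy [_index, _sz), filled
// from the top of the buffer downwards. With _sz == 4 * oopSize and
// _index == oopSize there are three entries, at byte offsets oopSize,
// 2 * oopSize and 3 * oopSize. filter() visits them from the top down; if
// only the entry at offset oopSize survives, it is copied to offset
// 3 * oopSize and _index is advanced to 3 * oopSize, so the retained
// entries again sit at the top of the buffer and the slots below them
// are left NULLed and free.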
void ObjPtrQueue::filter() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  void** buf = _buf;
  size_t sz = _sz;

  if (buf == NULL) {
    // nothing to do
    return;
  }

  // Used for sanity checking at the end of the loop.
  debug_only(size_t entries = 0; size_t retained = 0;)

  size_t i = sz;
  size_t new_index = sz;

  while (i > _index) {
    assert(i > 0, "we should have at least one more entry to process");
    i -= oopSize;
    debug_only(entries += 1;)
    void** p = &buf[byte_index_to_index((int) i)];
    void* entry = *p;
    // NULL the entry so that unused parts of the buffer contain NULLs
    // at the end. If we are going to retain it we will copy it to its
    // final place. If we have retained all entries we have visited so
    // far, we'll just end up copying it to the same place.
    *p = NULL;

    if (requires_marking(entry, g1h) && !g1h->isMarkedNext((oop)entry)) {
      assert(new_index > 0, "we should not have already filled up the buffer");
      new_index -= oopSize;
      assert(new_index >= i,
             "new_index should never be below i, as we always compact 'up'");
      void** new_p = &buf[byte_index_to_index((int) new_index)];
      assert(new_p >= p, "the destination location should never be below "
             "the source as we always compact 'up'");
      assert(*new_p == NULL,
             "we should have already cleared the destination location");
      *new_p = entry;
      debug_only(retained += 1;)
    }
  }

#ifdef ASSERT
  size_t entries_calc = (sz - _index) / oopSize;
  assert(entries == entries_calc, "the number of entries we counted "
         "should match the number of entries we calculated");
  size_t retained_calc = (sz - new_index) / oopSize;
  assert(retained == retained_calc, "the number of retained entries we counted "
         "should match the number of retained entries we calculated");
#endif // ASSERT

  _index = new_index;
}
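// Context note (describing surrounding code not shown in this excerpt, so
// treat as an assumption): filter() is invoked from the SATB buffer
// enqueueing path, where a buffer that is mostly empty after filtering can
// be reused by the mutator thread instead of being handed off to the
// concurrent marking threads.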