97 // compensate for such elided card-marks as follows:
98 // (a) Generational, non-concurrent collectors, such as
99 // GenCollectedHeap(DefNew,Tenured) and
100 // ParallelScavengeHeap(ParallelGC, ParallelOldGC)
101 // need the card-mark if and only if the region is
102 // in the old gen, and do not care if the card-mark
103 // succeeds or precedes the initializing stores themselves,
104 // so long as the card-mark is completed before the next
105 // scavenge. For all these cases, we can do a card mark
106 // at the point at which we do a slow path allocation
107 // in the old gen, i.e. in this call.
108 // (b) G1CollectedHeap(G1) uses two kinds of write barriers. When a
109 // G1 concurrent marking is in progress an SATB (pre-write-)barrier
110 // is used to remember the pre-value of any store. Initializing
111 // stores will not need this barrier, so we need not worry about
112 // compensating for the missing pre-barrier here. Turning now
113 // to the post-barrier, we note that G1 needs a RS update barrier
114 // which simply enqueues a (sequence of) dirty cards which may
115 // optionally be refined by the concurrent update threads. Note
116 // that this barrier need only be applied to a non-young write,
117 // but, like in CMS, because of the presence of concurrent refinement
118 // (much like CMS' precleaning), must strictly follow the oop-store.
119 // Thus, using the same protocol for maintaining the intended
120 // invariants turns out, serendipitously, to be the same for both
121 // G1 and CMS.
122 //
123 // For any future collector, this code should be reexamined with
124 // that specific collector in mind, and the documentation above suitably
125 // extended and updated.
126 void CardTableBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
127 #if COMPILER2_OR_JVMCI
128 if (!ReduceInitialCardMarks) {
129 return;
130 }
131 // If a previous card-mark was deferred, flush it now.
132 flush_deferred_card_mark_barrier(thread);
133 if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
134 // Arrays of non-references don't need a post-barrier.
135 // The deferred_card_mark region should be empty
136 // following the flush above.
137 assert(thread->deferred_card_mark().is_empty(), "Error");
138 } else {
139 MemRegion mr((HeapWord*)new_obj, new_obj->size());
140 assert(!mr.is_empty(), "Error");
141 if (_defer_initial_card_mark) {
|
97 // compensate for such elided card-marks as follows:
98 // (a) Generational, non-concurrent collectors, such as
99 // GenCollectedHeap(DefNew,Tenured) and
100 // ParallelScavengeHeap(ParallelGC, ParallelOldGC)
101 // need the card-mark if and only if the region is
102 // in the old gen, and do not care if the card-mark
103 // succeeds or precedes the initializing stores themselves,
104 // so long as the card-mark is completed before the next
105 // scavenge. For all these cases, we can do a card mark
106 // at the point at which we do a slow path allocation
107 // in the old gen, i.e. in this call.
108 // (b) G1CollectedHeap(G1) uses two kinds of write barriers. When a
109 // G1 concurrent marking is in progress an SATB (pre-write-)barrier
110 // is used to remember the pre-value of any store. Initializing
111 // stores will not need this barrier, so we need not worry about
112 // compensating for the missing pre-barrier here. Turning now
113 // to the post-barrier, we note that G1 needs a RS update barrier
114 // which simply enqueues a (sequence of) dirty cards which may
115 // optionally be refined by the concurrent update threads. Note
116 // that this barrier need only be applied to a non-young write,
117 // but, because of the presence of concurrent refinement,
118 // must strictly follow the oop-store.
119 //
120 // For any future collector, this code should be reexamined with
121 // that specific collector in mind, and the documentation above suitably
122 // extended and updated.
123 void CardTableBarrierSet::on_slowpath_allocation_exit(JavaThread* thread, oop new_obj) {
124 #if COMPILER2_OR_JVMCI
125 if (!ReduceInitialCardMarks) {
126 return;
127 }
128 // If a previous card-mark was deferred, flush it now.
129 flush_deferred_card_mark_barrier(thread);
130 if (new_obj->is_typeArray() || _card_table->is_in_young(new_obj)) {
131 // Arrays of non-references don't need a post-barrier.
132 // The deferred_card_mark region should be empty
133 // following the flush above.
134 assert(thread->deferred_card_mark().is_empty(), "Error");
135 } else {
136 MemRegion mr((HeapWord*)new_obj, new_obj->size());
137 assert(!mr.is_empty(), "Error");
138 if (_defer_initial_card_mark) {
|