170 "Should not clear heap region %u in the collection set", hrm_index());
171
172 set_allocation_context(AllocationContext::system());
173 set_young_index_in_cset(-1);
174 uninstall_surv_rate_group();
175 set_free();
176 reset_pre_dummy_top();
177
178 if (!par) {
179 // If this is parallel, this will be done later.
180 HeapRegionRemSet* hrrs = rem_set();
181 if (locked) {
182 hrrs->clear_locked();
183 } else {
184 hrrs->clear();
185 }
186 }
187 zero_marked_bytes();
188
189 init_top_at_mark_start();
190 if (clear_space) clear(SpaceDecorator::Mangle);
191 }
192
193 void HeapRegion::par_clear() {
194 assert(used() == 0, "the region should have been already cleared");
195 assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
196 HeapRegionRemSet* hrrs = rem_set();
197 hrrs->clear();
198 CardTableModRefBS* ct_bs =
199 barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
200 ct_bs->clear(MemRegion(bottom(), end()));
201 }
202
203 void HeapRegion::calc_gc_efficiency() {
204 // GC efficiency is the ratio of how much space would be
205 // reclaimed over how long we predict it would take to reclaim it.
206 G1CollectedHeap* g1h = G1CollectedHeap::heap();
207 G1CollectorPolicy* g1p = g1h->g1_policy();
208
209 // Retrieve a prediction of the elapsed time for this region for
1027 HeapWord* end) {
1028 _bot_part.alloc_block(start, end);
1029 return _bot_part.threshold();
1030 }
1031
1032 HeapWord* G1ContiguousSpace::scan_top() const {
1033 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1034 HeapWord* local_top = top();
1035 OrderAccess::loadload();
1036 const unsigned local_time_stamp = _gc_time_stamp;
1037 assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
1038 if (local_time_stamp < g1h->get_gc_time_stamp()) {
1039 return local_top;
1040 } else {
1041 return _scan_top;
1042 }
1043 }
1044
1045 void G1ContiguousSpace::record_timestamp() {
1046 G1CollectedHeap* g1h = G1CollectedHeap::heap();
1047 unsigned curr_gc_time_stamp = g1h->get_gc_time_stamp();
1048
1049 if (_gc_time_stamp < curr_gc_time_stamp) {
1050 // Setting the time stamp here tells concurrent readers to look at
1051 // scan_top to know the maximum allowed address to look at.
1052
1053 // scan_top should be bottom for all regions except for the
1054 // retained old alloc region which should have scan_top == top
1055 HeapWord* st = _scan_top;
1056 guarantee(st == _bottom || st == _top, "invariant");
1057
1058 _gc_time_stamp = curr_gc_time_stamp;
1059 }
1060 }
1061
1062 void G1ContiguousSpace::record_retained_region() {
1063 // scan_top is the maximum address where it's safe for the next gc to
1064 // scan this region.
1065 _scan_top = top();
1066 }
1067
|
170 "Should not clear heap region %u in the collection set", hrm_index());
171
172 set_allocation_context(AllocationContext::system());
173 set_young_index_in_cset(-1);
174 uninstall_surv_rate_group();
175 set_free();
176 reset_pre_dummy_top();
177
178 if (!par) {
179 // If this is parallel, this will be done later.
180 HeapRegionRemSet* hrrs = rem_set();
181 if (locked) {
182 hrrs->clear_locked();
183 } else {
184 hrrs->clear();
185 }
186 }
187 zero_marked_bytes();
188
189 init_top_at_mark_start();
190 _gc_time_stamp = G1CollectedHeap::heap()->get_gc_time_stamp();
191 if (clear_space) clear(SpaceDecorator::Mangle);
192 }
193
void HeapRegion::par_clear() {
  assert(used() == 0, "the region should have been already cleared");
  assert(capacity() == HeapRegion::GrainBytes, "should be back to normal");
  HeapRegionRemSet* hrrs = rem_set();
  hrrs->clear();
  // Also reset every card covering this (now empty) region to clean.
  CardTableModRefBS* ct_bs =
    barrier_set_cast<CardTableModRefBS>(G1CollectedHeap::heap()->barrier_set());
  ct_bs->clear(MemRegion(bottom(), end()));
}
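
// par_clear() appears to be the deferred counterpart of the "!par" branch
// in hr_clear() above. A sketch of the assumed call sequence (the ordering
// shown is illustrative, not taken from this file):
//
//   region->hr_clear(true /* par */, false /* clear_space */);
//   ...                    // remembered set is still intact here
//   region->par_clear();   // later, e.g. on a worker thread: remset + cards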

void HeapRegion::calc_gc_efficiency() {
  // GC efficiency is the ratio of how much space would be
  // reclaimed over how long we predict it would take to reclaim it.
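  // As an illustrative example (numbers assumed, not measured): a region
  // with 2M of reclaimable space predicted to take 4ms to collect scores
  // 2M / 4ms = 0.5M per ms, and is a better collection candidate than a
  // region scoring 0.1M per ms.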
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1p = g1h->g1_policy();

  // Retrieve a prediction of the elapsed time for this region for
  // ...

HeapWord* G1ContiguousSpace::cross_threshold(HeapWord* start,
                                             HeapWord* end) {
  _bot_part.alloc_block(start, end);
  return _bot_part.threshold();
}

HeapWord* G1ContiguousSpace::scan_top() const {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapWord* local_top = top();
  // Read top() before the time stamp (see record_timestamp()).
  OrderAccess::loadload();
  const unsigned local_time_stamp = _gc_time_stamp;
  assert(local_time_stamp <= g1h->get_gc_time_stamp(), "invariant");
  if (local_time_stamp < g1h->get_gc_time_stamp()) {
    // Stale time stamp: record_timestamp() has not run for this region
    // during the current GC, so scanning up to the top() read above is safe.
    return local_top;
  } else {
    // Current time stamp: only the part up to the recorded _scan_top is
    // safe to scan.
    return _scan_top;
  }
}

void G1ContiguousSpace::record_timestamp() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint curr_gc_time_stamp = g1h->get_gc_time_stamp();

  if (_gc_time_stamp < curr_gc_time_stamp) {
    // Setting the time stamp here tells concurrent readers to look at
    // scan_top to know the maximum allowed address to look at.

    // scan_top should be bottom for all regions except for the
    // retained old alloc region which should have scan_top == top.
    HeapWord* st = _scan_top;
    guarantee(st == _bottom || st == _top, "invariant");

    _gc_time_stamp = curr_gc_time_stamp;
  }
}
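
// Taken together, record_timestamp() is the writer side and scan_top() the
// reader side of a simple publication protocol: _scan_top is recorded first,
// then the time stamp is set. A minimal sketch of a reader that bounds its
// scan of a region (the function name is illustrative, not from this file):
//
//   void scan_region_safely(HeapRegion* r) {
//     HeapWord* const limit = r->scan_top();  // never beyond objects that
//     for (HeapWord* cur = r->bottom();       // existed when the GC started
//          cur < limit;
//          cur += oop(cur)->size()) {
//       // ... process oop(cur) ...
//     }
//   }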

void G1ContiguousSpace::record_retained_region() {
  // scan_top is the maximum address where it's safe for the next gc to
  // scan this region.
  _scan_top = top();
}

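// A sketch of the assumed order of operations around a GC pause for the
// retained old allocation region (the driver sequence shown here is
// illustrative, not part of this file):
//
//   retained->record_retained_region();  // _scan_top = top, pre-pause state
//   // ... the global GC time stamp is incremented ...
//   retained->record_timestamp();        // publish: readers now use _scan_top
//   // on concurrent or GC worker threads:
//   HeapWord* limit = retained->scan_top();  // == the recorded _scan_top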