src/share/vm/gc_implementation/g1/g1EvacFailure.hpp

Print this page
rev 7107 : imported patch for bug JDK-8058298
rev 7109 : imported patch, second revision (rev2) of the same change


 167 
 168       // must nuke all dead objects which we skipped when iterating over the region
 169       _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
 170     }
 171     _end_of_last_gap = obj_end;
 172     _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
 173   }
 174 };
 175 
 176 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
 177   G1CollectedHeap* _g1h;
 178   ConcurrentMark* _cm;
 179   uint _worker_id;
 180   HeapRegionClaimer* _hrclaimer;
 181 
 182   DirtyCardQueue _dcq;
 183   UpdateRSetDeferred _update_rset_cl;
 184 
 185 public:
 186   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
 187                                 uint worker_id,
 188                                 HeapRegionClaimer* hrclaimer) :
 189       _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
 190       _worker_id(worker_id), _cm(_g1h->concurrent_mark()), _hrclaimer(hrclaimer) {

 191   }
 192 
 193   bool doHeapRegion(HeapRegion *hr) {
 194     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
 195     bool during_conc_mark = _g1h->mark_in_progress();
 196 
 197     assert(!hr->is_humongous(), "sanity");
 198     assert(hr->in_collection_set(), "bad CS");
 199 
 200     if (_hrclaimer->claim_region(hr->hrm_index())) {
 201       if (hr->evacuation_failed()) {
 202         RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
 203                                             during_initial_mark,
 204                                             during_conc_mark,
 205                                             _worker_id);
 206 
 207         hr->note_self_forwarding_removal_start(during_initial_mark,
 208                                                during_conc_mark);
 209         _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
 210 
 211         // In the common case (i.e. when there is no evacuation
 212         // failure) we make sure that the following is done when
 213         // the region is freed so that it is "ready-to-go" when it's
 214         // re-allocated. However, when evacuation failure happens, a
 215         // region will remain in the heap and might ultimately be added
 216         // to a CSet in the future. So we have to be careful here and
 217         // make sure the region's RSet is ready for parallel iteration
 218         // whenever this might be required in the future.
 219         hr->rem_set()->reset_for_par_iteration();
 220         hr->reset_bot();
 221         _update_rset_cl.set_region(hr);
 222         hr->object_iterate(&rspc);
 223 
 224         hr->rem_set()->clean_strong_code_roots(hr);
 225 
 226         hr->note_self_forwarding_removal_end(during_initial_mark,
 227                                              during_conc_mark,
 228                                              rspc.marked_bytes());
 229       }
 230     }
 231     return false;
 232   }
 233 };
 234 
 235 class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
 236 protected:
 237   G1CollectedHeap* _g1h;
 238   HeapRegionClaimer* _hrclaimer;
 239 
 240 public:
 241   G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h, HeapRegionClaimer* hrclaimer) :
 242       AbstractGangTask("G1 Remove Self-forwarding Pointers"), _g1h(g1h), _hrclaimer(hrclaimer) {}

 243 
 244   void work(uint worker_id) {
 245     RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id, _hrclaimer);
 246 
 247     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
 248     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
 249   }
 250 };
 251 
 252 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP


 167 
 168       // must nuke all dead objects which we skipped when iterating over the region
 169       _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
 170     }
 171     _end_of_last_gap = obj_end;
 172     _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
 173   }
 174 };
 175 
 176 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
 177   G1CollectedHeap* _g1h;
 178   ConcurrentMark* _cm;
 179   uint _worker_id;
 180   HeapRegionClaimer* _hrclaimer;
 181 
 182   DirtyCardQueue _dcq;
 183   UpdateRSetDeferred _update_rset_cl;
 184 
 185 public:
 186   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
 187                                 uint worker_id = 0,
 188                                 HeapRegionClaimer* hrclaimer = NULL) :
 189       _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
 190       _worker_id(worker_id), _cm(_g1h->concurrent_mark()), _hrclaimer(hrclaimer) {
 191     assert(hrclaimer != NULL || worker_id == 0, "Must use a HeapRegionClaimer when used in parallel.");
 192   }
 193 
 194   bool doHeapRegion(HeapRegion *hr) {
 195     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
 196     bool during_conc_mark = _g1h->mark_in_progress();
 197 
 198     assert(!hr->is_humongous(), "sanity");
 199     assert(hr->in_collection_set(), "bad CS");
 200 
 201     if (_hrclaimer == NULL || _hrclaimer->claim_region(hr->hrm_index())) {
 202       if (hr->evacuation_failed()) {
 203         RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
 204                                             during_initial_mark,
 205                                             during_conc_mark,
 206                                             _worker_id);
 207 
 208         hr->note_self_forwarding_removal_start(during_initial_mark,
 209                                                during_conc_mark);
 210         _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
 211 
 212         // In the common case (i.e. when there is no evacuation
 213         // failure) we make sure that the following is done when
 214         // the region is freed so that it is "ready-to-go" when it's
 215         // re-allocated. However, when evacuation failure happens, a
 216         // region will remain in the heap and might ultimately be added
 217         // to a CSet in the future. So we have to be careful here and
 218         // make sure the region's RSet is ready for parallel iteration
 219         // whenever this might be required in the future.
 220         hr->rem_set()->reset_for_par_iteration();
 221         hr->reset_bot();
 222         _update_rset_cl.set_region(hr);
 223         hr->object_iterate(&rspc);
 224 
 225         hr->rem_set()->clean_strong_code_roots(hr);
 226 
 227         hr->note_self_forwarding_removal_end(during_initial_mark,
 228                                              during_conc_mark,
 229                                              rspc.marked_bytes());
 230       }
 231     }
 232     return false;
 233   }
 234 };
 235 
 236 class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
 237 protected:
 238   G1CollectedHeap* _g1h;
 239   HeapRegionClaimer _hrclaimer;
 240 
 241 public:
 242   G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
 243       AbstractGangTask("G1 Remove Self-forwarding Pointers"), _g1h(g1h),
 244       _hrclaimer(g1h->workers()->active_workers()) {}
 245 
 246   void work(uint worker_id) {
 247     RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id, &_hrclaimer);
 248 
 249     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
 250     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
 251   }
 252 };
 253 
 254 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP