src/share/vm/gc_implementation/g1/g1EvacFailure.hpp

rev 7056 : [mq]: 8058298

Old version:
 160       obj->oop_iterate(_update_rset_cl);
 161     } else {
 162 
 163       // The object has been either evacuated or is dead. Fill it with a
 164       // dummy object.
 165       MemRegion mr(obj_addr, obj_size);
 166       CollectedHeap::fill_with_object(mr);
 167 
 168       // must nuke all dead objects which we skipped when iterating over the region
 169       _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
 170     }
 171     _end_of_last_gap = obj_end;
 172     _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
 173   }
 174 };
 175 
 176 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
 177   G1CollectedHeap* _g1h;
 178   ConcurrentMark* _cm;
 179   uint _worker_id;
 180 
 181   DirtyCardQueue _dcq;
 182   UpdateRSetDeferred _update_rset_cl;
 183 
 184 public:
 185   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
 186                                 uint worker_id) :
 187     _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
 188     _worker_id(worker_id), _cm(_g1h->concurrent_mark()) {
 189     }
 190 
 191   bool doHeapRegion(HeapRegion *hr) {
 192     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
 193     bool during_conc_mark = _g1h->mark_in_progress();
 194 
 195     assert(!hr->isHumongous(), "sanity");
 196     assert(hr->in_collection_set(), "bad CS");
 197 
 198     if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
 199       if (hr->evacuation_failed()) {
 200         RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
 201                                             during_initial_mark,
 202                                             during_conc_mark,
 203                                             _worker_id);
 204 
 205         hr->note_self_forwarding_removal_start(during_initial_mark,
 206                                                during_conc_mark);
 207         _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
 208 
 209         // In the common case (i.e. when there is no evacuation
 210         // failure) we make sure that the following is done when
 211         // the region is freed so that it is "ready-to-go" when it's
 212         // re-allocated. However, when evacuation failure happens, a
 213         // region will remain in the heap and might ultimately be added
 214         // to a CSet in the future. So we have to be careful here and
 215         // make sure the region's RSet is ready for parallel iteration
 216         // whenever this might be required in the future.
 217         hr->rem_set()->reset_for_par_iteration();
 218         hr->reset_bot();
 219         _update_rset_cl.set_region(hr);
 220         hr->object_iterate(&rspc);
 221 
 222         hr->rem_set()->clean_strong_code_roots(hr);
 223 
 224         hr->note_self_forwarding_removal_end(during_initial_mark,
 225                                              during_conc_mark,
 226                                              rspc.marked_bytes());
 227       }
 228     }
 229     return false;
 230   }
 231 };
 232 
 233 class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
 234 protected:
 235   G1CollectedHeap* _g1h;
 236 
 237 public:
 238   G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
 239     AbstractGangTask("G1 Remove Self-forwarding Pointers"),
 240     _g1h(g1h) { }
 241 
 242   void work(uint worker_id) {
 243     RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);
 244 
 245     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
 246     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
 247   }
 248 };
 249 
 250 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
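
In this version a worker claims a collection-set region for evacuation-failure handling by tagging the HeapRegion itself with a claim value (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)), so the claim state lives on the region and has to be reset before another parallel phase can reuse it. A minimal standalone sketch of that idea, using hypothetical names (Region, claim_value, UNCLAIMED) and a compare-and-swap on a per-region field, not the actual HeapRegion API:

    // Simplified illustration of per-region claim values; hypothetical types,
    // not the HotSpot HeapRegion implementation.
    #include <atomic>
    #include <cstdint>

    struct Region {
      static const uint32_t UNCLAIMED = 0;           // hypothetical "initial" claim value
      std::atomic<uint32_t> claim_value{UNCLAIMED};  // claim state stored on the region itself

      // Exactly one caller wins the swap UNCLAIMED -> value and gets to
      // process the region; everyone else sees it already claimed.
      bool claim(uint32_t value) {
        uint32_t expected = UNCLAIMED;
        return claim_value.compare_exchange_strong(expected, value);
      }
    };

Because the claim state sits on the regions, each parallel phase needs its own claim-value constant (here ParEvacFailureClaimValue) and the values have to be reset across the heap before the next phase, which is the bookkeeping the new version below replaces.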

New version (with HeapRegionClaimer):
 160       obj->oop_iterate(_update_rset_cl);
 161     } else {
 162 
 163       // The object has been either evacuated or is dead. Fill it with a
 164       // dummy object.
 165       MemRegion mr(obj_addr, obj_size);
 166       CollectedHeap::fill_with_object(mr);
 167 
 168       // must nuke all dead objects which we skipped when iterating over the region
 169       _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
 170     }
 171     _end_of_last_gap = obj_end;
 172     _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
 173   }
 174 };
 175 
 176 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
 177   G1CollectedHeap* _g1h;
 178   ConcurrentMark* _cm;
 179   uint _worker_id;
 180   HeapRegionClaimer* _hrclaimer;
 181 
 182   DirtyCardQueue _dcq;
 183   UpdateRSetDeferred _update_rset_cl;
 184 
 185 public:
 186   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
 187                                 uint worker_id,
 188                                 HeapRegionClaimer* hrclaimer) :
 189       _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
 190       _worker_id(worker_id), _cm(_g1h->concurrent_mark()), _hrclaimer(hrclaimer) {
 191   }
 192 
 193   bool doHeapRegion(HeapRegion *hr) {
 194     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
 195     bool during_conc_mark = _g1h->mark_in_progress();
 196 
 197     assert(!hr->isHumongous(), "sanity");
 198     assert(hr->in_collection_set(), "bad CS");
 199 
 200     if (_hrclaimer->claim_region(hr->hrm_index())) {
 201       if (hr->evacuation_failed()) {
 202         RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
 203                                             during_initial_mark,
 204                                             during_conc_mark,
 205                                             _worker_id);
 206 
 207         hr->note_self_forwarding_removal_start(during_initial_mark,
 208                                                during_conc_mark);
 209         _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
 210 
 211         // In the common case (i.e. when there is no evacuation
 212         // failure) we make sure that the following is done when
 213         // the region is freed so that it is "ready-to-go" when it's
 214         // re-allocated. However, when evacuation failure happens, a
 215         // region will remain in the heap and might ultimately be added
 216         // to a CSet in the future. So we have to be careful here and
 217         // make sure the region's RSet is ready for parallel iteration
 218         // whenever this might be required in the future.
 219         hr->rem_set()->reset_for_par_iteration();
 220         hr->reset_bot();
 221         _update_rset_cl.set_region(hr);
 222         hr->object_iterate(&rspc);
 223 
 224         hr->rem_set()->clean_strong_code_roots(hr);
 225 
 226         hr->note_self_forwarding_removal_end(during_initial_mark,
 227                                              during_conc_mark,
 228                                              rspc.marked_bytes());
 229       }
 230     }
 231     return false;
 232   }
 233 };
 234 
 235 class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
 236 protected:
 237   G1CollectedHeap* _g1h;
 238   HeapRegionClaimer* _hrclaimer;
 239 
 240 public:
 241   G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h, HeapRegionClaimer* hrclaimer) :
 242       AbstractGangTask("G1 Remove Self-forwarding Pointers"), _g1h(g1h), _hrclaimer(hrclaimer) {}
 243 
 244   void work(uint worker_id) {
 245     RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id, _hrclaimer);
 246 
 247     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
 248     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
 249   }
 250 };
 251 
 252 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
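
The new version drops the per-region claim values: regions are claimed through an external HeapRegionClaimer keyed by the region index (hr->hrm_index()), which G1ParRemoveSelfForwardPtrsTask receives in its constructor and passes to each per-worker RemoveSelfForwardPtrHRClosure. A minimal standalone sketch of that claiming idea, assuming a plain array of per-region flags and one compare-and-swap per index (hypothetical RegionClaimer type, not the actual HeapRegionClaimer implementation):

    // Simplified illustration of index-based region claiming; hypothetical type,
    // not the HotSpot HeapRegionClaimer implementation.
    #include <atomic>
    #include <cstddef>
    #include <vector>

    class RegionClaimer {
      std::vector<std::atomic<bool>> _claimed;  // one flag per heap region, all false initially

    public:
      explicit RegionClaimer(std::size_t num_regions) : _claimed(num_regions) {}

      // Exactly one worker wins the flip false -> true for a given index;
      // later callers see the flag set and skip the region.
      bool claim_region(std::size_t region_index) {
        bool expected = false;
        return _claimed[region_index].compare_exchange_strong(expected, true);
      }
    };

    // Sketch of how the task above would use it: one claimer per parallel pass,
    // shared by all workers, so nothing has to be reset on the regions afterwards.
    // RegionClaimer claimer(num_heap_regions);
    // if (claimer.claim_region(region_index)) { /* this worker handles the region */ }

Keeping the claim state in a per-iteration claimer object rather than on the HeapRegions themselves means a fresh claimer can simply be constructed for each parallel pass, with no claim-value constants to define and no reset or verification sweep over the heap between phases.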