src/share/vm/gc_implementation/g1/g1EvacFailure.hpp

Webrev diff export (browser "Print this page" capture). Two revisions of the
same region of this header follow, each with webrev line numbers embedded:
rev 7107 : imported patch 8058298 (baseline, claim-value region claiming)
rev 7109 : imported patch rev2 (replaces claim values with a HeapRegionClaimer)


 160       obj->oop_iterate(_update_rset_cl);
 161     } else {
 162 
 163       // The object has been either evacuated or is dead. Fill it with a
 164       // dummy object.
 165       MemRegion mr(obj_addr, obj_size);
 166       CollectedHeap::fill_with_object(mr);
 167 
 168       // must nuke all dead objects which we skipped when iterating over the region
 169       _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
 170     }
 171     _end_of_last_gap = obj_end;
 172     _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
 173   }
 174 };
 175 
 // Rev 7107 baseline: per-worker heap-region closure that walks collection-set
 // regions and, for each region on which evacuation failed, removes
 // self-forwarding pointers by iterating the region's objects with
 // RemoveSelfForwardPtrObjClosure.  Parallel workers coordinate via the legacy
 // per-region claim-value scheme (HeapRegion::ParEvacFailureClaimValue).
 176 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
 177   G1CollectedHeap* _g1h;
 178   ConcurrentMark* _cm;
 179   uint _worker_id;

 180 
     // Worker-local queue for deferred remembered-set updates; handed to
     // _update_rset_cl so RSet work found during object iteration is enqueued.
 181   DirtyCardQueue _dcq;
 182   UpdateRSetDeferred _update_rset_cl;
 183 
 184 public:
 185   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
 186                                 uint worker_id) :

     // NOTE(review): the initializer list is not in member-declaration order
     // (_cm is declared second but listed last).  Members are still constructed
     // in declaration order and no initializer reads a later-declared member
     // (_cm only reads _g1h, _update_rset_cl only reads _dcq), so behavior is
     // correct -- but this draws -Wreorder-style warnings on some compilers.
 187     _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
 188     _worker_id(worker_id), _cm(_g1h->concurrent_mark()) {

 189     }
 190 
     // Invoked once per collection-set region.  Always returns false, so the
     // caller's iteration continues over every region.
 191   bool doHeapRegion(HeapRegion *hr) {
 192     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
 193     bool during_conc_mark = _g1h->mark_in_progress();
 194 
 195     assert(!hr->is_humongous(), "sanity");
 196     assert(hr->in_collection_set(), "bad CS");
 197 
     // Claim the region with the evac-failure claim value so exactly one
     // worker processes it.
 198     if (hr->claimHeapRegion(HeapRegion::ParEvacFailureClaimValue)) {
 199       if (hr->evacuation_failed()) {
 200         RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
 201                                             during_initial_mark,
 202                                             during_conc_mark,
 203                                             _worker_id);
 204 
 205         hr->note_self_forwarding_removal_start(during_initial_mark,
 206                                                during_conc_mark);
 207         _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
 208 
 209         // In the common case (i.e. when there is no evacuation
 210         // failure) we make sure that the following is done when
 211         // the region is freed so that it is "ready-to-go" when it's
 212         // re-allocated. However, when evacuation failure happens, a
 213         // region will remain in the heap and might ultimately be added
 214         // to a CSet in the future. So we have to be careful here and
 215         // make sure the region's RSet is ready for parallel iteration
 216         // whenever this might be required in the future.
 217         hr->rem_set()->reset_for_par_iteration();
 218         hr->reset_bot();
 219         _update_rset_cl.set_region(hr);
     // Walk every object in the region; rspc fixes self-forwarded objects and
     // fills gaps left by evacuated/dead ones (see the fragment above).
 220         hr->object_iterate(&rspc);
 221 
 222         hr->rem_set()->clean_strong_code_roots(hr);
 223 
 224         hr->note_self_forwarding_removal_end(during_initial_mark,
 225                                              during_conc_mark,
 226                                              rspc.marked_bytes());
 227       }
 228     }
 229     return false;
 230   }
 231 };
 232 
 // Rev 7107 baseline: gang task that fans the self-forwarding-pointer removal
 // out across GC workers.  Each worker builds its own closure (and thus its
 // own dirty card queue) and iterates the collection set starting from a
 // per-worker region, relying on the region claim values for exclusion.
 233 class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
 234 protected:
 235   G1CollectedHeap* _g1h;

 236 
 237 public:
 238   G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
 239     AbstractGangTask("G1 Remove Self-forwarding Pointers"),
 240     _g1h(g1h) { }
 241 
     // Per-worker entry point; worker_id staggers each worker's starting
     // region so they spread over the collection set.
 242   void work(uint worker_id) {
 243     RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id);
 244 
 245     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
 246     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
 247   }
 248 };
 249 
 250 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP


 160       obj->oop_iterate(_update_rset_cl);
 161     } else {
 162 
 163       // The object has been either evacuated or is dead. Fill it with a
 164       // dummy object.
 165       MemRegion mr(obj_addr, obj_size);
 166       CollectedHeap::fill_with_object(mr);
 167 
 168       // must nuke all dead objects which we skipped when iterating over the region
 169       _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
 170     }
 171     _end_of_last_gap = obj_end;
 172     _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
 173   }
 174 };
 175 
 // Rev 7109 (rev2): same closure as the baseline, but parallel coordination
 // now goes through an explicit HeapRegionClaimer instead of per-region claim
 // values.  The defaulted constructor arguments (worker_id = 0,
 // hrclaimer = NULL) additionally allow serial, claimer-less use.
 176 class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
 177   G1CollectedHeap* _g1h;
 178   ConcurrentMark* _cm;
 179   uint _worker_id;
     // NULL when used serially; in parallel it arbitrates region ownership.
 180   HeapRegionClaimer* _hrclaimer;
 181 
     // Worker-local queue for deferred remembered-set updates; handed to
     // _update_rset_cl so RSet work found during object iteration is enqueued.
 182   DirtyCardQueue _dcq;
 183   UpdateRSetDeferred _update_rset_cl;
 184 
 185 public:
 186   RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
 187                                 uint worker_id = 0,
 188                                 HeapRegionClaimer* hrclaimer = NULL) :
     // NOTE(review): initializer list is not in member-declaration order;
     // construction still follows declaration order and no initializer reads a
     // later-declared member, so this is correct (may warn under -Wreorder).
 189       _g1h(g1h), _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq),
 190       _worker_id(worker_id), _cm(_g1h->concurrent_mark()), _hrclaimer(hrclaimer) {
     // Serial use (no claimer) is only legal for worker 0.
 191     assert(hrclaimer != NULL || worker_id == 0, "Must use a HeapRegionClaimer when used in parallel.");
 192   }
 193 
     // Invoked once per collection-set region.  Always returns false, so the
     // caller's iteration continues over every region.
 194   bool doHeapRegion(HeapRegion *hr) {
 195     bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
 196     bool during_conc_mark = _g1h->mark_in_progress();
 197 
 198     assert(!hr->is_humongous(), "sanity");
 199     assert(hr->in_collection_set(), "bad CS");
 200 
     // Serial mode (_hrclaimer == NULL) processes every region; parallel mode
     // processes a region only if this worker wins the claim by region index.
 201     if (_hrclaimer == NULL || _hrclaimer->claim_region(hr->hrm_index())) {
 202       if (hr->evacuation_failed()) {
 203         RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
 204                                             during_initial_mark,
 205                                             during_conc_mark,
 206                                             _worker_id);
 207 
 208         hr->note_self_forwarding_removal_start(during_initial_mark,
 209                                                during_conc_mark);
 210         _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);
 211 
 212         // In the common case (i.e. when there is no evacuation
 213         // failure) we make sure that the following is done when
 214         // the region is freed so that it is "ready-to-go" when it's
 215         // re-allocated. However, when evacuation failure happens, a
 216         // region will remain in the heap and might ultimately be added
 217         // to a CSet in the future. So we have to be careful here and
 218         // make sure the region's RSet is ready for parallel iteration
 219         // whenever this might be required in the future.
 220         hr->rem_set()->reset_for_par_iteration();
 221         hr->reset_bot();
 222         _update_rset_cl.set_region(hr);
     // Walk every object in the region; rspc fixes self-forwarded objects and
     // fills gaps left by evacuated/dead ones (see the fragment above).
 223         hr->object_iterate(&rspc);
 224 
 225         hr->rem_set()->clean_strong_code_roots(hr);
 226 
 227         hr->note_self_forwarding_removal_end(during_initial_mark,
 228                                              during_conc_mark,
 229                                              rspc.marked_bytes());
 230       }
 231     }
 232     return false;
 233   }
 234 };
 235 
 // Rev 7109 (rev2): gang task that fans the self-forwarding-pointer removal
 // out across GC workers.  Unlike the baseline, the task itself owns a
 // HeapRegionClaimer (sized to the active worker count) that all workers
 // share to arbitrate region ownership.
 236 class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
 237 protected:
 238   G1CollectedHeap* _g1h;
     // Shared by all workers; sized for active_workers() in the constructor.
 239   HeapRegionClaimer _hrclaimer;
 240 
 241 public:
 242   G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
 243       AbstractGangTask("G1 Remove Self-forwarding Pointers"), _g1h(g1h),
 244       _hrclaimer(g1h->workers()->active_workers()) {}
 245 
     // Per-worker entry point; worker_id staggers each worker's starting
     // region, and the shared claimer guarantees single ownership per region.
 246   void work(uint worker_id) {
 247     RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id, &_hrclaimer);
 248 
 249     HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
 250     _g1h->collection_set_iterate_from(hr, &rsfp_cl);
 251   }
 252 };
 253 
 254 #endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP