
src/share/vm/gc/g1/g1EvacFailure.cpp

rev 10493 : [mq]: 8077144-concurrent-mark-thread-init-fix


  78     _g1(G1CollectedHeap::heap()),
  79     _cm(_g1->concurrent_mark()),
  80     _hr(hr),
  81     _marked_bytes(0),
  82     _update_rset_cl(update_rset_cl),
  83     _during_initial_mark(during_initial_mark),
  84     _worker_id(worker_id),
  85     _last_forwarded_object_end(hr->bottom()) { }
  86 
  87   size_t marked_bytes() { return _marked_bytes; }
  88 
  89   // Iterate over the live objects in the region to find self-forwarded objects
  90   // that need to be kept live. We need to update the remembered sets of these
  91   // objects. Further update the BOT and marks.
  92   // We can coalesce and overwrite the remaining heap contents with dummy objects,
  93   // as they are either already dead or are the old copies of evacuated objects
  94   // (which are unreferenced now, i.e. dead too).
  95   void do_object(oop obj) {
  96     HeapWord* obj_addr = (HeapWord*) obj;
  97     assert(_hr->is_in(obj_addr), "sanity");
  98     size_t obj_size = obj->size();
  99     HeapWord* obj_end = obj_addr + obj_size;
 100 
 101     if (obj->is_forwarded() && obj->forwardee() == obj) {
 102       // The object failed to move.
 103 
 104       zap_dead_objects(_last_forwarded_object_end, obj_addr);
 105       // We consider all objects that we find self-forwarded to be
 106       // live. We keep them live by updating the prev marking info so
 107       // that they are all under PTAMS and explicitly marked.
 108       if (!_cm->isPrevMarked(obj)) {
 109         _cm->markPrev(obj);
 110       }
 111       if (_during_initial_mark) {
 112         // For the next marking info we'll only mark the
 113         // self-forwarded objects explicitly if we are in an
 114         // initial-mark pause (since, normally, we only mark objects pointed
 115         // to by roots if we succeed in copying them). By marking all
 116         // self-forwarded objects we ensure that we mark any that are
 117         // still pointed to by roots. During concurrent marking, and
 118         // after initial-mark, we don't need to mark any objects
 119         // explicitly and all objects in the CSet are considered
 120         // (implicitly) live. So, we won't mark them explicitly and
 121         // we'll leave them over NTAMS.
 122         _cm->grayRoot(obj, obj_size, _worker_id, _hr);
 123       }


 124       _marked_bytes += (obj_size * HeapWordSize);
 125       obj->set_mark(markOopDesc::prototype());
 126 
 127       // While we were processing RSet buffers during the collection,
 128       // we actually didn't scan any cards on the collection set,
 129       // since we didn't want to update remembered sets with entries
 130       // that point into the collection set, given that live objects
 131       // from the collection set are about to move and such entries
 132       // will be stale very soon.
 133       // This change also dealt with a reliability issue which
 134       // involved scanning a card in the collection set and coming
 135       // across an array that was being chunked and looking malformed.
 136       // The problem is that, if evacuation fails, we might have
 137       // remembered set entries missing given that we skipped cards on
 138       // the collection set. So, we'll recreate such entries now.
 139       obj->oop_iterate(_update_rset_cl);
 140 

 141       _last_forwarded_object_end = obj_end;
 142       _hr->cross_threshold(obj_addr, obj_end);
 143     }
 144   }
 145 
 146   // Fill the memory area from start to end with filler objects, and update the BOT
 147   // and the mark bitmap accordingly.
 148   void zap_dead_objects(HeapWord* start, HeapWord* end) {
 149     if (start == end) {
 150       return;
 151     }
 152 
 153     size_t gap_size = pointer_delta(end, start);
 154     MemRegion mr(start, gap_size);
 155     if (gap_size >= CollectedHeap::min_fill_size()) {
 156       CollectedHeap::fill_with_objects(start, gap_size);
 157 
 158       HeapWord* end_first_obj = start + ((oop)start)->size();
 159       _hr->cross_threshold(start, end_first_obj);
 160       // Fill_with_objects() may have created multiple (i.e. two)
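
The closure above relies on HotSpot's self-forwarding convention: when an evacuation attempt fails, the object is forwarded to itself, so later phases can recognize it as "failed to move" and keep it live in place. Here is a minimal, self-contained sketch of that convention (an illustrative model only; DemoObject and its simplified forwarding field are assumptions, not HotSpot's mark-word encoding):

#include <cassert>
#include <cstdio>

// Illustrative stand-in for an object header that can hold a forwarding pointer.
struct DemoObject {
  DemoObject* forwardee = nullptr;            // set once evacuation is attempted

  bool is_forwarded() const { return forwardee != nullptr; }
  void forward_to(DemoObject* new_copy) { forwardee = new_copy; }  // copy succeeded
  void forward_to_self()                { forwardee = this; }      // copy failed
};

int main() {
  DemoObject moved, copy, stuck;
  moved.forward_to(&copy);   // evacuation succeeded
  stuck.forward_to_self();   // evacuation failed

  // The same test do_object() performs: "self-forwarded" means the object is
  // forwarded and its forwardee is the object itself.
  assert(moved.is_forwarded() && moved.forwardee != &moved);
  assert(stuck.is_forwarded() && stuck.forwardee == &stuck);
  printf("stuck failed to move: %s\n",
         (stuck.is_forwarded() && stuck.forwardee == &stuck) ? "yes" : "no");
  return 0;
}
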




  78     _g1(G1CollectedHeap::heap()),
  79     _cm(_g1->concurrent_mark()),
  80     _hr(hr),
  81     _marked_bytes(0),
  82     _update_rset_cl(update_rset_cl),
  83     _during_initial_mark(during_initial_mark),
  84     _worker_id(worker_id),
  85     _last_forwarded_object_end(hr->bottom()) { }
  86 
  87   size_t marked_bytes() { return _marked_bytes; }
  88 
  89   // Iterate over the live objects in the region to find self-forwarded objects
  90   // that need to be kept live. We need to update the remembered sets of these
  91   // objects. Further update the BOT and marks.
  92   // We can coalesce and overwrite the remaining heap contents with dummy objects,
  93   // as they are either already dead or are the old copies of evacuated objects
  94   // (which are unreferenced now, i.e. dead too).
  95   void do_object(oop obj) {
  96     HeapWord* obj_addr = (HeapWord*) obj;
  97     assert(_hr->is_in(obj_addr), "sanity");


  98 
  99     if (obj->is_forwarded() && obj->forwardee() == obj) {
 100       // The object failed to move.
 101 
 102       zap_dead_objects(_last_forwarded_object_end, obj_addr);
 103       // We consider all objects that we find self-forwarded to be
 104       // live. We keep them live by updating the prev marking info so
 105       // that they are all under PTAMS and explicitly marked.
 106       if (!_cm->isPrevMarked(obj)) {
 107         _cm->markPrev(obj);
 108       }
 109       if (_during_initial_mark) {
 110         // For the next marking info we'll only mark the
 111         // self-forwarded objects explicitly if we are in an
 112         // initial-mark pause (since, normally, we only mark objects pointed
 113         // to by roots if we succeed in copying them). By marking all
 114         // self-forwarded objects we ensure that we mark any that are
 115         // still pointed to by roots. During concurrent marking, and
 116         // after initial-mark, we don't need to mark any objects
 117         // explicitly and all objects in the CSet are considered
 118         // (implicitly) live. So, we won't mark them explicitly and
 119         // we'll leave them over NTAMS.
 120         _cm->grayRoot(obj, _hr);
 121       }
 122       size_t obj_size = obj->size();
 123 
 124       _marked_bytes += (obj_size * HeapWordSize);
 125       obj->set_mark(markOopDesc::prototype());
 126 
 127       // While we were processing RSet buffers during the collection,
 128       // we actually didn't scan any cards on the collection set,
 129       // since we didn't want to update remembered sets with entries
 130       // that point into the collection set, given that live objects
 131       // from the collection set are about to move and such entries
 132       // will be stale very soon.
 133       // This change also dealt with a reliability issue which
 134       // involved scanning a card in the collection set and coming
 135       // across an array that was being chunked and looking malformed.
 136       // The problem is that, if evacuation fails, we might have
 137       // remembered set entries missing given that we skipped cards on
 138       // the collection set. So, we'll recreate such entries now.
 139       obj->oop_iterate(_update_rset_cl);
 140 
 141       HeapWord* obj_end = obj_addr + obj_size;
 142       _last_forwarded_object_end = obj_end;
 143       _hr->cross_threshold(obj_addr, obj_end);
 144     }
 145   }
 146 
 147   // Fill the memory area from start to end with filler objects, and update the BOT
 148   // and the mark bitmap accordingly.
 149   void zap_dead_objects(HeapWord* start, HeapWord* end) {
 150     if (start == end) {
 151       return;
 152     }
 153 
 154     size_t gap_size = pointer_delta(end, start);
 155     MemRegion mr(start, gap_size);
 156     if (gap_size >= CollectedHeap::min_fill_size()) {
 157       CollectedHeap::fill_with_objects(start, gap_size);
 158 
 159       HeapWord* end_first_obj = start + ((oop)start)->size();
 160       _hr->cross_threshold(start, end_first_obj);
 161       // Fill_with_objects() may have created multiple (i.e. two)
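
zap_dead_objects() above plugs each gap between surviving self-forwarded objects with filler objects so the region stays parseable, updating the BOT via cross_threshold() for the first filler; the listing notes that fill_with_objects() may have created a second one. Below is a minimal, self-contained model of the size cases involved (a sketch only: kMinFillWords and kMaxFillWords are made-up stand-ins for CollectedHeap::min_fill_size() and the real maximum filler size, and the splitting policy is simplified):

#include <cstddef>
#include <cstdio>
#include <vector>

const size_t kMinFillWords = 2;    // stand-in for CollectedHeap::min_fill_size()
const size_t kMaxFillWords = 16;   // stand-in for the largest single filler object

// Model of fill_with_objects(): cover gap_words with one or more fillers,
// never leaving a tail smaller than the minimum fill size.
std::vector<size_t> fill_with_objects(size_t gap_words) {
  std::vector<size_t> fillers;
  while (gap_words > 0) {
    size_t sz = (gap_words > kMaxFillWords) ? kMaxFillWords : gap_words;
    if (gap_words - sz != 0 && gap_words - sz < kMinFillWords) {
      sz = gap_words - kMinFillWords;
    }
    fillers.push_back(sz);
    gap_words -= sz;
  }
  return fillers;
}

void zap_gap(size_t gap_words) {
  if (gap_words == 0) {
    return;                        // adjacent live objects, nothing to do
  }
  if (gap_words >= kMinFillWords) {
    std::vector<size_t> fillers = fill_with_objects(gap_words);
    // The real closure updates the BOT here: cross_threshold() for the first
    // filler, and again for a second filler if the gap needed one.
    printf("gap of %2zu words -> %zu filler object(s)\n", gap_words, fillers.size());
  } else {
    // Too small for a filler object; this sketch only models the fill decision.
    printf("gap of %2zu words -> below min fill size, no filler\n", gap_words);
  }
}

int main() {
  zap_gap(0);    // adjacent objects
  zap_gap(1);    // below the minimum fill size
  zap_gap(8);    // one filler covers it
  zap_gap(20);   // larger than one filler can cover -> two fillers
  return 0;
}
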

