/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP

#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "oops/markOop.inline.hpp"
#include "utilities/workgroup.hpp"

// Closures and tasks associated with any self-forwarding pointers
// installed as a result of an evacuation failure.
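
// Background: when evacuation of an object fails, the copying code
// cannot move the object out of the collection set, so it "forwards"
// the object to itself: the object's mark word is overwritten with a
// forwarding pointer that points back at the object's own address.
// A minimal sketch of that failure path (forward_self_on_failure is a
// hypothetical helper for illustration; the real logic lives in the
// evacuation code, not in this file):
//
//   oop forward_self_on_failure(oop old) {
//     // Try to install a self-forwarding pointer. A racing worker may
//     // have forwarded the object already; forward_to_atomic() then
//     // returns the winning forwardee and we use that instead.
//     oop forward_ptr = old->forward_to_atomic(old);
//     return (forward_ptr == NULL) ? old : forward_ptr;
//   }
//
// The closures and the task below walk the regions that had failures,
// restore the prototype mark word of each self-forwarded object, and
// repair the marking, remembered-set and BOT state around them.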

// Records remembered-set updates that must be deferred until after the
// pause: instead of scanning the card spanning the updated reference
// immediately, the card is marked deferred and enqueued on a
// DirtyCardQueue.
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue* _dcq;
  G1SATBCardTableModRefBS* _ct_bs;

public:
  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _dcq(dcq), _ct_bs(_g1->g1_barrier_set()) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
        !_from->is_survivor()) {
      size_t card_index = _ct_bs->index_for(p);
      // Mark the card deferred and, if we were the thread that marked
      // it, enqueue it for processing after the pause.
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};

class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _marked_bytes;
  OopsInHeapRegionClosure* _update_rset_cl;
  bool _during_initial_mark;
  bool _during_conc_mark;
  uint _worker_id;
  HeapWord* _end_of_last_gap;
  HeapWord* _last_gap_threshold;
  HeapWord* _last_obj_threshold;

public:
  RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                 HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl,
                                 bool during_initial_mark,
                                 bool during_conc_mark,
                                 uint worker_id) :
    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _during_conc_mark(during_conc_mark),
    _worker_id(worker_id),
    _end_of_last_gap(hr->bottom()),
    _last_gap_threshold(hr->bottom()),
    _last_obj_threshold(hr->bottom()) { }

  size_t marked_bytes() { return _marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular if there were two TLABs, one of them partially refined.
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of the TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    HeapWord* obj_end = obj_addr + obj_size;

    if (_end_of_last_gap != obj_addr) {
      // There was a gap before obj_addr.
      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
    }

    markOop m = obj->mark();
    // A marked mark word is a forwarding pointer; if it points back at
    // the object itself, evacuation of this object failed.
    if (m->is_marked() && ((oop)m->decode_pointer() == obj)) {

      // The object failed to move.

      // We consider all objects that we find self-forwarded to be
      // live. We'll update the prev marking info so that they are all
      // under PTAMS and explicitly marked.
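      // (PTAMS / NTAMS are the region's previous / next "top at mark
      // start" pointers: an object below PTAMS is live for the previous
      // marking only if it is marked on the previous bitmap, which is
      // exactly the state established here for self-forwarded objects.)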
      if (!_cm->isPrevMarked(obj)) {
        _cm->markPrev(obj);
      }
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly if we are during
        // initial-mark (since, normally, we only mark objects pointed
        // to by roots if we succeed in copying them). By marking all
        // self-forwarded objects we ensure that we mark any that are
        // still pointed to by roots. During concurrent marking, and
        // after initial-mark, we don't need to mark any objects
        // explicitly and all objects in the CSet are considered
        // (implicitly) live. So, we won't mark them explicitly and
        // we'll leave them over NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
      // Restore the prototype (unforwarded) mark word, removing the
      // self-forwarding pointer.
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards on the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);
    } else {

      // The object either has been evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr(obj_addr, obj_size);
      CollectedHeap::fill_with_object(mr);

      // We must nuke all dead objects that we skipped when iterating
      // over the region: clear their marks on the prev bitmap.
      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
    }
    _end_of_last_gap = obj_end;
    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
  }
};

class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  uint _worker_id;
  HeapRegionClaimer* _hrclaimer;

  DirtyCardQueue _dcq;
  UpdateRSetDeferred _update_rset_cl;

public:
  RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
                                uint worker_id,
                                HeapRegionClaimer* hrclaimer) :
    _g1h(g1h), _cm(g1h->concurrent_mark()), _worker_id(worker_id),
    _hrclaimer(hrclaimer),
    _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq) {
  }

  bool doHeapRegion(HeapRegion* hr) {
    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
    bool during_conc_mark = _g1h->mark_in_progress();

    assert(!hr->is_humongous(), "sanity");
    assert(hr->in_collection_set(), "bad CS");

    if (_hrclaimer->claim_region(hr->hrm_index())) {
      if (hr->evacuation_failed()) {
        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                            during_initial_mark,
                                            during_conc_mark,
                                            _worker_id);

        hr->note_self_forwarding_removal_start(during_initial_mark,
                                               during_conc_mark);
        _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);

        // In the common case (i.e. when there is no evacuation
        // failure) we make sure that the following is done when
        // the region is freed so that it is "ready-to-go" when it's
        // re-allocated.
        // However, when evacuation failure happens, a
        // region will remain in the heap and might ultimately be added
        // to a CSet in the future. So we have to be careful here and
        // make sure the region's RSet is ready for parallel iteration
        // whenever this might be required in the future.
        hr->rem_set()->reset_for_par_iteration();
        hr->reset_bot();
        _update_rset_cl.set_region(hr);
        hr->object_iterate(&rspc);

        hr->rem_set()->clean_strong_code_roots(hr);

        hr->note_self_forwarding_removal_end(during_initial_mark,
                                             during_conc_mark,
                                             rspc.marked_bytes());
      }
    }
    return false;
  }
};

class G1ParRemoveSelfForwardPtrsTask: public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Remove Self-forwarding Pointers"), _g1h(g1h),
    _hrclaimer(g1h->workers()->active_workers()) {}

  void work(uint worker_id) {
    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id, &_hrclaimer);

    // Each worker starts at a different point in the collection set to
    // reduce claim contention, then iterates (wrapping around) over the
    // CSet regions; the HeapRegionClaimer ensures each region is
    // processed exactly once.
    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
    _g1h->collection_set_iterate_from(hr, &rsfp_cl);
  }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
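
// A sketch of how the task is typically driven (assuming it is invoked
// from the evacuation-failure handling in G1CollectedHeap; the exact
// call site is outside this file):
//
//   void G1CollectedHeap::remove_self_forwarding_pointers() {
//     G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
//     workers()->run_task(&rsfp_task);
//   }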