/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP

#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"
#include "utilities/workgroup.hpp"

// Closures and tasks associated with any self-forwarding pointers
// installed as a result of an evacuation failure.
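//
// Background: when G1 cannot copy an object during a collection pause
// (for example because to-space is exhausted), it installs a forwarding
// pointer in the object's mark word that points back at the object
// itself, i.e. obj->forwardee() == obj. The code below walks the
// affected collection-set regions after the pause, keeps such
// self-forwarded objects live in place, restores their mark words,
// fills the holes left by evacuated or dead objects, and recreates the
// remembered set entries that were skipped during the pause.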

// Records deferred remembered set updates for references that point out
// of the region being scanned: the corresponding card is marked deferred
// and enqueued on the given DirtyCardQueue for later processing.
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue* _dcq;
  G1SATBCardTableModRefBS* _ct_bs;

public:
  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _dcq(dcq), _ct_bs(_g1->g1_barrier_set()) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
        !_from->is_survivor()) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};

// Iterates over all objects in a region that failed evacuation, keeping
// self-forwarded objects (they stay live in place) and overwriting the
// ranges occupied by evacuated or dead objects with filler objects.
class RemoveSelfForwardPtrObjClosure : public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _marked_bytes;
  OopsInHeapRegionClosure* _update_rset_cl;
  bool _during_initial_mark;
  bool _during_conc_mark;
  uint _worker_id;
  HeapWord* _end_of_last_gap;
  HeapWord* _last_gap_threshold;
  HeapWord* _last_obj_threshold;

public:
  RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                 HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl,
                                 bool during_initial_mark,
                                 bool during_conc_mark,
                                 uint worker_id) :
    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _during_conc_mark(during_conc_mark),
    _worker_id(worker_id),
    _end_of_last_gap(hr->bottom()),
    _last_gap_threshold(hr->bottom()),
    _last_obj_threshold(hr->bottom()) { }

  size_t marked_bytes() { return _marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However, that caused complications with the block offset table (BOT).
  // In particular, consider two TLABs, one of them partially refined:
  //   |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // The BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into the middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    HeapWord* obj_end = obj_addr + obj_size;

    if (_end_of_last_gap != obj_addr) {
      // There was a gap before obj_addr.
      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
    }

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.

      // We consider all objects that we find self-forwarded to be
      // live. What we'll do is that we'll update the prev marking
      // info so that they are all under PTAMS and explicitly marked.
      if (!_cm->isPrevMarked(obj)) {
        _cm->markPrev(obj);
      }
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly if we are during an
        // initial-mark pause (since, normally, we only mark objects
        // pointed to by roots if we succeed in copying them). By
        // marking all self-forwarded objects we ensure that we mark
        // any that are still pointed to by roots. During concurrent
        // marking, and after initial-mark, we don't need to mark any
        // objects explicitly and all objects in the CSet are
        // considered (implicitly) live. So, we won't mark them
        // explicitly and we'll leave them over NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards on the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards on
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);
    } else {

      // The object has been either evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr(obj_addr, obj_size);
      CollectedHeap::fill_with_object(mr);

      // We must nuke all dead objects that we skipped when iterating
      // over the region.
      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
    }
    _end_of_last_gap = obj_end;
    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
  }
};

// Applied to every region in the collection set; repairs each region in
// which evacuation failed by running RemoveSelfForwardPtrObjClosure over it.
class RemoveSelfForwardPtrHRClosure : public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  uint _worker_id;
  HeapRegionClaimer* _hrclaimer;

  DirtyCardQueue _dcq;
  UpdateRSetDeferred _update_rset_cl;

public:
  RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
                                uint worker_id,
                                HeapRegionClaimer* hrclaimer) :
    _g1h(g1h), _cm(g1h->concurrent_mark()),
    _worker_id(worker_id), _hrclaimer(hrclaimer),
    _dcq(&g1h->dirty_card_queue_set()), _update_rset_cl(g1h, &_dcq) {
  }

  bool doHeapRegion(HeapRegion* hr) {
    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
    bool during_conc_mark = _g1h->mark_in_progress();

    assert(!hr->is_humongous(), "sanity");
    assert(hr->in_collection_set(), "bad CS");

    if (_hrclaimer->claim_region(hr->hrm_index())) {
      if (hr->evacuation_failed()) {
        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                            during_initial_mark,
                                            during_conc_mark,
                                            _worker_id);

        hr->note_self_forwarding_removal_start(during_initial_mark,
                                               during_conc_mark);
        _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);

        // In the common case (i.e. when there is no evacuation
        // failure) we make sure that the following is done when
        // the region is freed so that it is "ready-to-go" when it's
        // re-allocated. However, when evacuation failure happens, a
        // region will remain in the heap and might ultimately be added
        // to a CSet in the future. So we have to be careful here and
        // make sure the region's RSet is ready for parallel iteration
        // whenever this might be required in the future.
        hr->rem_set()->reset_for_par_iteration();
        hr->reset_bot();
        _update_rset_cl.set_region(hr);
        hr->object_iterate(&rspc);

        hr->rem_set()->clean_strong_code_roots(hr);

        hr->note_self_forwarding_removal_end(during_initial_mark,
                                             during_conc_mark,
                                             rspc.marked_bytes());
      }
    }
    return false;
  }
};

class G1ParRemoveSelfForwardPtrsTask : public AbstractGangTask {
protected:
  G1CollectedHeap* _g1h;
  HeapRegionClaimer _hrclaimer;

public:
  G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Remove Self-forwarding Pointers"), _g1h(g1h),
    _hrclaimer(g1h->workers()->active_workers()) {}

  void work(uint worker_id) {
    RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id, &_hrclaimer);

    HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
    _g1h->collection_set_iterate_from(hr, &rsfp_cl);
  }
};

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1EVACFAILURE_HPP
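
// Usage sketch (illustrative; the driver code lives in G1CollectedHeap,
// and the exact call sequence below is an assumption, not part of this
// header):
//
//   G1ParRemoveSelfForwardPtrsTask rsfp_task(g1h);
//   g1h->workers()->run_task(&rsfp_task);
//
// Each gang worker claims collection-set regions through the shared
// HeapRegionClaimer, so each region that failed evacuation is repaired
// exactly once, in parallel with the others.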