/*
 * Copyright (c) 2012, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc_implementation/g1/concurrentMark.inline.hpp"
#include "gc_implementation/g1/dirtyCardQueue.hpp"
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1EvacFailure.hpp"
#include "gc_implementation/g1/g1_globals.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/heapRegion.hpp"
#include "gc_implementation/g1/heapRegionRemSet.hpp"

// Records cards for a later, deferred remembered set update: every card
// spanning a reference that points out of the current region is marked
// deferred and enqueued on a dirty card queue. References within the
// region itself, and references found while the region is a survivor,
// are skipped.
class UpdateRSetDeferred : public OopsInHeapRegionClosure {
private:
  G1CollectedHeap* _g1;
  DirtyCardQueue* _dcq;
  G1SATBCardTableModRefBS* _ct_bs;

public:
  UpdateRSetDeferred(G1CollectedHeap* g1, DirtyCardQueue* dcq) :
    _g1(g1), _dcq(dcq), _ct_bs(_g1->g1_barrier_set()) {}

  virtual void do_oop(narrowOop* p) { do_oop_work(p); }
  virtual void do_oop(      oop* p) { do_oop_work(p); }
  template <class T> void do_oop_work(T* p) {
    assert(_from->is_in_reserved(p), "paranoia");
    if (!_from->is_in_reserved(oopDesc::load_decode_heap_oop(p)) &&
        !_from->is_survivor()) {
      size_t card_index = _ct_bs->index_for(p);
      if (_ct_bs->mark_card_deferred(card_index)) {
        _dcq->enqueue((jbyte*)_ct_bs->byte_for_index(card_index));
      }
    }
  }
};
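
// Iterates over all objects in a region in which evacuation failed.
// Self-forwarded objects (i.e. objects whose copy failed, so that
// obj->forwardee() == obj) are kept live: their prev-bitmap mark is
// set, their mark word is restored, and the remembered set entries that
// were skipped during the pause are recreated. The ranges covered by
// all other objects (evacuated or dead) are refilled with dummy objects
// so the region stays parseable, and the BOT is refined as the
// iteration crosses card boundaries.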
class RemoveSelfForwardPtrObjClosure: public ObjectClosure {
private:
  G1CollectedHeap* _g1;
  ConcurrentMark* _cm;
  HeapRegion* _hr;
  size_t _marked_bytes;
  OopsInHeapRegionClosure* _update_rset_cl;
  bool _during_initial_mark;
  bool _during_conc_mark;
  uint _worker_id;
  HeapWord* _end_of_last_gap;
  HeapWord* _last_gap_threshold;
  HeapWord* _last_obj_threshold;

public:
  RemoveSelfForwardPtrObjClosure(G1CollectedHeap* g1, ConcurrentMark* cm,
                                 HeapRegion* hr,
                                 OopsInHeapRegionClosure* update_rset_cl,
                                 bool during_initial_mark,
                                 bool during_conc_mark,
                                 uint worker_id) :
    _g1(g1), _cm(cm), _hr(hr), _marked_bytes(0),
    _update_rset_cl(update_rset_cl),
    _during_initial_mark(during_initial_mark),
    _during_conc_mark(during_conc_mark),
    _worker_id(worker_id),
    _end_of_last_gap(hr->bottom()),
    _last_gap_threshold(hr->bottom()),
    _last_obj_threshold(hr->bottom()) { }

  size_t marked_bytes() { return _marked_bytes; }

  // <original comment>
  // The original idea here was to coalesce evacuated and dead objects.
  // However that caused complications with the block offset table (BOT).
  // In particular, if there were two TLABs, one of them partially refined:
  // |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
  // the BOT entries of the unrefined part of TLAB_2 point to the start
  // of TLAB_2. If the last object of TLAB_1 and the first object
  // of TLAB_2 are coalesced, then the cards of the unrefined part
  // would point into the middle of the filler object.
  // The current approach is to not coalesce and leave the BOT contents intact.
  // </original comment>
  //
  // We now reset the BOT when we start the object iteration over the
  // region and refine its entries for every object we come across. So
  // the above comment is not really relevant and we should be able
  // to coalesce dead objects if we want to.
  void do_object(oop obj) {
    HeapWord* obj_addr = (HeapWord*) obj;
    assert(_hr->is_in(obj_addr), "sanity");
    size_t obj_size = obj->size();
    HeapWord* obj_end = obj_addr + obj_size;

    if (_end_of_last_gap != obj_addr) {
      // There was a gap before obj_addr.
      _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
    }

    if (obj->is_forwarded() && obj->forwardee() == obj) {
      // The object failed to move.

      // We consider all objects that we find self-forwarded to be
      // live. We'll update the prev marking info so that they are
      // all under PTAMS and explicitly marked.
      if (!_cm->isPrevMarked(obj)) {
        _cm->markPrev(obj);
      }
      if (_during_initial_mark) {
        // For the next marking info we'll only mark the
        // self-forwarded objects explicitly if we are during
        // initial-mark (since, normally, we only mark objects pointed
        // to by roots if we succeed in copying them). By marking all
        // self-forwarded objects we ensure that we mark any that are
        // still pointed to by roots. During concurrent marking, and
        // after initial-mark, we don't need to mark any objects
        // explicitly and all objects in the CSet are considered
        // (implicitly) live. So, we won't mark them explicitly and
        // we'll leave them over NTAMS.
        _cm->grayRoot(obj, obj_size, _worker_id, _hr);
      }
      _marked_bytes += (obj_size * HeapWordSize);
      obj->set_mark(markOopDesc::prototype());

      // While we were processing RSet buffers during the collection,
      // we actually didn't scan any cards in the collection set,
      // since we didn't want to update remembered sets with entries
      // that point into the collection set, given that live objects
      // from the collection set are about to move and such entries
      // will be stale very soon.
      // This change also dealt with a reliability issue which
      // involved scanning a card in the collection set and coming
      // across an array that was being chunked and looking malformed.
      // The problem is that, if evacuation fails, we might have
      // remembered set entries missing given that we skipped cards in
      // the collection set. So, we'll recreate such entries now.
      obj->oop_iterate(_update_rset_cl);
    } else {

      // The object has either been evacuated or is dead. Fill it with a
      // dummy object.
      MemRegion mr(obj_addr, obj_size);
      CollectedHeap::fill_with_object(mr);

      // We must nuke all dead objects that we skipped when iterating
      // over the region.
      _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
    }
    _end_of_last_gap = obj_end;
    _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
  }
};
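
// Per-worker closure over the collection set: claims each region once
// (via the HeapRegionClaimer) and, for regions in which evacuation
// failed, resets the region's RSet and BOT and then applies
// RemoveSelfForwardPtrObjClosure to every object. Remembered set
// entries are recreated through this worker's own DirtyCardQueue and
// UpdateRSetDeferred closure.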
class RemoveSelfForwardPtrHRClosure: public HeapRegionClosure {
  G1CollectedHeap* _g1h;
  ConcurrentMark* _cm;
  uint _worker_id;
  HeapRegionClaimer* _hrclaimer;

  DirtyCardQueue _dcq;
  UpdateRSetDeferred _update_rset_cl;

public:
  RemoveSelfForwardPtrHRClosure(G1CollectedHeap* g1h,
                                uint worker_id,
                                HeapRegionClaimer* hrclaimer) :
    _g1h(g1h), _cm(_g1h->concurrent_mark()), _worker_id(worker_id),
    _hrclaimer(hrclaimer), _dcq(&g1h->dirty_card_queue_set()),
    _update_rset_cl(g1h, &_dcq) {
  }

  bool doHeapRegion(HeapRegion* hr) {
    bool during_initial_mark = _g1h->g1_policy()->during_initial_mark_pause();
    bool during_conc_mark = _g1h->mark_in_progress();

    assert(!hr->is_humongous(), "sanity");
    assert(hr->in_collection_set(), "bad CS");

    if (_hrclaimer->claim_region(hr->hrm_index())) {
      if (hr->evacuation_failed()) {
        RemoveSelfForwardPtrObjClosure rspc(_g1h, _cm, hr, &_update_rset_cl,
                                            during_initial_mark,
                                            during_conc_mark,
                                            _worker_id);

        hr->note_self_forwarding_removal_start(during_initial_mark,
                                               during_conc_mark);
        _g1h->check_bitmaps("Self-Forwarding Ptr Removal", hr);

        // In the common case (i.e. when there is no evacuation
        // failure) we make sure that the following is done when
        // the region is freed so that it is "ready-to-go" when it's
        // re-allocated. However, when evacuation failure happens, a
        // region will remain in the heap and might ultimately be added
        // to a CSet in the future. So we have to be careful here and
        // make sure the region's RSet is ready for parallel iteration
        // whenever that might be required.
        hr->rem_set()->reset_for_par_iteration();
        hr->reset_bot();
        _update_rset_cl.set_region(hr);
        hr->object_iterate(&rspc);

        hr->rem_set()->clean_strong_code_roots(hr);

        hr->note_self_forwarding_removal_end(during_initial_mark,
                                             during_conc_mark,
                                             rspc.marked_bytes());
      }
    }
    return false;
  }
};

G1ParRemoveSelfForwardPtrsTask::G1ParRemoveSelfForwardPtrsTask(G1CollectedHeap* g1h) :
    AbstractGangTask("G1 Remove Self-forwarding Pointers"), _g1h(g1h),
    _hrclaimer(g1h->workers()->active_workers()) {}

void G1ParRemoveSelfForwardPtrsTask::work(uint worker_id) {
  RemoveSelfForwardPtrHRClosure rsfp_cl(_g1h, worker_id, &_hrclaimer);

  HeapRegion* hr = _g1h->start_cset_region_for_worker(worker_id);
  _g1h->collection_set_iterate_from(hr, &rsfp_cl);
}
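
// For orientation: this task is driven from the evacuation-failure path
// of a collection pause. A simplified sketch of how it is run on the
// work gang (not verbatim; see
// G1CollectedHeap::remove_self_forwarding_pointers() in
// g1CollectedHeap.cpp for the actual call site):
//
//   G1ParRemoveSelfForwardPtrsTask rsfp_task(this);
//   workers()->run_task(&rsfp_task);  // each worker invokes work(worker_id)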