/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/concurrentMark.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorPolicy.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.hpp"
#include "runtime/orderAccess.inline.hpp"

G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  switch (dest.value()) {
    case InCSetState::Young:
      return &_survivor_evac_stats;
    case InCSetState::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(G1CollectedHeap::heap()->workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated along similar paths to oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
  return _allocation_context_stats;
}
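// Illustrative sketch of the clamping in desired_plab_sz() above (not part
// of the original file; the numbers below are hypothetical and assume the
// usual half-region humongous threshold): with a 1 MB region and 8-byte
// words, HeapRegion::GrainWords is 131072, so the threshold is 65536 words
// and a larger stats-driven request gets clamped:
//
//   size_t threshold = 65536;                    // half a region, in words
//   size_t desired   = 80000;                    // stats-driven request
//   size_t plab_sz   = MIN2(threshold, desired); // == 65536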
// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  return _hrm.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->is_continues_humongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}
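// A hedged usage sketch for dirty_young_block() above (illustrative only;
// `g1h`, `obj_start` and `word_sz` are hypothetical names, not taken from
// the allocator code): after carving a new block out of a young region, an
// allocation path would dirty the covering cards so the post write barrier
// never enqueues anything for stores into that block:
//
//   G1CollectedHeap* g1h = G1CollectedHeap::heap();
//   HeapWord* obj_start = ...;   // block just allocated in a young region
//   size_t word_sz = ...;        // its size in words
//   g1h->dirty_young_block(obj_start, word_sz);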
// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _in_cset_fast_test.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_cset(uint index) {
  _in_cset_fast_test.set_humongous(index);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = collector_state()->gcs_are_young();
    const bool during_im = collector_state()->during_initial_mark_pause();
    const bool during_marking = collector_state()->mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}
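// Worked example of the two knobs above (the numbers are illustrative, not
// the flags' defaults): with G1EvacuationFailureALotInterval == 5 and
// G1EvacuationFailureALotCount == 1000, injection arms only once at least
// 5 GCs have elapsed since the last reset_evacuation_should_fail(), and
// once armed, evacuation_should_fail() below returns true on every 1000th
// copied object, wrapping the counter back to 0 each time it fires.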
inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP