/*
 * Copyright (c) 2001, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/orderAccess.hpp"

G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  switch (dest.value()) {
    case InCSetState::Young:
      return &_survivor_evac_stats;
    case InCSetState::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using similar paths as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}
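// Worked example for the cap above (illustrative; assumes the usual
// half-region humongous threshold and 8-byte heap words): with 1M regions,
// HeapRegion::GrainWords is 128K words and the threshold is 64K words, so a
// statistics-driven desired PLAB of 100K words would be clamped to 64K words.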
// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm.at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm.next_region_in_humongous(hr);
}

inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  return _hrm.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
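  // DEBUG_ONLY(...) (from utilities/macros.hpp) expands to its argument only
  // in debug (ASSERT) builds, so containing_hr and the asserts that use it
  // add no overhead to product builds.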
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
}

inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset((HeapWord*)obj);
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _in_cset_fast_test.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _in_cset_fast_test.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_cset(uint index) {
  _in_cset_fast_test.set_humongous(index);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_initial_mark,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
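    // The per-GC-type result below is AND-ed into the interval check above,
    // so a forced failure requires both that the interval has elapsed and
    // that the matching G1EvacuationFailureALotDuring* flag is set.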
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_initial_mark_gc,
                                          mark_or_rebuild_in_progress);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table. Also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may at the same time enter here, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the in_cset_fast_table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
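  // Worst case, two racing threads both observe the candidate flag set and
  // both clear it below; since both clears are idempotent, the duplicated
  // work is the only cost of the race.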
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_VM_GC_G1_G1COLLECTEDHEAP_INLINE_HPP