/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be NULL if addr is outside the heap
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}
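// Illustrative note (hedged; the exact call sites live elsewhere in G1):
// HeapRegions cache a copy of the heap-wide _gc_time_stamp to decide whether
// per-region saved values are still current, so bumping the heap-wide stamp
// invalidates all cached copies at once, and the OrderAccess::fence() above
// publishes the new stamp before any dependent state is rebuilt or cleared.
// A schematic pause might drive the pair like this:
//
//   g1h->increment_gc_time_stamp();  // start of a pause: cached region
//                                    // copies of the old stamp go stale
//   /* ... evacuation work ... */
//   g1h->reset_gc_time_stamp();      // e.g. after a full GC: restart at
//                                    // zero and drop dependent caches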
inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret,
                                    int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
                                                              word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                      true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}
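// Illustrative sketch (hedged): dirty_young_block() pre-marks the covering
// cards with the "young" card value so the G1 post write barrier can exit
// early for stores into freshly allocated young blocks. Schematically
// (simplified; the real test lives in G1SATBCardTableModRefBS and its
// logging subclass, and the names below are approximate):
//
//   jbyte* card = byte_for(field_address);
//   if (*card == g1_young_gen) {
//     return;  // store into a young block: nothing to enqueue for refinement
//   }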
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. It assumes that the reference
// points into the heap.
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
  bool ret = _in_cset_fast_test.get_by_address((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret ||  obj_in_cs(obj), "sanity");
  return ret;
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

inline bool
G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}
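// Illustrative (hedged) call-site sketch: the object copy path consults
// evacuation_should_fail() to inject synthetic evacuation failures for
// testing the failure-handling code. Roughly (names approximate, and
// guarded by the same #ifndef PRODUCT as this block):
//
//   if (_g1h->evacuation_should_fail()) {
//     return _g1h->handle_evacuation_failure_par(_par_scan_state, old);
//   }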
inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  HeapRegion* hr = heap_region_containing(obj);
  return hr != NULL && hr->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_dead(obj, hr);
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_ill(obj, hr);
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP