/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap
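
// The region-addressing model used throughout this file: the reserved heap
// is carved into equally sized HeapRegions, so mapping an address to its
// region index is simple shift arithmetic. An illustrative sketch only
// ('heap_bottom' here stands for the start of the reserved space; the real
// lookup is encapsulated by HeapRegionSeq::addr_to_region()):
//
//   size_t index = pointer_delta((HeapWord*) addr, heap_bottom)
//                    >> HeapRegion::LogOfHRGrainWords;
//   HeapRegion* hr = region_at((uint) index);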

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

// Return the region containing the given address. This "raw" version may
// return a "continues humongous" region; callers that need the logical
// (start) region should use heap_region_containing() instead.
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(_g1_reserved.contains((const void*) addr),
         err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
                 p2i((void*)addr), p2i(_g1_reserved.start()), p2i(_g1_reserved.end())));
  return _hrs.addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->continuesHumongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

// Fast-path mutator allocation: try the current mutator alloc region first,
// then fall back to the slow path, which may take locks or trigger a GC.
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret,
                                    int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                              false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                                  false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                                 false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                             true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                            true /* bot_updates */);
  }
  return result;
}
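
// Note on the two GC-time allocators above: each first tries a lock-free
// bump allocation in its active GC alloc region and only takes
// FreeList_lock for the retry, which may retire the current region and
// install a fresh one. The bot_updates argument differs because survivor
// regions are young and are never scanned via the card table, so they can
// skip block-offset-table maintenance, while old regions need the BOT to
// locate object starts from card boundaries. A minimal sketch of the
// shared shape (illustrative only; 'alloc_region' stands for either
// G1AllocRegion instance):
//
//   HeapWord* result = alloc_region->attempt_allocation(word_size, bot_updates);
//   if (result == NULL) {
//     MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
//     result = alloc_region->attempt_allocation_locked(word_size, bot_updates);
//   }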

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. It assumes that the reference
// points into the heap.
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
  bool ret = _in_cset_fast_test.get_by_address((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret || obj_in_cs(obj), "sanity");
  return ret;
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}
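
// evacuation_should_fail() below is the per-object throttle: copying code
// consults it before committing to an evacuation and, roughly once per
// G1EvacuationFailureALotCount copies, simulates a failure. An illustrative,
// simplified call site, modeled on
// G1ParScanThreadState::copy_to_survivor_space():
//
//   if (_g1h->evacuation_should_fail()) {
//     // Give up the destination buffer and take the failure path, which
//     // leaves the object forwarded to itself.
//     return _g1h->handle_evacuation_failure_par(this, old);
//   }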

inline bool
G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

// Update the remembered set for a reference that lives in a non-survivor
// region. Survivor regions are young and will be scanned in their entirety
// during the next GC, so no remembered-set entry is needed for them.
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
  if (!from->is_survivor()) {
    _g1_rem->par_write_ref(from, p, tid);
  }
}

// With G1DeferredRSUpdate the update is queued and processed after
// evacuation; otherwise it is applied immediately.
template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
  if (G1DeferredRSUpdate) {
    deferred_rs_update(from, p, tid);
  } else {
    immediate_rs_update(from, p, tid);
  }
}
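
// do_oop_partial_array() below implements chunked scanning of large object
// arrays: a queue entry carrying the "partial array mask" is not a real
// oop*, but the tagged from-space object of an objArray whose remaining
// elements still need scanning. The to-space copy's length field is
// borrowed to record the next start index. An illustrative sketch of the
// tagging (the actual mask value and helpers live in g1CollectedHeap.hpp):
//
//   oop* tagged = set_partial_array_mask(from_obj);   // tag before pushing
//   push_on_queue(tagged);
//   ...
//   oop from_obj = clear_partial_array_mask(tagged);  // untag after popping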

inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start = next_index;
  int end = length;
  int remainder = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters. It does,
  // however, return the size of the object, which will be incorrect,
  // so we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use "raw" versions of "region_containing" because
    // "ref_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}

inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP