/*
 * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionManager.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

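// Returns the PLAB allocation statistics for the given destination state:
// survivor PLAB stats for allocations into young, old PLAB stats for
// allocations into old.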
PLABStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  switch (dest.value()) {
    case InCSetState::Young:
      return &_survivor_plab_stats;
    case InCSetState::Old:
      return &_old_plab_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz();
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using similar paths as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

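// Allocates word_size words for an object being copied into dest during GC,
// dispatching to the survivor or old allocation path.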
HeapWord* G1CollectedHeap::par_allocate_during_gc(InCSetState dest,
                                                  size_t word_size,
                                                  AllocationContext_t context) {
  switch (dest.value()) {
    case InCSetState::Young:
      return survivor_attempt_allocation(word_size, context);
    case InCSetState::Old:
      return old_attempt_allocation(word_size, context);
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

// Inline functions for G1CollectedHeap

inline AllocationContextStats& G1CollectedHeap::allocation_context_stats() {
  return _allocation_context_stats;
}

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm.at(index); }

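// Returns the index of the region containing addr, computed as the byte
// offset from the start of the reserved heap shifted right by the log of
// the region (grain) size.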
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         err_msg("Cannot calculate region index for address "PTR_FORMAT" that is outside of the heap ["PTR_FORMAT", "PTR_FORMAT")",
                 p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end())));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

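// Returns the bottom address of the region with the given index.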
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm.reserved().start() + index * HeapRegion::GrainWords;
}

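// Returns the region containing addr. For an address inside a humongous
// object this may be a "continues humongous" region rather than the region
// in which the object starts.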
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
      err_msg("Address "PTR_FORMAT" is outside of the heap ranging from ["PTR_FORMAT" to "PTR_FORMAT")",
          p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end())));
  return _hrm.addr_to_region((HeapWord*) addr);
}

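// As heap_region_containing_raw(), but maps a "continues humongous" region
// back to its "starts humongous" region.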
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->is_continues_humongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

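// Slow check whether obj is in the collection set, by looking up its
// containing region; is_in_cset() below uses this in asserts to validate
// its fast test.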
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrm.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

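// Fast path for mutator allocation: tries to allocate inline in the current
// mutator alloc region, falling back to attempt_allocation_slow() on failure.
// Successfully allocated (young) blocks are dirtied via dirty_young_block().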
inline HeapWord* G1CollectedHeap::attempt_allocation(size_t word_size,
                                                     uint* gc_count_before_ret,
                                                     uint* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!is_humongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  AllocationContext_t context = AllocationContext::current();
  HeapWord* result = _allocator->mutator_alloc_region(context)->attempt_allocation(word_size,
                                                                                   false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     context,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

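// Allocates out of the survivor GC allocation region, retrying under the
// FreeList_lock if the initial unlocked attempt fails. Survivor blocks are
// dirtied as young via dirty_young_block().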
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t word_size,
                                                              AllocationContext_t context) {
  assert(!is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                       false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->survivor_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

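// As above, but out of the old GC allocation region. Old allocations
// maintain the block offset table (bot_updates is true) and are not
// dirtied as young.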
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size,
                                                         AllocationContext_t context) {
  assert(!is_humongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _allocator->old_gc_alloc_region(context)->attempt_allocation(word_size,
                                                                                  true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _allocator->old_gc_alloc_region(context)->attempt_allocation_locked(word_size,
                                                                                 true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post
// write barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. It assumes that the reference
// points into the heap.
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  bool ret = _in_cset_fast_test.is_in_cset((HeapWord*)obj);
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret ||  obj_in_cs(obj), "sanity");
  return ret;
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _in_cset_fast_test.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

void G1CollectedHeap::register_humongous_region_with_cset(uint index) {
  _in_cset_fast_test.set_humongous(index);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

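// Returns whether injected evacuation failures should be active for the
// given kind of GC, as selected by the corresponding
// G1EvacuationFailureALotDuring* flags.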
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm.at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table.  Also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but
  // it is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP