/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "runtime/orderAccess.hpp"

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

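// Return the PLAB allocation statistics for the given destination. Copies
// to survivor regions and copies to old regions are tracked separately.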
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(InCSetState dest) {
  switch (dest.value()) {
    case InCSetState::Young:
      return &_survivor_evac_stats;
    case InCSetState::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

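// Calculate the desired PLAB size for the given destination from the
// allocation statistics gathered so far and the current number of GC workers.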
size_t G1CollectedHeap::desired_plab_sz(InCSetState dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using paths similar to those for oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm->next_region_in_humongous(hr);
}

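// Calculate the index of the region containing the given address: the byte
// offset of the address from the start of the reserved heap, divided by the
// region size (a power of two, hence the shift by LogOfHRGrainBytes).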
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

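// Return the address of the first word of the region with the given index.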
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
}

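// Return the region containing the given address. The address must be
// within the reserved heap.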
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  return _hrm->addr_to_region((HeapWord*) addr);
}

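// As above, but return NULL if the region containing the address is not
// committed.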
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  uint const region_idx = addr_to_region((HeapWord*) addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// Dirties the cards that cover the block so that the post write barrier
// never enqueues anything when updating objects on this block. It is
// assumed (and asserted) that the block belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

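// Return the scan task queue of the worker thread with the given id.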
inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

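// Return whether the given object is marked on the next (i.e. in-progress)
// concurrent marking bitmap.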
inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
}

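// Fast collection set membership tests. These are backed by a table with one
// entry per region, so they do not need to touch the region itself.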
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset((HeapWord*)obj);
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _in_cset_fast_test.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _in_cset_fast_test.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _in_cset_fast_test.is_in_cset_or_humongous((HeapWord*)obj);
}

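// Return the in-collection-set state of the given object: not in the
// collection set, to be copied to a young or old region, or humongous.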
InCSetState G1CollectedHeap::in_cset_state(const oop obj) {
  return _in_cset_fast_test.at((HeapWord*)obj);
}

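// Record in the fast test table that the region with the given index
// contains a humongous object.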
void G1CollectedHeap::register_humongous_region_with_cset(uint index) {
  _in_cset_fast_test.set_humongous(index);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

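// Return whether evacuation failure injection is enabled for the type of the
// current GC, as configured by the G1EvacuationFailureALotDuring* flags.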
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_initial_mark,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

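// Decide at the start of a GC whether an evacuation failure should be
// injected during this GC: the configured interval of GCs must have elapsed
// and injection must be enabled for the current GC type.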
inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. at least G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_initial_mark_gc,
                                          mark_or_rebuild_in_progress);
  }
}

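// Return whether this particular evacuation should fail, which is the case
// every G1EvacuationFailureALotCount-th call while injection is in effect
// for the current GC.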
inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

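// Reset the injection state: remember the current collection count and clear
// the per-GC flag and the object counter.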
inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

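// During a full GC an object is dead if it is not marked on the (next)
// marking bitmap; objects in archive regions are never considered dead.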
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

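// Record that the humongous object containing obj is live: clear its reclaim
// candidate flag and its entry in the collection set fast test table.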
inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table.  Also
  // reset the entry in the _in_cset_fast_test table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but it
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the _in_cset_fast_test table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _in_cset_fast_test.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP