/*
 * Copyright (c) 2001, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/markBitMap.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"

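// Forward to the policy, which owns the phase times for the current collection.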
G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

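// Return the PLAB statistics for the given evacuation destination:
// young (survivor) or old. Any other destination is invalid.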
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using similar paths as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm->next_region_in_humongous(hr);
}

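// Compute the region index for the given address by taking its byte
// offset from the start of the reserved heap and shifting by the
// (log2) region size.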
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

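// Return the bottom address of the region with the given index,
// i.e. the reserved base plus index region-sizes in words.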
inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  return _hrm->addr_to_region((HeapWord*)(void*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  uint const region_idx = addr_to_region(addr);
  return region_at_or_null(region_idx);
}

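// Bookkeeping for the old and archive heap region sets.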
inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// Dirties the cards that cover the block so that the post-write
// barrier never queues anything when updating objects on this
// block. It is assumed (and in fact we assert) that the block
// belongs to a young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

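// Return the scanner task queue belonging to worker i.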
inline ScannerTasksQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

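// Return whether the object is marked in the next (in-progress) marking bitmap.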
inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked(obj);
}

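// Collection set membership queries, all answered from the region
// attribute table rather than the regions themselves.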
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset(cast_from_oop<HeapWord*>(obj));
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous(cast_from_oop<HeapWord*>(obj));
}

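// Look up the attribute entry for an address or a region index.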
G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
  return _region_attr.at((HeapWord*)addr);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}

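// Update the region attribute table when a region takes on a new role,
// recording whether its remembered set is tracked.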
void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
  _rem_set->exclude_region_from_scan(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

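// Return whether artificial evacuation failures should be injected for
// the given GC type, based on the corresponding G1EvacuationFailureALot*
// flags.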
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_initial_mark,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_initial_mark_gc,
                                          mark_or_rebuild_in_progress);
  }
}

inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

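// NULL-tolerant liveness and age queries: a NULL object is never young,
// dead or ill; otherwise the answer comes from the containing region.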
inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

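// Track which humongous regions are candidates for eager reclaim during
// evacuation. Candidates must be regions that start a humongous object,
// as the asserts below check.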
inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_has_humongous_reclaim_candidate(bool value) {
  _has_humongous_reclaim_candidates = value;
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region(cast_from_oop<HeapWord*>(obj));
  // Clear the flag in the humongous_reclaim_candidates table.  Also
  // reset the entry in the region attribute table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but
  // the race is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear the
  // entry in the in_cset_fast_table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP