/*
 * Copyright (c) 2001, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc/g1/g1BarrierSet.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1Policy.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "gc/g1/heapRegionManager.inline.hpp"
#include "gc/g1/heapRegionRemSet.hpp"
#include "gc/g1/heapRegionSet.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"

G1GCPhaseTimes* G1CollectedHeap::phase_times() const {
  return _policy->phase_times();
}

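// Return the PLAB allocation statistics for the given destination; only
// survivor (young) and old regions are valid evacuation destinations for
// allocation buffers.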
G1EvacStats* G1CollectedHeap::alloc_buffer_stats(G1HeapRegionAttr dest) {
  switch (dest.type()) {
    case G1HeapRegionAttr::Young:
      return &_survivor_evac_stats;
    case G1HeapRegionAttr::Old:
      return &_old_evac_stats;
    default:
      ShouldNotReachHere();
      return NULL; // Keep some compilers happy
  }
}

size_t G1CollectedHeap::desired_plab_sz(G1HeapRegionAttr dest) {
  size_t gclab_word_size = alloc_buffer_stats(dest)->desired_plab_sz(workers()->active_workers());
  // Prevent humongous PLAB sizes for two reasons:
  // * PLABs are allocated using similar paths as oops, but should
  //   never be in a humongous region
  // * Allowing humongous PLABs needlessly churns the region free lists
  return MIN2(_humongous_object_threshold_in_words, gclab_word_size);
}

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrm->at(index); }

// Return the region with the given index, or NULL if unmapped. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at_or_null(uint index) const { return _hrm->at_or_null(index); }

inline HeapRegion* G1CollectedHeap::next_region_in_humongous(HeapRegion* hr) const {
  return _hrm->next_region_in_humongous(hr);
}

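// Return the index of the region containing addr, computed from the address's
// offset within the reserved heap.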
inline uint G1CollectedHeap::addr_to_region(HeapWord* addr) const {
  assert(is_in_reserved(addr),
         "Cannot calculate region index for address " PTR_FORMAT " that is outside of the heap [" PTR_FORMAT ", " PTR_FORMAT ")",
         p2i(addr), p2i(reserved_region().start()), p2i(reserved_region().end()));
  return (uint)(pointer_delta(addr, reserved_region().start(), sizeof(uint8_t)) >> HeapRegion::LogOfHRGrainBytes);
}

inline HeapWord* G1CollectedHeap::bottom_addr_for_region(uint index) const {
  return _hrm->reserved().start() + index * HeapRegion::GrainWords;
}

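// Return the region containing the given address; the address must be
// non-NULL and within the reserved heap.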
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  return _hrm->addr_to_region((HeapWord*) addr);
}

template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing_or_null(const T addr) const {
  assert(addr != NULL, "invariant");
  assert(is_in_g1_reserved((const void*) addr),
         "Address " PTR_FORMAT " is outside of the heap ranging from [" PTR_FORMAT " to " PTR_FORMAT ")",
         p2i((void*)addr), p2i(g1_reserved().start()), p2i(g1_reserved().end()));
  uint const region_idx = addr_to_region((HeapWord*) addr);
  return region_at_or_null(region_idx);
}

inline void G1CollectedHeap::old_set_add(HeapRegion* hr) {
  _old_set.add(hr);
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

inline void G1CollectedHeap::archive_set_add(HeapRegion* hr) {
  _archive_set.add(hr);
}

// Dirty the cards that cover the block so that the post-write barrier
// never queues anything when updating objects on this block. It is
// assumed (and in fact we assert) that the block belongs to a young
// region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing(start);)
  assert(word_size > 0, "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->is_humongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  card_table()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(uint i) const {
  return _task_queues->queue(i);
}

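// Return whether obj is marked on the bitmap of the in-progress ("next")
// concurrent marking cycle.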
inline bool G1CollectedHeap::is_marked_next(oop obj) const {
  return _cm->next_mark_bitmap()->is_marked((HeapWord*)obj);
}

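// The is_in_cset* predicates below query the region attribute table to
// determine whether the given location belongs to the current collection set.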
inline bool G1CollectedHeap::is_in_cset(oop obj) {
  return is_in_cset((HeapWord*)obj);
}

inline bool G1CollectedHeap::is_in_cset(HeapWord* addr) {
  return _region_attr.is_in_cset(addr);
}

bool G1CollectedHeap::is_in_cset(const HeapRegion* hr) {
  return _region_attr.is_in_cset(hr);
}

bool G1CollectedHeap::is_in_cset_or_humongous(const oop obj) {
  return _region_attr.is_in_cset_or_humongous((HeapWord*)obj);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(const void* addr) const {
  return _region_attr.at((HeapWord*)addr);
}

G1HeapRegionAttr G1CollectedHeap::region_attr(uint idx) const {
  return _region_attr.get_by_index(idx);
}

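// The register_*_with_region_attr functions below record a region's type in
// the region attribute table, together with whether its remembered set is
// currently being tracked.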
void G1CollectedHeap::register_humongous_region_with_region_attr(uint index) {
  _region_attr.set_humongous(index, region_at(index)->rem_set()->is_tracked());
}

void G1CollectedHeap::register_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_has_remset(r->hrm_index(), r->rem_set()->is_tracked());
}

void G1CollectedHeap::register_old_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_in_old(r->hrm_index(), r->rem_set()->is_tracked());
  _rem_set->prepare_for_scan_heap_roots(r->hrm_index());
}

void G1CollectedHeap::register_optional_region_with_region_attr(HeapRegion* r) {
  _region_attr.set_optional(r->hrm_index(), r->rem_set()->is_tracked());
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

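// Return whether evacuation failure injection applies to a GC of the given
// type, as selected by the G1EvacuationFailureALotDuring* flags.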
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool for_young_gc,
                                                     bool during_initial_mark,
                                                     bool mark_or_rebuild_in_progress) {
  bool res = false;
  if (mark_or_rebuild_in_progress) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (for_young_gc) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. fewer than G1EvacuationFailureALotCount)
    // to trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool in_young_only_phase = collector_state()->in_young_only_phase();
    const bool in_initial_mark_gc = collector_state()->in_initial_mark_gc();
    const bool mark_or_rebuild_in_progress = collector_state()->mark_or_rebuild_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(in_young_only_phase,
                                          in_initial_mark_gc,
                                          mark_or_rebuild_in_progress);
  }
}

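// Return whether this evacuation attempt should be forced to fail: while
// injection is active for the current GC, fail once every
// G1EvacuationFailureALotCount attempts.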
inline bool G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for the current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  if (obj == NULL) {
    return false;
  }
  return heap_region_containing(obj)->is_young();
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_dead(obj, heap_region_containing(obj));
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  if (obj == NULL) {
    return false;
  }
  return is_obj_ill(obj, heap_region_containing(obj));
}

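// An object is dead after a full GC if it is not marked on the next mark
// bitmap; objects in archive regions are never considered dead.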
inline bool G1CollectedHeap::is_obj_dead_full(const oop obj, const HeapRegion* hr) const {
  return !is_marked_next(obj) && !hr->is_archive();
}

inline bool G1CollectedHeap::is_obj_dead_full(const oop obj) const {
  return is_obj_dead_full(obj, heap_region_containing(obj));
}

inline void G1CollectedHeap::set_humongous_reclaim_candidate(uint region, bool value) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  _humongous_reclaim_candidates.set_candidate(region, value);
}

inline bool G1CollectedHeap::is_humongous_reclaim_candidate(uint region) {
  assert(_hrm->at(region)->is_starts_humongous(), "Must start a humongous object");
  return _humongous_reclaim_candidates.is_candidate(region);
}

inline void G1CollectedHeap::set_humongous_is_live(oop obj) {
  uint region = addr_to_region((HeapWord*)obj);
  // Clear the flag in the humongous_reclaim_candidates table.  Also
  // reset the entry in the region attribute table so that subsequent references
  // to the same humongous object do not go into the slow path again.
  // This is racy, as multiple threads may enter here at the same time, but this
  // is benign.
  // During collection we only ever clear the "candidate" flag, and only ever clear
  // the entry in the region attribute table.
  // We only ever evaluate the contents of these tables (in the VM thread) after
  // having synchronized the worker threads with the VM thread, or in the same
  // thread (i.e. within the VM thread).
  if (is_humongous_reclaim_candidate(region)) {
    set_humongous_reclaim_candidate(region, false);
    _region_attr.clear_humongous(region);
  }
}


#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP