/*
 * Copyright (c) 2001, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

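// Returns the region containing the given address, remapping addresses
// that fall into the tail of a humongous object ("continues humongous"
// regions) to the region in which that object starts. May return NULL
// for addresses not covered by a region.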
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be NULL if addr is in the perm gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

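// As above, but without the humongous remapping; asserts instead that
// the address lies within the reserved heap.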
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}

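// Returns true iff obj lies in a region currently in the collection set.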
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

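// First attempt at a non-humongous mutator allocation: try a lock-free
// allocation in the current mutator alloc region and fall back to
// attempt_allocation_slow() on failure. Successful allocations are
// dirtied via dirty_young_block(), as mutator regions are young.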
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret,
                                    int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

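// Allocates out of the survivor GC alloc region during evacuation,
// retrying under the FreeList_lock if the initial lock-free attempt
// fails. Survivor regions are young, so no BOT updates are done and
// the block is dirtied on success.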
inline HeapWord*
G1CollectedHeap::survivor_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

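// Allocates out of the old GC alloc region during evacuation, with the
// same lock-free-then-locked retry as the survivor path. Old regions
// do require BOT updates, and the block is not marked young.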
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                       true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                       true /* bot_updates */);
  }
  return result;
}

// Dirties the cards that cover the block so that the post write
// barrier never queues anything when updating objects on this block.
// It is assumed (and in fact we assert) that the block belongs to a
// young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

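// Returns the reference-to-scan work queue of the given worker.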
inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

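// Returns true iff obj is marked in the previous marking bitmap.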
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord*) obj);
}

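// Returns true iff obj is marked in the next marking bitmap.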
inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord*) obj);
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

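// Returns true if any of the G1EvacuationFailureALotDuring* flags
// enables failure injection for the kind of GC in progress.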
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

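// Decides whether the current collection should inject evacuation
// failures: the configured interval of collections must have elapsed
// and the current GC type must be enabled (see above).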
inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note that we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flag and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

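// Once injection is armed for the current GC, returns true on every
// G1EvacuationFailureALotCount-th call so that the caller can simulate
// an evacuation failure.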
inline bool
G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC.
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

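// Re-arms the injection state: records the current collection count as
// the start of the next interval and clears the count and flag.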
inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP