/*
 * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP
#define SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP

#include "gc_implementation/g1/concurrentMark.hpp"
#include "gc_implementation/g1/g1CollectedHeap.hpp"
#include "gc_implementation/g1/g1AllocRegion.inline.hpp"
#include "gc_implementation/g1/g1CollectorPolicy.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
#include "gc_implementation/g1/heapRegionSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "runtime/orderAccess.inline.hpp"
#include "utilities/taskqueue.hpp"

// Inline functions for G1CollectedHeap

// Return the region with the given index. It assumes the index is valid.
inline HeapRegion* G1CollectedHeap::region_at(uint index) const { return _hrs.at(index); }

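// Return the region containing the given address, or NULL if the address
// is outside the heap. For an address inside a humongous object this
// returns the "starts humongous" region rather than the continuation
// region the address happens to fall into.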
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be NULL if addr is outside the heap
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}

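// As above, but the caller must guarantee that the address lies within the
// reserved heap (asserted below), and "continues humongous" regions are not
// remapped to their start region.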
template <class T>
inline HeapRegion*
G1CollectedHeap::heap_region_containing_raw(const T addr) const {
  assert(_g1_reserved.contains((const void*) addr), "invariant");
  HeapRegion* res = _hrs.addr_to_region_unsafe((HeapWord*) addr);
  return res;
}

inline void G1CollectedHeap::reset_gc_time_stamp() {
  _gc_time_stamp = 0;
  OrderAccess::fence();
  // Clear the cached CSet starting regions and time stamps.
  // Their validity is dependent on the GC timestamp.
  clear_cset_start_regions();
}

inline void G1CollectedHeap::increment_gc_time_stamp() {
  ++_gc_time_stamp;
  OrderAccess::fence();
}

inline void G1CollectedHeap::old_set_remove(HeapRegion* hr) {
  _old_set.remove(hr);
}

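// Slower test for collection-set membership that looks up the containing
// region directly; in_cset_fast_test() below is the fast path.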
inline bool G1CollectedHeap::obj_in_cs(oop obj) {
  HeapRegion* r = _hrs.addr_to_region((HeapWord*) obj);
  return r != NULL && r->in_collection_set();
}

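// Mutator allocation fast path: try to allocate out of the current mutator
// alloc region and, if that fails, fall back to attempt_allocation_slow().
// Successful allocations are dirtied via dirty_young_block() so that the
// post write barrier never enqueues cards for the new block.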
inline HeapWord*
G1CollectedHeap::attempt_allocation(size_t word_size,
                                    unsigned int* gc_count_before_ret,
                                    int* gclocker_retry_count_ret) {
  assert_heap_not_locked_and_not_at_safepoint();
  assert(!isHumongous(word_size), "attempt_allocation() should not "
         "be called for humongous allocation requests");

  HeapWord* result = _mutator_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    result = attempt_allocation_slow(word_size,
                                     gc_count_before_ret,
                                     gclocker_retry_count_ret);
  }
  assert_heap_not_locked();
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

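// Allocation in the survivor GC alloc region during evacuation. The first
// attempt is made without a lock; on failure we retry the locked slow path
// under the FreeList_lock.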
inline HeapWord* G1CollectedHeap::survivor_attempt_allocation(size_t
                                                              word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _survivor_gc_alloc_region.attempt_allocation(word_size,
                                                      false /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _survivor_gc_alloc_region.attempt_allocation_locked(word_size,
                                                      false /* bot_updates */);
  }
  if (result != NULL) {
    dirty_young_block(result, word_size);
  }
  return result;
}

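// Allocation in the old GC alloc region during evacuation. Unlike the young
// paths above, BOT (block offset table) updates are required here, and the
// result is not dirtied as young.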
inline HeapWord* G1CollectedHeap::old_attempt_allocation(size_t word_size) {
  assert(!isHumongous(word_size),
         "we should not be seeing humongous-size allocations in this path");

  HeapWord* result = _old_gc_alloc_region.attempt_allocation(word_size,
                                                       true /* bot_updates */);
  if (result == NULL) {
    MutexLockerEx x(FreeList_lock, Mutex::_no_safepoint_check_flag);
    result = _old_gc_alloc_region.attempt_allocation_locked(word_size,
                                                       true /* bot_updates */);
  }
  return result;
}

// It dirties the cards that cover the block so that the post write
// barrier never queues anything when updating objects on this block.
// It is assumed (and in fact we assert) that the block belongs to a
// young region.
inline void
G1CollectedHeap::dirty_young_block(HeapWord* start, size_t word_size) {
  assert_heap_not_locked();

  // Assign the containing region to containing_hr so that we don't
  // have to keep calling heap_region_containing_raw() in the
  // asserts below.
  DEBUG_ONLY(HeapRegion* containing_hr = heap_region_containing_raw(start);)
  assert(containing_hr != NULL && start != NULL && word_size > 0,
         "pre-condition");
  assert(containing_hr->is_in(start), "it should contain start");
  assert(containing_hr->is_young(), "it should be young");
  assert(!containing_hr->isHumongous(), "it should not be humongous");

  HeapWord* end = start + word_size;
  assert(containing_hr->is_in(end - 1), "it should also contain end - 1");

  MemRegion mr(start, end);
  g1_barrier_set()->g1_mark_as_young(mr);
}

inline RefToScanQueue* G1CollectedHeap::task_queue(int i) const {
  return _task_queues->queue(i);
}

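// Queries against the concurrent marker's previous and next marking bitmaps.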
inline bool G1CollectedHeap::isMarkedPrev(oop obj) const {
  return _cm->prevMarkBitMap()->isMarked((HeapWord *)obj);
}

inline bool G1CollectedHeap::isMarkedNext(oop obj) const {
  return _cm->nextMarkBitMap()->isMarked((HeapWord *)obj);
}

// This is a fast test on whether a reference points into the
// collection set or not. Assume that the reference
// points into the heap.
inline bool G1CollectedHeap::in_cset_fast_test(oop obj) {
  assert(_in_cset_fast_test != NULL, "sanity");
  assert(_g1_committed.contains((HeapWord*) obj), err_msg("Given reference outside of heap, is "PTR_FORMAT, p2i((HeapWord*)obj)));
  // no need to subtract the bottom of the heap from obj,
  // _in_cset_fast_test is biased
  uintx index = cast_from_oop<uintx>(obj) >> HeapRegion::LogOfHRGrainBytes;
  bool ret = _in_cset_fast_test[index];
  // let's make sure the result is consistent with what the slower
  // test returns
  assert( ret || !obj_in_cs(obj), "sanity");
  assert(!ret ||  obj_in_cs(obj), "sanity");
  return ret;
}

#ifndef PRODUCT
// Support for G1EvacuationFailureALot

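// Returns whether evacuation-failure injection applies to the given GC type,
// based on the corresponding G1EvacuationFailureALotDuring* flags.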
inline bool
G1CollectedHeap::evacuation_failure_alot_for_gc_type(bool gcs_are_young,
                                                     bool during_initial_mark,
                                                     bool during_marking) {
  bool res = false;
  if (during_marking) {
    res |= G1EvacuationFailureALotDuringConcMark;
  }
  if (during_initial_mark) {
    res |= G1EvacuationFailureALotDuringInitialMark;
  }
  if (gcs_are_young) {
    res |= G1EvacuationFailureALotDuringYoungGC;
  } else {
    // GCs are mixed
    res |= G1EvacuationFailureALotDuringMixedGC;
  }
  return res;
}

inline void
G1CollectedHeap::set_evacuation_failure_alot_for_current_gc() {
  if (G1EvacuationFailureALot) {
    // Note we can't assert that _evacuation_failure_alot_for_current_gc
    // is clear here. It may have been set during a previous GC but that GC
    // did not copy enough objects (i.e. G1EvacuationFailureALotCount) to
    // trigger an evacuation failure and clear the flags and counts.

    // Check if we have gone over the interval.
    const size_t gc_num = total_collections();
    const size_t elapsed_gcs = gc_num - _evacuation_failure_alot_gc_number;

    _evacuation_failure_alot_for_current_gc = (elapsed_gcs >= G1EvacuationFailureALotInterval);

    // Now check if G1EvacuationFailureALot is enabled for the current GC type.
    const bool gcs_are_young = g1_policy()->gcs_are_young();
    const bool during_im = g1_policy()->during_initial_mark_pause();
    const bool during_marking = mark_in_progress();

    _evacuation_failure_alot_for_current_gc &=
      evacuation_failure_alot_for_gc_type(gcs_are_young,
                                          during_im,
                                          during_marking);
  }
}

inline bool
G1CollectedHeap::evacuation_should_fail() {
  if (!G1EvacuationFailureALot || !_evacuation_failure_alot_for_current_gc) {
    return false;
  }
  // G1EvacuationFailureALot is in effect for current GC
  // Access to _evacuation_failure_alot_count is not atomic;
  // the value does not have to be exact.
  if (++_evacuation_failure_alot_count < G1EvacuationFailureALotCount) {
    return false;
  }
  _evacuation_failure_alot_count = 0;
  return true;
}

inline void G1CollectedHeap::reset_evacuation_should_fail() {
  if (G1EvacuationFailureALot) {
    _evacuation_failure_alot_gc_number = total_collections();
    _evacuation_failure_alot_count = 0;
    _evacuation_failure_alot_for_current_gc = false;
  }
}
#endif  // #ifndef PRODUCT

inline bool G1CollectedHeap::is_in_young(const oop obj) {
  HeapRegion* hr = heap_region_containing(obj);
  return hr != NULL && hr->is_young();
}

// We don't need barriers for initializing stores to objects
// in the young gen: for the SATB pre-barrier, there is no
// pre-value that needs to be remembered; for the remembered-set
// update logging post-barrier, we don't maintain remembered set
// information for young gen objects.
inline bool G1CollectedHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return is_in_young(new_obj);
}

inline bool G1CollectedHeap::is_obj_dead(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_dead(obj, hr);
}

inline bool G1CollectedHeap::is_obj_ill(const oop obj) const {
  const HeapRegion* hr = heap_region_containing(obj);
  if (hr == NULL) {
    if (obj == NULL) return false;
    else return true;
  }
  else return is_obj_ill(obj, hr);
}

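// Remembered-set update helpers used during evacuation: update_rs() either
// applies the update immediately or defers it (via deferred_rs_update()),
// depending on G1DeferredRSUpdate. Immediate updates skip references coming
// from survivor regions.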
template <class T> inline void G1ParScanThreadState::immediate_rs_update(HeapRegion* from, T* p, int tid) {
  if (!from->is_survivor()) {
    _g1_rem->par_write_ref(from, p, tid);
  }
}

template <class T> void G1ParScanThreadState::update_rs(HeapRegion* from, T* p, int tid) {
  if (G1DeferredRSUpdate) {
    deferred_rs_update(from, p, tid);
  } else {
    immediate_rs_update(from, p, tid);
  }
}

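// Process one chunk of a partially-scanned object array. The length field of
// the to-space copy temporarily records the next chunk's start index; if more
// than roughly 2 * ParGCArrayScanChunk elements remain, the rest is pushed
// back on the queue so that other workers can steal it.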
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(Universe::heap()->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length                 = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj                 = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array   = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index             = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         err_msg("invariant, next index: %d, length: %d", next_index, length));

  int start                  = next_index;
  int end                    = length;
  int remainder              = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing_raw(to_obj));
  // Process indexes [start,end). It will also process the header
  // along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not
  // correct given that we are using it to keep track of the next
  // start index. oop_iterate_range() (thankfully!) ignores the length
  // field and only relies on the start / end parameters.  It does
  // however return the size of the object which will be incorrect. So
  // we have to ignore it even if we wanted to use it.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

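// Process a single task-queue entry: either a regular reference to evacuate
// or an encoded partial-array marker.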
template <class T> inline void G1ParScanThreadState::deal_with_reference(T* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    // Note: we can use the "raw" version of "heap_region_containing"
    // because "ref_to_scan" is definitely in the heap, and is not in a
    // humongous region.
    HeapRegion* r = _g1h->heap_region_containing_raw(ref_to_scan);
    do_oop_evac(ref_to_scan, r);
  } else {
    do_oop_partial_array((oop*)ref_to_scan);
  }
}

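// Dispatch a StarTask entry to the narrowOop or oop variant above.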
inline void G1ParScanThreadState::deal_with_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

#endif // SHARE_VM_GC_IMPLEMENTATION_G1_G1COLLECTEDHEAP_INLINE_HPP