/*
 * Copyright (c) 2014, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
#define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP

#include "gc/g1/g1ParScanThreadState.hpp"
#include "gc/g1/g1RemSet.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"

template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  // The reference cannot be NULL here: NULL references are never pushed
  // onto the task queue.
  oop obj = RawAccess<IS_NOT_NULL>::oop_load(p);

  // Although we never intentionally push references outside of the collection
  // set, (benign) races in the claim mechanism during RSet scanning can cause
  // more than one thread to claim the same card. The same card may therefore
  // be processed multiple times, and we may see references into the old
  // generation here, so the in-collection-set check needs to be redone.
  const InCSetState in_cset_state = _g1h->in_cset_state(obj);
  if (!in_cset_state.is_in_cset()) {
    // In this case somebody else already did all the work.
    return;
  }

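  // If the object has already been forwarded, its mark word encodes its new
  // location; otherwise we still have to copy it ourselves.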
  markOop m = obj->mark_raw();
  if (m->is_marked()) {
    obj = (oop) m->decode_pointer();
  } else {
    obj = copy_to_survivor_space(in_cset_state, obj, m);
  }
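  // Update the reference with the object's new location.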
  RawAccess<IS_NOT_NULL>::oop_store(p, obj);

  assert(obj != NULL, "Must be");
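  // A reference that now crosses region boundaries may need to be recorded
  // in the remembered set.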
  if (!HeapRegion::is_in_same_region(p, obj)) {
    HeapRegion* from = _g1h->heap_region_containing(p);
    update_rs(from, p, obj);
  }
}

template <class T> inline void G1ParScanThreadState::push_on_queue(T* ref) {
  assert(verify_ref(ref), "sanity");
  _refs->push(ref);
}

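// Scan one chunk of a partially-scanned object array. Large object arrays
// are split into chunks of ParGCArrayScanChunk elements; the unscanned
// remainder is pushed back onto the queue, where idle workers can steal it.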
inline void G1ParScanThreadState::do_oop_partial_array(oop* p) {
  assert(has_partial_array_mask(p), "invariant");
  oop from_obj = clear_partial_array_mask(p);

  assert(_g1h->is_in_reserved(from_obj), "must be in heap.");
  assert(from_obj->is_objArray(), "must be obj array");
  objArrayOop from_obj_array = objArrayOop(from_obj);
  // The from-space object contains the real length.
  int length                 = from_obj_array->length();

  assert(from_obj->is_forwarded(), "must be forwarded");
  oop to_obj                 = from_obj->forwardee();
  assert(from_obj != to_obj, "should not be chunking self-forwarded objects");
  objArrayOop to_obj_array   = objArrayOop(to_obj);
  // We keep track of the next start index in the length field of the
  // to-space object.
  int next_index             = to_obj_array->length();
  assert(0 <= next_index && next_index < length,
         "invariant, next index: %d, length: %d", next_index, length);

  int start                  = next_index;
  int end                    = length;
  int remainder              = end - start;
  // We'll try not to push a range that's smaller than ParGCArrayScanChunk.
  if (remainder > 2 * ParGCArrayScanChunk) {
    end = start + ParGCArrayScanChunk;
    to_obj_array->set_length(end);
    // Push the remainder before we process the range in case another
    // worker has run out of things to do and can steal it.
    oop* from_obj_p = set_partial_array_mask(from_obj);
    push_on_queue(from_obj_p);
  } else {
    assert(length == end, "sanity");
    // We'll process the final range for this object. Restore the length
    // so that the heap remains parsable in case of evacuation failure.
    to_obj_array->set_length(end);
  }
  _scanner.set_region(_g1h->heap_region_containing(to_obj));
  // Process indexes [start,end). oop_iterate_range() will also process the
  // header along with the first chunk (i.e., the chunk with start == 0).
  // Note that at this point the length field of to_obj_array is not correct,
  // since we are using it to keep track of the next start index. Fortunately,
  // oop_iterate_range() ignores the length field and relies only on the
  // start / end parameters. It does, however, return the size of the object,
  // which will be wrong because of the bogus length, so the return value has
  // to be ignored as well.
  to_obj_array->oop_iterate_range(&_scanner, start, end);
}

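// A full-width oop* queue entry is either a plain reference to evacuate or,
// when tagged with the partial array mask, a chunk of a large object array.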
inline void G1ParScanThreadState::deal_with_reference(oop* ref_to_scan) {
  if (!has_partial_array_mask(ref_to_scan)) {
    do_oop_evac(ref_to_scan);
  } else {
    do_oop_partial_array(ref_to_scan);
  }
}

inline void G1ParScanThreadState::deal_with_reference(narrowOop* ref_to_scan) {
  assert(!has_partial_array_mask(ref_to_scan), "narrowOop* elements should never be partial arrays.");
  do_oop_evac(ref_to_scan);
}

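// A StarTask encodes either an oop* or a narrowOop*; dispatch on the width
// of the encoded reference.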
inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
  assert(verify_task(ref), "sanity");
  if (ref.is_narrow()) {
    deal_with_reference((narrowOop*)ref);
  } else {
    deal_with_reference((oop*)ref);
  }
}

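// Repeatedly steal entries from other workers' queues, draining our own
// queue after each stolen entry is processed.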
inline void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet* task_queues) {
  StarTask stolen_task;
  while (task_queues->steal(_worker_id, stolen_task)) {
    assert(verify_task(stolen_task), "sanity");
    dispatch_reference(stolen_task);

    // We've just processed a reference, which may have made new entries
    // available on the queues, so make sure to drain them as necessary.
    trim_queue();
  }
}

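// Partial trimming is needed when entries have spilled onto the overflow
// stack or the local queue has grown past the upper threshold; it is done
// once the overflow stack is empty and the queue is back below the lower
// threshold. The gap between the two thresholds provides hysteresis.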
inline bool G1ParScanThreadState::needs_partial_trimming() const {
  return !_refs->overflow_empty() || _refs->size() > _stack_trim_upper_threshold;
}

inline bool G1ParScanThreadState::is_partially_trimmed() const {
  return _refs->overflow_empty() && _refs->size() <= _stack_trim_lower_threshold;
}

inline void G1ParScanThreadState::trim_queue_to_threshold(uint threshold) {
  StarTask ref;
  // Drain the overflow stack first, so other threads can potentially steal.
  while (_refs->pop_overflow(ref)) {
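    // Prefer to move the entry onto the shared task queue where other
    // workers can steal it; only process it directly if the queue is full.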
    if (!_refs->try_push_to_taskqueue(ref)) {
      dispatch_reference(ref);
    }
  }

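  // Then process local entries until at most 'threshold' of them remain.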
  while (_refs->pop_local(ref, threshold)) {
    dispatch_reference(ref);
  }
}

inline void G1ParScanThreadState::trim_queue_partially() {
  if (!needs_partial_trimming()) {
    return;
  }

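  // Track the time spent trimming separately; callers can retrieve it via
  // trim_ticks() and account for it when reporting phase times.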
  const Ticks start = Ticks::now();
  do {
    trim_queue_to_threshold(_stack_trim_lower_threshold);
  } while (!is_partially_trimmed());
  _trim_ticks += Ticks::now() - start;
}

inline Tickspan G1ParScanThreadState::trim_ticks() const {
  return _trim_ticks;
}

inline void G1ParScanThreadState::reset_trim_ticks() {
  _trim_ticks = Tickspan();
}

#endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP