
src/hotspot/share/gc/g1/g1ParScanThreadState.inline.hpp

rev 49680 : imported patch 6672778-partial-queue-trimming
rev 49681 : [mq]: 6672778-refactoring

Old version:
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
  26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
  27 
  28 #include "gc/g1/g1ParScanThreadState.hpp"
  29 #include "gc/g1/g1RemSet.hpp"
  30 #include "oops/access.inline.hpp"
  31 #include "oops/oop.inline.hpp"
  32 
  33 template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  34   // References should never be NULL here, as NULL references are never pushed to the task queue.
  35   oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
  36 
  37   // Although we never intentionally push references outside of the collection
  38   // set, (benign) races in the claim mechanism during RSet scanning can cause
  39   // more than one thread to claim the same card. The same card may therefore
  40   // be processed multiple times, so we can get references into the old gen
  41   // here, and the in-collection-set check must be redone.
  42   const InCSetState in_cset_state = _g1h->in_cset_state(obj);
  43   if (in_cset_state.is_in_cset()) {
  44     markOop m = obj->mark_raw();
  45     if (m->is_marked()) {
  46       obj = (oop) m->decode_pointer();
  47     } else {
  48       obj = copy_to_survivor_space(in_cset_state, obj, m);
  49     }
  50     RawAccess<OOP_NOT_NULL>::oop_store(p, obj);
  51   } else if (in_cset_state.is_humongous()) {


 132 inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
 133   assert(verify_task(ref), "sanity");
 134   if (ref.is_narrow()) {
 135     deal_with_reference((narrowOop*)ref);
 136   } else {
 137     deal_with_reference((oop*)ref);
 138   }
 139 }
 140 
 141 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
 142   StarTask stolen_task;
 143   while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
 144     assert(verify_task(stolen_task), "sanity");
 145     dispatch_reference(stolen_task);
 146 
 147     // Processing this reference may have made new entries
 148     // available on the queues, so make sure to drain them
 149     // as necessary before attempting the next steal.
 150     trim_queue();
 151   }
 152 }
 153 
 154 #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP

New version:
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
  26 #define SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
  27 
  28 #include "gc/g1/g1ParScanThreadState.hpp"
  29 #include "gc/g1/g1RemSet.hpp"
  30 #include "oops/access.inline.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "utilities/ticks.inline.hpp"
  33 
  34 template <class T> void G1ParScanThreadState::do_oop_evac(T* p) {
  35   // References should never be NULL here, as NULL references are never pushed to the task queue.
  36   oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
  37 
  38   // Although we never intentionally push references outside of the collection
  39   // set, (benign) races in the claim mechanism during RSet scanning can cause
  40   // more than one thread to claim the same card. The same card may therefore
  41   // be processed multiple times, so we can get references into the old gen
  42   // here, and the in-collection-set check must be redone.
  43   const InCSetState in_cset_state = _g1h->in_cset_state(obj);
  44   if (in_cset_state.is_in_cset()) {
  45     markOop m = obj->mark_raw();
  46     if (m->is_marked()) {
  47       obj = (oop) m->decode_pointer();
  48     } else {
  49       obj = copy_to_survivor_space(in_cset_state, obj, m);
  50     }
  51     RawAccess<OOP_NOT_NULL>::oop_store(p, obj);
  52   } else if (in_cset_state.is_humongous()) {
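
[Note: a marked mark word here means the object has already been evacuated
by some thread, and the mark encodes the forwarding pointer, so
decode_pointer() yields the object's new location; otherwise this thread
performs the copy itself via copy_to_survivor_space().]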


 133 inline void G1ParScanThreadState::dispatch_reference(StarTask ref) {
 134   assert(verify_task(ref), "sanity");
 135   if (ref.is_narrow()) {
 136     deal_with_reference((narrowOop*)ref);
 137   } else {
 138     deal_with_reference((oop*)ref);
 139   }
 140 }
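
[Note: a StarTask stores either an oop* or a narrowOop* as a single tagged
value; is_narrow() recovers the tag so the reference is processed with the
correct width.]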
 141 
 142 void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues) {
 143   StarTask stolen_task;
 144   while (task_queues->steal(_worker_id, &_hash_seed, stolen_task)) {
 145     assert(verify_task(stolen_task), "sanity");
 146     dispatch_reference(stolen_task);
 147 
 148     // Processing this reference may have made new entries
 149     // available on the queues, so make sure to drain them
 150     // as necessary before attempting the next steal.
 151     trim_queue();
 152   }
 153 }
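
[Note: alternating each successful steal with a full trim_queue() prevents
a worker from building up an unbounded backlog from stolen work; the loop
terminates once steal() finds no task in any queue.]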
 154 
 155 inline bool G1ParScanThreadState::should_start_trim_queue_partially() const {
 156   return !_refs->overflow_empty() || _refs->size() > _stack_drain_upper_threshold;
 157 }
 158 
 159 inline bool G1ParScanThreadState::should_end_trim_queue_partially() const {
 160   return _refs->overflow_empty() && _refs->size() <= _stack_drain_lower_threshold;
 161 }
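
[Note: the two thresholds form a hysteresis. Partial trimming starts only
once the queue grows past the upper threshold (or anything has spilled to
the overflow stack) and then drains down to the lower threshold; e.g. with
an upper threshold of 2N and a lower threshold of N, the queue length
oscillates between N and 2N, leaving entries available for stealing.]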
 162 
 163 inline void G1ParScanThreadState::trim_queue_partially_internal() {
 164   StarTask ref;
 165   do {
 166     // Drain the overflow stack first, so other threads can potentially steal.
 167     while (_refs->pop_overflow(ref)) {
 168       if (!_refs->try_push_to_taskqueue(ref)) {
 169         dispatch_reference(ref);
 170       }
 171     }
 172 
 173     while (_refs->pop_local(ref, _stack_drain_lower_threshold)) {
 174       dispatch_reference(ref);
 175     }
 176   } while (!should_end_trim_queue_partially());
 177 }
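
[Note: entries popped from the overflow stack are first pushed back onto
the bounded task queue (try_push_to_taskqueue) so that other threads can
steal them; only when that queue is full are they processed directly. The
outer do/while re-checks the termination condition because dispatching a
reference may push new entries onto either queue.]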
 178 
 179 inline void G1ParScanThreadState::trim_queue_partially() {
 180   if (should_start_trim_queue_partially()) {
 181     const Ticks start = Ticks::now();
 182     trim_queue_partially_internal();
 183     _trim_ticks += Ticks::now() - start;
 184   }
 185 }
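
[Note: the should_start_trim_queue_partially() guard keeps the common case
cheap: timestamps are taken only when there is actually something to trim,
and the elapsed time is accumulated in _trim_ticks.]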
 186 
 187 inline Tickspan G1ParScanThreadState::trim_ticks_and_reset() {
 188   Tickspan result = _trim_ticks;
 189   _trim_ticks = Tickspan();
 190   return result;
 191 }
 192 
 193 #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
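
For context, a minimal sketch of how these hooks might be used by a scan
closure and by per-phase timing code; the closure shape and the names
ExampleScanClosure/do_oop_work below are illustrative, not part of this
webrev:

  // Illustrative only: trim the owning thread's queues after each
  // processed reference, so the queue length stays near the thresholds.
  void ExampleScanClosure::do_oop_work(oop* p) {
    // ... evacuate or enqueue *p via _par_scan_state ...
    _par_scan_state->trim_queue_partially();
  }

  // Illustrative only: when attributing time to phases, fetch and reset
  // the accumulated trim time so it can be reported separately.
  Tickspan trim_time = _par_scan_state->trim_ticks_and_reset();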