--- old/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	2015-04-21 13:44:49.211242854 +0200
+++ new/src/share/vm/gc_implementation/concurrentMarkSweep/concurrentMarkSweepGeneration.cpp	2015-04-21 13:44:49.019236405 +0200
@@ -66,6 +66,7 @@
 #include "services/memoryService.hpp"
 #include "services/runtimeService.hpp"
 #include "utilities/stack.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 // statics
 CMSCollector* ConcurrentMarkSweepGeneration::_collector = NULL;
--- old/src/share/vm/gc_implementation/g1/concurrentMark.cpp	2015-04-21 13:44:49.639257232 +0200
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.cpp	2015-04-21 13:44:49.479251857 +0200
@@ -54,6 +54,7 @@
 #include "runtime/atomic.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "services/memTracker.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 // Concurrent marking bit map wrapper
--- old/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	2015-04-21 13:44:49.915266503 +0200
+++ new/src/share/vm/gc_implementation/g1/concurrentMark.inline.hpp	2015-04-21 13:44:49.783262069 +0200
@@ -27,6 +27,7 @@
 
 #include "gc_implementation/g1/concurrentMark.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 // Utility routine to set an exclusive range of cards on the given
 // card liveness bitmap
--- old/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2015-04-21 13:44:50.191275774 +0200
+++ new/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp	2015-04-21 13:44:50.011269729 +0200
@@ -66,6 +66,7 @@
 #include "runtime/vmThread.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/stack.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 
 size_t G1CollectedHeap::_humongous_object_threshold_in_words = 0;
--- old/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	2015-04-21 13:44:50.567288405 +0200
+++ new/src/share/vm/gc_implementation/parNew/parNewGeneration.cpp	2015-04-21 13:44:50.411283164 +0200
@@ -54,6 +54,7 @@
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/stack.inline.hpp"
+#include "utilities/taskqueue.inline.hpp"
 #include "utilities/workgroup.hpp"
 
 #ifdef _MSC_VER
--- old/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	2015-04-21 13:44:50.839297543 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp	2015-04-21 13:44:50.691292571 +0200
@@ -57,7 +57,7 @@
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
 
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
   CLDToOopClosure mark_and_push_from_clds(&mark_and_push_closure, true);
   MarkingCodeBlobClosure mark_and_push_in_blobs(&mark_and_push_closure, !CodeBlobToOopClosure::FixRelocations);
 
@@ -85,8 +85,8 @@
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
-  PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
 
   switch (_root_type) {
     case universe:
@@ -156,8 +156,8 @@
     PrintGCDetails && TraceParallelOldGCTasks, true, NULL, PSParallelCompact::gc_tracer()->gc_id()));
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
-  PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
   _rp_task.work(_work_id, *PSParallelCompact::is_alive_closure(),
                 mark_and_push_closure, follow_stack_closure);
 }
@@ -213,7 +213,7 @@
 
   ParCompactionManager* cm =
     ParCompactionManager::gc_thread_compaction_manager(which);
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
 
   oop obj = NULL;
   ObjArrayTask task;
--- old/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	2015-04-21 13:44:51.075305470 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp	2015-04-21 13:44:50.939300901 +0200
@@ -179,11 +179,11 @@
 
 void InstanceKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
   assert(obj != NULL, "can't follow the content of NULL object");
 
-  PSParallelCompact::follow_klass(cm, this);
+  cm->follow_klass(this);
   // Only mark the header and let the scan of the meta-data mark
   // everything else.
 
-  PSParallelCompact::MarkAndPushClosure cl(cm);
+  ParCompactionManager::MarkAndPushClosure cl(cm);
   InstanceKlass::oop_oop_iterate_oop_maps(obj, &cl);
 }
 
@@ -203,7 +203,7 @@
     if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
       PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
     } else {
-      PSParallelCompact::follow_klass(cm, klass);
+      cm->follow_klass(klass);
     }
   } else {
     // If klass is NULL then this a mirror for a primitive type.
@@ -212,7 +212,7 @@
     assert(java_lang_Class::is_primitive(obj), "Sanity check");
   }
 
-  PSParallelCompact::MarkAndPushClosure cl(cm);
+  ParCompactionManager::MarkAndPushClosure cl(cm);
   oop_oop_iterate_statics(obj, &cl);
 }
 
@@ -253,7 +253,7 @@
           gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
         }
       )
-      PSParallelCompact::mark_and_push(cm, referent_addr);
+      cm->mark_and_push(referent_addr);
     }
   }
   T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
@@ -269,7 +269,7 @@
                                  PTR_FORMAT, p2i(discovered_addr));
         }
       )
-      PSParallelCompact::mark_and_push(cm, discovered_addr);
+      cm->mark_and_push(discovered_addr);
     }
   } else {
 #ifdef ASSERT
@@ -283,7 +283,7 @@
                    p2i(obj)));
 #endif
   }
-  PSParallelCompact::mark_and_push(cm, next_addr);
+  cm->mark_and_push(next_addr);
   klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
 }
 
@@ -297,7 +297,7 @@
 }
 
 void ObjArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
-  PSParallelCompact::follow_klass(cm, this);
+  cm->follow_klass(this);
 
   if (UseCompressedOops) {
     oop_pc_follow_contents_specialized<narrowOop>(objArrayOop(obj), 0, cm);
--- old/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	2015-04-21 13:44:51.319313666 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.hpp	2015-04-21 13:44:51.179308963 +0200
@@ -170,10 +170,15 @@
   bool should_copy();
 
   // Save for later processing.  Must not fail.
-  inline void push(oop obj) { _marking_stack.push(obj); }
+  inline void push(oop obj);
   inline void push_objarray(oop objarray, size_t index);
   inline void push_region(size_t index);
 
+  template <typename T>
+  inline void mark_and_push(T* p);
+
+  inline void follow_klass(Klass* klass);
+
   // Access function for compaction managers
   static ParCompactionManager* gc_thread_compaction_manager(int index);
@@ -200,6 +205,39 @@
   void follow_contents(objArrayOop array, int index);
 
   void update_contents(oop obj);
+
+  class MarkAndPushClosure: public ExtendedOopClosure {
+   private:
+    ParCompactionManager* _compaction_manager;
+   public:
+    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+
+    template <typename T> void do_oop_nv(T* p);
+    virtual void do_oop(oop* p);
+    virtual void do_oop(narrowOop* p);
+
+    // This closure provides its own oop verification code.
+    debug_only(virtual bool should_verify_oops() { return false; })
+  };
+
+  class FollowStackClosure: public VoidClosure {
+   private:
+    ParCompactionManager* _compaction_manager;
+   public:
+    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+    virtual void do_void();
+  };
+
+  // The one and only place to start following the classes.
+  // Should only be applied to the ClassLoaderData klasses list.
+  class FollowKlassClosure : public KlassClosure {
+   private:
+    MarkAndPushClosure* _mark_and_push_closure;
+   public:
+    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
+        _mark_and_push_closure(mark_and_push_closure) { }
+    void do_klass(Klass* klass);
+  };
 };
 
 inline ParCompactionManager* ParCompactionManager::manager_array(int index) {
--- old/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp	2015-04-21 13:44:51.559321728 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp	2015-04-21 13:44:51.411316757 +0200
@@ -31,6 +31,11 @@
 #include "oops/oop.inline.hpp"
 #include "utilities/debug.hpp"
 #include "utilities/globalDefinitions.hpp"
+#include "utilities/taskqueue.inline.hpp"
+
+inline void ParCompactionManager::push(oop obj) {
+  _marking_stack.push(obj);
+}
 
 void ParCompactionManager::push_objarray(oop obj, size_t index)
 {
@@ -50,6 +55,40 @@
   region_stack()->push(index);
 }
 
+template <typename T>
+inline void ParCompactionManager::mark_and_push(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
+
+    if (mark_bitmap()->is_unmarked(obj) && PSParallelCompact::mark_obj(obj)) {
+      push(obj);
+    }
+  }
+}
+
+template <typename T>
+inline void ParCompactionManager::MarkAndPushClosure::do_oop_nv(T* p) {
+  _compaction_manager->mark_and_push(p);
+}
+
+inline void ParCompactionManager::MarkAndPushClosure::do_oop(oop* p)       { do_oop_nv(p); }
+inline void ParCompactionManager::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
+
+inline void ParCompactionManager::follow_klass(Klass* klass) {
+  oop holder = klass->klass_holder();
+  mark_and_push(&holder);
+}
+
+inline void ParCompactionManager::FollowStackClosure::do_void() {
+  _compaction_manager->follow_marking_stacks();
+}
+
+inline void ParCompactionManager::FollowKlassClosure::do_klass(Klass* klass) {
+  klass->oops_do(_mark_and_push_closure);
+}
+
 inline void ParCompactionManager::follow_contents(oop obj) {
   assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
   obj->pc_follow_contents(this);
@@ -69,7 +108,7 @@
 
   // Push the non-NULL elements of the next stride on the marking stack.
   for (T* e = beg; e < end; e++) {
-    PSParallelCompact::mark_and_push(cm, e);
+    cm->mark_and_push(e);
   }
 
   if (end_index < len) {
--- old/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	2015-04-21 13:44:51.787329387 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp	2015-04-21 13:44:51.655324953 +0200
@@ -820,17 +820,9 @@
 bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
 
-void PSParallelCompact::KeepAliveClosure::do_oop(oop* p)       { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
-void PSParallelCompact::KeepAliveClosure::do_oop(narrowOop* p) { PSParallelCompact::KeepAliveClosure::do_oop_work(p); }
-
 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
 
-void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
-
-void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
-  klass->oops_do(_mark_and_push_closure);
-}
 void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
   klass->oops_do(&PSParallelCompact::_adjust_pointer_closure);
 }
 
@@ -2350,8 +2342,8 @@
   TaskQueueSetSuper* qset = ParCompactionManager::region_array();
   ParallelTaskTerminator terminator(active_gc_threads, qset);
 
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
-  PSParallelCompact::FollowStackClosure follow_stack_closure(cm);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::FollowStackClosure follow_stack_closure(cm);
 
   // Need new claim bits before marking starts.
   ClassLoaderDataGraph::clear_claimed_marks();
@@ -2427,8 +2419,8 @@
 
 void PSParallelCompact::follow_class_loader(ParCompactionManager* cm,
                                             ClassLoaderData* cld) {
-  PSParallelCompact::MarkAndPushClosure mark_and_push_closure(cm);
-  PSParallelCompact::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
+  ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
+  ParCompactionManager::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
 
   cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
 }
--- old/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	2015-04-21 13:44:52.083339330 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp	2015-04-21 13:44:51.943334627 +0200
@@ -933,25 +933,6 @@
     virtual bool do_object_b(oop p);
   };
 
-  class KeepAliveClosure: public OopClosure {
-   private:
-    ParCompactionManager* _compaction_manager;
-   protected:
-    template <typename T> inline void do_oop_work(T* p);
-   public:
-    KeepAliveClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
-  };
-
-  class FollowStackClosure: public VoidClosure {
-   private:
-    ParCompactionManager* _compaction_manager;
-   public:
-    FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
-    virtual void do_void();
-  };
-
   class AdjustPointerClosure: public ExtendedOopClosure {
    public:
     template <typename T> void do_oop_nv(T* p);
@@ -967,7 +948,6 @@
     void do_klass(Klass* klass);
   };
 
-  friend class KeepAliveClosure;
   friend class FollowStackClosure;
   friend class AdjustPointerClosure;
   friend class AdjustKlassClosure;
@@ -1142,30 +1122,6 @@
   static void reset_millis_since_last_gc();
 
  public:
-  class MarkAndPushClosure: public ExtendedOopClosure {
-   private:
-    ParCompactionManager* _compaction_manager;
-   public:
-    MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
-
-    template <typename T> void do_oop_nv(T* p);
-    virtual void do_oop(oop* p);
-    virtual void do_oop(narrowOop* p);
-
-    // This closure provides its own oop verification code.
-    debug_only(virtual bool should_verify_oops() { return false; })
-  };
-
-  // The one and only place to start following the classes.
-  // Should only be applied to the ClassLoaderData klasses list.
-  class FollowKlassClosure : public KlassClosure {
-   private:
-    MarkAndPushClosure* _mark_and_push_closure;
-   public:
-    FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
-        _mark_and_push_closure(mark_and_push_closure) { }
-    void do_klass(Klass* klass);
-  };
 
   PSParallelCompact();
 
@@ -1200,12 +1156,8 @@
   // Marking support
   static inline bool mark_obj(oop obj);
   static inline bool is_marked(oop obj);
-  // Check mark and maybe push on marking stack
-  template <class T> static inline void mark_and_push(ParCompactionManager* cm,
-                                                      T* p);
 
-  template <class T> static inline void adjust_pointer(T* p);
-  static inline void follow_klass(ParCompactionManager* cm, Klass* klass);
+  template <class T> static inline void adjust_pointer(T* p);
 
   static void follow_class_loader(ParCompactionManager* cm,
                                   ClassLoaderData* klass);
@@ -1337,11 +1289,6 @@
   return mark_bitmap()->is_marked(obj);
 }
 
-template <typename T>
-inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
-  mark_and_push(_compaction_manager, p);
-}
-
 inline bool PSParallelCompact::print_phases() {
   return _print_phases;
 }
--- old/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp	2015-04-21 13:44:52.343348064 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp	2015-04-21 13:44:52.203343362 +0200
@@ -32,32 +32,6 @@
 #include "oops/klass.hpp"
 #include "oops/oop.inline.hpp"
 
-template <typename T>
-inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
-
-    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
-      cm->push(obj);
-    }
-  }
-}
-
-template <typename T>
-inline void PSParallelCompact::MarkAndPushClosure::do_oop_nv(T* p) {
-  mark_and_push(_compaction_manager, p);
-}
-
-inline void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { do_oop_nv(p); }
-inline void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
-
-inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
-  oop holder = klass->klass_holder();
-  mark_and_push(cm, &holder);
-}
-
 template <class T>
 inline void PSParallelCompact::adjust_pointer(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
--- old/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	2015-04-21 13:44:52.555355185 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.hpp	2015-04-21 13:44:52.435351155 +0200
@@ -143,9 +143,7 @@
                                               int start, int end);
   void process_array_chunk(oop old);
 
-  template <class T> void push_depth(T* p) {
-    claimed_stack_depth()->push(p);
-  }
+  template <class T> void push_depth(T* p);
 
   inline void promotion_trace_event(oop new_obj, oop old_obj, size_t obj_size,
                                     uint age, bool tenured,
--- old/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	2015-04-21 13:44:52.803363516 +0200
+++ new/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp	2015-04-21 13:44:52.659358679 +0200
@@ -31,6 +31,7 @@
@@ #include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp" #include "gc_implementation/parallelScavenge/psScavenge.hpp" #include "oops/oop.inline.hpp" +#include "utilities/taskqueue.inline.hpp" inline PSPromotionManager* PSPromotionManager::manager_array(int index) { assert(_manager_array != NULL, "access of NULL manager_array"); @@ -38,6 +39,12 @@ return &_manager_array[index]; } + +template +inline void PSPromotionManager::push_depth(T* p) { + claimed_stack_depth()->push(p); +} + template inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) { if (p != NULL) { // XXX: error if p != NULL here @@ -99,7 +106,7 @@ // performance. // template -oop PSPromotionManager::copy_to_survivor_space(oop o) { +inline oop PSPromotionManager::copy_to_survivor_space(oop o) { assert(should_scavenge(&o), "Sanity"); oop new_obj = NULL; --- old/src/share/vm/utilities/taskqueue.hpp 2015-04-21 13:44:53.075372654 +0200 +++ new/src/share/vm/utilities/taskqueue.hpp 2015-04-21 13:44:52.899366742 +0200 @@ -337,24 +337,6 @@ // tty->print_cr("END OopTaskQueue::oops_do"); } -template -bool GenericTaskQueue::push_slow(E t, uint dirty_n_elems) { - if (dirty_n_elems == N - 1) { - // Actually means 0, so do the push. - uint localBot = _bottom; - // g++ complains if the volatile result of the assignment is - // unused, so we cast the volatile away. We cannot cast directly - // to void, because gcc treats that as not using the result of the - // assignment. However, casting to E& means that we trigger an - // unused-value warning. So, we cast the E& to void. - (void)const_cast(_elems[localBot] = t); - OrderAccess::release_store(&_bottom, increment_index(localBot)); - TASKQUEUE_STATS_ONLY(stats.record_push()); - return true; - } - return false; -} - // pop_local_slow() is done by the owning thread and is trying to // get the last task in the queue. It will compete with pop_global() // that will be used by other threads. The tag age is incremented @@ -469,16 +451,6 @@ }; template -bool OverflowTaskQueue::push(E t) -{ - if (!taskqueue_t::push(t)) { - overflow_stack()->push(t); - TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size())); - } - return true; -} - -template bool OverflowTaskQueue::pop_overflow(E& t) { if (overflow_empty()) return false; @@ -650,28 +622,6 @@ }; template inline bool -GenericTaskQueue::push(E t) { - uint localBot = _bottom; - assert(localBot < N, "_bottom out of range."); - idx_t top = _age.top(); - uint dirty_n_elems = dirty_size(localBot, top); - assert(dirty_n_elems < N, "n_elems out of range."); - if (dirty_n_elems < max_elems()) { - // g++ complains if the volatile result of the assignment is - // unused, so we cast the volatile away. We cannot cast directly - // to void, because gcc treats that as not using the result of the - // assignment. However, casting to E& means that we trigger an - // unused-value warning. So, we cast the E& to void. - (void) const_cast(_elems[localBot] = t); - OrderAccess::release_store(&_bottom, increment_index(localBot)); - TASKQUEUE_STATS_ONLY(stats.record_push()); - return true; - } else { - return push_slow(t, dirty_n_elems); - } -} - -template inline bool GenericTaskQueue::pop_local(volatile E& t) { uint localBot = _bottom; // This value cannot be N-1. That can only occur as a result of --- /dev/null 2015-03-26 13:30:33.624005573 +0100 +++ new/src/share/vm/utilities/taskqueue.inline.hpp 2015-04-21 13:44:53.199376819 +0200 @@ -0,0 +1,83 @@ +/* + * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved. 
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_UTILITIES_TASKQUEUE_INLINE_HPP
+#define SHARE_VM_UTILITIES_TASKQUEUE_INLINE_HPP
+
+#include "utilities/debug.hpp"
+#include "utilities/taskqueue.hpp"
+#include "utilities/stack.inline.hpp"
+#include "runtime/orderAccess.inline.hpp"
+
+template<class E, MEMFLAGS F, unsigned int N>
+bool GenericTaskQueue<E, F, N>::push_slow(E t, uint dirty_n_elems) {
+  if (dirty_n_elems == N - 1) {
+    // Actually means 0, so do the push.
+    uint localBot = _bottom;
+    // g++ complains if the volatile result of the assignment is
+    // unused, so we cast the volatile away.  We cannot cast directly
+    // to void, because gcc treats that as not using the result of the
+    // assignment.  However, casting to E& means that we trigger an
+    // unused-value warning.  So, we cast the E& to void.
+    (void)const_cast<E&>(_elems[localBot] = t);
+    OrderAccess::release_store(&_bottom, increment_index(localBot));
+    TASKQUEUE_STATS_ONLY(stats.record_push());
+    return true;
+  }
+  return false;
+}
+
+template<class E, MEMFLAGS F, unsigned int N> inline bool
+GenericTaskQueue<E, F, N>::push(E t) {
+  uint localBot = _bottom;
+  assert(localBot < N, "_bottom out of range.");
+  idx_t top = _age.top();
+  uint dirty_n_elems = dirty_size(localBot, top);
+  assert(dirty_n_elems < N, "n_elems out of range.");
+  if (dirty_n_elems < max_elems()) {
+    // g++ complains if the volatile result of the assignment is
+    // unused, so we cast the volatile away.  We cannot cast directly
+    // to void, because gcc treats that as not using the result of the
+    // assignment.  However, casting to E& means that we trigger an
+    // unused-value warning.  So, we cast the E& to void.
+    (void) const_cast<E&>(_elems[localBot] = t);
+    OrderAccess::release_store(&_bottom, increment_index(localBot));
+    TASKQUEUE_STATS_ONLY(stats.record_push());
+    return true;
+  } else {
+    return push_slow(t, dirty_n_elems);
+  }
+}
+
+template <class E, MEMFLAGS F, unsigned int N>
+inline bool OverflowTaskQueue<E, F, N>::push(E t)
+{
+  if (!taskqueue_t::push(t)) {
+    overflow_stack()->push(t);
+    TASKQUEUE_STATS_ONLY(stats.record_overflow(overflow_stack()->size()));
+  }
+  return true;
+}
+
+#endif // SHARE_VM_UTILITIES_TASKQUEUE_INLINE_HPP
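
Note on the new "utilities/taskqueue.inline.hpp" includes: once the template bodies of GenericTaskQueue::push(), push_slow() and OverflowTaskQueue::push() move out of taskqueue.hpp, every translation unit that instantiates them must see the .inline.hpp file, which is why the CMS, G1, ParNew and ParallelScavenge .cpp files each gain the extra include. A minimal standalone sketch of the same hpp/inline.hpp split (all names below are made up for illustration, not from the patch):

    // queue.hpp equivalent: declaration only, no template body.
    template <typename E, unsigned N>
    class BoundedQueue {
     public:
      BoundedQueue() : _bottom(0) {}
      bool push(E t);                    // body lives in the "inline" part
     private:
      E _elems[N];
      unsigned _bottom;
    };

    // queue.inline.hpp equivalent: the out-of-line template definition.
    template <typename E, unsigned N>
    bool BoundedQueue<E, N>::push(E t) {
      if (_bottom == N) return false;    // full
      _elems[_bottom++] = t;
      return true;
    }

    // user.cpp equivalent: instantiating push() requires the definition
    // above to be visible here; with only the declaration included, the
    // build fails with an undefined reference -- the failure mode the
    // added includes in this patch prevent.
    #include <cstdio>
    int main() {
      BoundedQueue<int, 8> q;
      std::printf("pushed: %d\n", q.push(42));
      return 0;
    }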
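
The MarkAndPushClosure that moves into ParCompactionManager keeps the do_oop_nv() convention: the virtual do_oop() overloads just forward to a non-virtual template, so iteration code that knows the closure's concrete type can bypass virtual dispatch. A toy model of that pattern (types and names are illustrative only, not HotSpot's):

    #include <cstdio>

    typedef unsigned long oop_t;   // stand-in for oop
    typedef unsigned narrow_t;     // stand-in for narrowOop

    struct OopClosureBase {
      virtual void do_oop(oop_t* p) = 0;
      virtual void do_oop(narrow_t* p) = 0;
      virtual ~OopClosureBase() {}
    };

    struct CountingClosure : public OopClosureBase {
      int count;
      CountingClosure() : count(0) {}
      // Non-virtual worker; specialized iterators call this directly.
      template <typename T> void do_oop_nv(T* /*p*/) { count++; }
      // Virtual entry points forward to the template.
      virtual void do_oop(oop_t* p)    { do_oop_nv(p); }
      virtual void do_oop(narrow_t* p) { do_oop_nv(p); }
    };

    int main() {
      CountingClosure cl;
      oop_t o = 0; narrow_t n = 0;
      cl.do_oop_nv(&o);              // statically bound (fast path)
      OopClosureBase* base = &cl;
      base->do_oop(&n);              // virtual dispatch, same effect
      std::printf("visited %d oops\n", cl.count);
      return 0;
    }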
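
On push_slow()'s "Actually means 0" comment: dirty_size() is computed modulo N (a power of two), so with concurrent pop_local()/pop_global() an empty queue can momentarily present a dirty size of N - 1, and that single ambiguous value is the only case the slow path accepts. A worked example of the index arithmetic, assuming N = 8 for readability (the real TASKQUEUE_SIZE is far larger):

    #include <cassert>
    #include <cstdio>

    // Toy version of TaskQueueSuper's dirty_size(): distance from top
    // to bottom, modulo N. Requires N to be a power of two.
    static const unsigned N = 8;
    static unsigned dirty_size(unsigned bot, unsigned top) {
      return (bot - top) & (N - 1);  // unsigned wraparound is well defined
    }

    int main() {
      assert(dirty_size(5, 2) == 3);     // ordinary case: 3 slots dirty
      // bottom one behind top reads as N - 1 although no task remains,
      // which is why push_slow() treats N - 1 as "actually 0" and pushes.
      assert(dirty_size(2, 3) == N - 1);
      std::printf("dirty_size checks passed for N = %u\n", N);
      return 0;
    }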
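
OverflowTaskQueue::push(), now living in taskqueue.inline.hpp, never fails: when the bounded lock-free queue rejects a task, the task spills to a growable overflow stack and push() still returns true. A simplified single-threaded analogue of that contract (the real fast path is the lock-free GenericTaskQueue::push() above; this sketch only mirrors the spill-and-always-succeed behavior):

    #include <cstdio>
    #include <vector>

    template <typename E, unsigned N>
    class OverflowQueueModel {
     public:
      OverflowQueueModel() : _size(0) {}
      bool push(E t) {
        if (_size < N) {
          _elems[_size++] = t;       // bounded fast path
        } else {
          _overflow.push_back(t);    // spill; capacity is unbounded
        }
        return true;                 // the contract: push cannot fail
      }
      bool pop_overflow(E& t) {
        if (_overflow.empty()) return false;
        t = _overflow.back();
        _overflow.pop_back();
        return true;
      }
     private:
      E _elems[N];
      unsigned _size;
      std::vector<E> _overflow;
    };

    int main() {
      OverflowQueueModel<int, 2> q;
      for (int i = 0; i < 4; i++) q.push(i);   // the last two tasks spill
      int t;
      while (q.pop_overflow(t)) std::printf("overflow task: %d\n", t);
      return 0;
    }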