# HG changeset patch
# User stefank
# Date 1427385849 -3600
#      Thu Mar 26 17:04:09 2015 +0100
# Node ID 1917488a60ed23ee981bd5685ca3a6bc38f28733
# Parent  c73a2be78b7385f0e16e429b8f43a779fdb024bf
[mq]: OopOopIterateMacros

diff --git a/make/solaris/makefiles/product.make b/make/solaris/makefiles/product.make
--- a/make/solaris/makefiles/product.make
+++ b/make/solaris/makefiles/product.make
@@ -37,6 +37,11 @@
 OPT_CFLAGS/ciEnv.o = $(OPT_CFLAGS) -xinline=no%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_
 endif
 
+# Need extra inlining to get oop_ps_push_contents functions to perform well enough.
+ifndef USE_GCC
+OPT_CFLAGS/psPromotionManager.o = $(OPT_CFLAGS) -W2,-Ainline:inc=1000
+endif
+
 # (OPT_CFLAGS/SLOWER is also available, to alter compilation of buggy files)
 
 ifeq ("${Platform_compiler}", "sparcWorks")
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.cpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/cmsOopClosures.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/specialized_oop_closures.hpp"
+
+// Generate CMS specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_CMS(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
diff --git a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
--- a/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
+++ b/src/share/vm/gc_implementation/concurrentMarkSweep/promotionInfo.cpp
@@ -23,6 +23,7 @@
  */
 
 #include "precompiled.hpp"
+#include "memory/genOopClosures.hpp"
 #include "gc_implementation/concurrentMarkSweep/compactibleFreeListSpace.hpp"
 #include "gc_implementation/concurrentMarkSweep/promotionInfo.hpp"
 #include "oops/markOop.inline.hpp"
diff --git a/src/share/vm/gc_implementation/g1/g1OopClosures.cpp b/src/share/vm/gc_implementation/g1/g1OopClosures.cpp
--- a/src/share/vm/gc_implementation/g1/g1OopClosures.cpp
+++ b/src/share/vm/gc_implementation/g1/g1OopClosures.cpp
@@ -23,9 +23,11 @@
  */
 
 #include "precompiled.hpp"
+#include "gc_implementation/g1/g1_specialized_oop_closures.hpp"
 #include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
 #include "gc_implementation/g1/g1OopClosures.inline.hpp"
 #include "gc_implementation/g1/g1ParScanThreadState.hpp"
+#include "memory/iterator.inline.hpp"
 
 G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par_scan_state) :
   G1ParClosureSuper(g1, par_scan_state), _scanned_klass(NULL),
@@ -50,3 +52,6 @@
   assert(_worker_id < MAX2((uint)ParallelGCThreads, 1u),
          err_msg("The given worker id %u must be less than the number of threads %u", _worker_id, MAX2((uint)ParallelGCThreads, 1u)));
 }
+
+// Generate G1 specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_G1(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
diff --git a/src/share/vm/gc_implementation/parNew/parOopClosures.cpp b/src/share/vm/gc_implementation/parNew/parOopClosures.cpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/gc_implementation/parNew/parOopClosures.cpp
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/specialized_oop_closures.hpp"
+#include "gc_implementation/parNew/parOopClosures.inline.hpp"
+
+// Generate ParNew specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(ALL_KLASS_OOP_OOP_ITERATE_DEFN);
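// ---------------------------------------------------------------------------
// A sketch of what the generator lines above expand to (annotation, not part
// of the changeset; the exact macro shapes live in specialized_oop_closures.hpp
// and the *Klass.inline.hpp files). A call such as
//
//   SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_P(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
//
// applies ALL_KLASS_OOP_OOP_ITERATE_DEFN to each specialized ParNew closure,
// and for InstanceKlass that stamps out a devirtualized definition roughly of
// this form (signature approximate):
//
//   int InstanceKlass::oop_oop_iterate_nv(oop obj, ParScanWithBarrierClosure* closure) {
//     return oop_oop_iterate<true>(obj, closure);  // <true> selects the do_oop_nv path
//   }
//
// so each (Klass, closure) pair is instantiated exactly once, in the .cpp
// file that owns the closure.
// ---------------------------------------------------------------------------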
diff --git a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/pcTasks.cpp
@@ -41,6 +41,7 @@
 #include "runtime/thread.hpp"
 #include "runtime/vmThread.hpp"
 #include "services/management.hpp"
+#include "utilities/stack.inline.hpp"
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.cpp
@@ -30,7 +30,10 @@
 #include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
 #include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
@@ -174,6 +177,142 @@
   return _manager_array[index];
 }
 
+void InstanceKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+  assert(obj != NULL, "can't follow the content of NULL object");
+
+  PSParallelCompact::follow_klass(cm, obj->klass());
+  // Only mark the header and let the scan of the meta-data mark
+  // everything else.
+
+  PSParallelCompact::MarkAndPushClosure cl(cm);
+  InstanceKlass::oop_oop_iterate_oop_maps<true>(obj, &cl);
+}
+
+void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+  InstanceKlass::oop_pc_follow_contents(obj, cm);
+
+  // Follow the klass field in the mirror.
+  Klass* klass = java_lang_Class::as_Klass(obj);
+  if (klass != NULL) {
+    // An anonymous class doesn't have its own class loader, so the call
+    // to follow_klass will mark and push its java mirror instead of the
+    // class loader. When handling the java mirror for an anonymous class
+    // we need to make sure its class loader data is claimed, this is done
+    // by calling follow_class_loader explicitly. For non-anonymous classes
+    // the call to follow_class_loader is made when the class loader itself
+    // is handled.
+    if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+      PSParallelCompact::follow_class_loader(cm, klass->class_loader_data());
+    } else {
+      PSParallelCompact::follow_klass(cm, klass);
+    }
+  } else {
+    // If klass is NULL then this is a mirror for a primitive type.
+    // We don't have to follow them, since they are handled as strong
+    // roots in Universe::oops_do.
+    assert(java_lang_Class::is_primitive(obj), "Sanity check");
+  }
+
+  PSParallelCompact::MarkAndPushClosure cl(cm);
+  oop_oop_iterate_statics<true>(obj, &cl);
+}
+
+void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+  InstanceKlass::oop_pc_follow_contents(obj, cm);
+
+  ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
+  if (loader_data != NULL) {
+    PSParallelCompact::follow_class_loader(cm, loader_data);
+  }
+}
+
+template <class T>
+static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  T heap_oop = oopDesc::load_heap_oop(referent_addr);
+  debug_only(
+    if(TraceReferenceGC && PrintGCDetails) {
+      gclog_or_tty->print_cr("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
+    }
+  )
+  if (!oopDesc::is_null(heap_oop)) {
+    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
+        PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
+      // reference already enqueued, referent will be traversed later
+      klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
+        }
+      )
+      return;
+    } else {
+      // treat referent as normal oop
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
+        }
+      )
+      PSParallelCompact::mark_and_push(cm, referent_addr);
+    }
+  }
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+    // Treat discovered as normal oop, if ref is not "active",
+    // i.e. if next is non-NULL.
+    T next_oop = oopDesc::load_heap_oop(next_addr);
+    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("   Process discovered as normal "
+                                 PTR_FORMAT, p2i(discovered_addr));
+        }
+      )
+      PSParallelCompact::mark_and_push(cm, discovered_addr);
+    }
+  } else {
+#ifdef ASSERT
+    // In the case of older JDKs which do not use the discovered
+    // field for the pending list, an inactive ref (next != NULL)
+    // must always have a NULL discovered field.
+    T next = oopDesc::load_heap_oop(next_addr);
+    oop discovered = java_lang_ref_Reference::discovered(obj);
+    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+                   p2i(obj)));
+#endif
+  }
+  PSParallelCompact::mark_and_push(cm, next_addr);
+  klass->InstanceKlass::oop_pc_follow_contents(obj, cm);
+}
+
+
+void InstanceRefKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+  if (UseCompressedOops) {
+    oop_pc_follow_contents_specialized<narrowOop>(this, obj, cm);
+  } else {
+    oop_pc_follow_contents_specialized<oop>(this, obj, cm);
+  }
+}
+
+void ObjArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+  PSParallelCompact::follow_klass(cm, this);
+
+  if (UseCompressedOops) {
+    oop_pc_follow_contents_specialized<narrowOop>(this, obj, 0, cm);
+  } else {
+    oop_pc_follow_contents_specialized<oop>(this, obj, 0, cm);
+  }
+}
+
+void TypeArrayKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
+  assert(obj->is_typeArray(),"must be a type array");
+  // Performance tweak: We skip iterating over the klass pointer since we
+  // know that Universe::TypeArrayKlass never moves.
+}
+
 void ParCompactionManager::follow_marking_stacks() {
   do {
     // Drain the overflow stack first, to allow stealing from the marking stack.
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psCompactionManager.inline.hpp
@@ -26,9 +26,11 @@
 #define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_INLINE_HPP
 
 #include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
-#include "oops/objArrayKlass.inline.hpp"
-#include "oops/oop.pcgc.inline.hpp"
+#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
+#include "oops/objArrayOop.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 void ParCompactionManager::push_objarray(oop obj, size_t index) {
 
@@ -49,16 +51,43 @@
 }
 
 inline void ParCompactionManager::follow_contents(oop obj) {
-  obj->follow_contents(this);
+  assert(PSParallelCompact::mark_bitmap()->is_marked(obj), "should be marked");
+  obj->pc_follow_contents(this);
+}
+
+template <typename T>
+inline void oop_pc_follow_contents_specialized(ObjArrayKlass* klass, oop obj, int index, ParCompactionManager* cm) {
+  objArrayOop a = objArrayOop(obj);
+  const size_t len = size_t(a->length());
+  const size_t beg_index = size_t(index);
+  assert(beg_index < len || len == 0, "index too large");
+
+  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t end_index = beg_index + stride;
+  T* const base = (T*)a->base();
+  T* const beg = base + beg_index;
+  T* const end = base + end_index;
+
+  // Push the non-NULL elements of the next stride on the marking stack.
+  for (T* e = beg; e < end; e++) {
+    PSParallelCompact::mark_and_push(cm, e);
+  }
+
+  if (end_index < len) {
+    cm->push_objarray(a, end_index); // Push the continuation.
+  }
+}
 
 inline void ParCompactionManager::follow_contents(objArrayOop obj, int index) {
-  ObjArrayKlass* k = (ObjArrayKlass*)obj->klass();
-  k->oop_follow_contents(this, obj, index);
+  if (UseCompressedOops) {
+    oop_pc_follow_contents_specialized<narrowOop>((ObjArrayKlass*)obj->klass(), obj, index, this);
+  } else {
+    oop_pc_follow_contents_specialized<oop>((ObjArrayKlass*)obj->klass(), obj, index, this);
+  }
 }
 
 inline void ParCompactionManager::update_contents(oop obj) {
-  obj->update_contents(this);
+  obj->pc_update_contents();
 }
 
 #endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSCOMPACTIONMANAGER_INLINE_HPP
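// ---------------------------------------------------------------------------
// Annotation (not part of the changeset): the objArray walker above marks
// large arrays in strides so a single huge array cannot monopolize one GC
// worker. A minimal sketch of the idea, with hypothetical names
// (follow_array_chunk, Continuation, TaskQueue):
//
//   template <typename T>
//   void follow_array_chunk(objArrayOop a, size_t from, TaskQueue* q) {
//     const size_t len = (size_t)a->length();
//     const size_t to  = MIN2(len, from + ObjArrayMarkingStride);
//     T* base = (T*)a->base();
//     for (T* e = base + from; e < base + to; e++) {
//       mark_and_push(e);                  // visit one stride of elements
//     }
//     if (to < len) {
//       q->push(Continuation(a, to));      // remainder becomes a stealable task
//     }
//   }
//
// push_objarray stores the (array, index) continuation on a separate objarray
// stack, which follow_marking_stacks drains alongside the regular marking stack.
// ---------------------------------------------------------------------------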
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.cpp
@@ -34,7 +34,7 @@
 #include "gc_implementation/parallelScavenge/psMarkSweep.hpp"
 #include "gc_implementation/parallelScavenge/psMarkSweepDecorator.hpp"
 #include "gc_implementation/parallelScavenge/psOldGen.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+#include "gc_implementation/parallelScavenge/psParallelCompact.inline.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #include "gc_implementation/parallelScavenge/psYoungGen.hpp"
@@ -48,7 +48,10 @@
 #include "memory/gcLocker.inline.hpp"
 #include "memory/referencePolicy.hpp"
 #include "memory/referenceProcessor.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/methodData.hpp"
+#include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.inline.hpp"
 #include "runtime/fprofiler.hpp"
@@ -825,16 +828,8 @@
 PSParallelCompact::AdjustPointerClosure PSParallelCompact::_adjust_pointer_closure;
 PSParallelCompact::AdjustKlassClosure PSParallelCompact::_adjust_klass_closure;
 
-void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
-void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
-
 void PSParallelCompact::FollowStackClosure::do_void() { _compaction_manager->follow_marking_stacks(); }
 
-void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p) {
-  mark_and_push(_compaction_manager, p);
-}
-void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(_compaction_manager, p); }
-
 void PSParallelCompact::FollowKlassClosure::do_klass(Klass* klass) {
   klass->oops_do(_mark_and_push_closure);
 }
@@ -3340,6 +3335,71 @@
   update_state(words);
 }
 
+void InstanceKlass::oop_pc_update_pointers(oop obj) {
+  oop_oop_iterate_oop_maps<true>(obj, PSParallelCompact::adjust_pointer_closure());
+}
+
+void InstanceMirrorKlass::oop_pc_update_pointers(oop obj) {
+  InstanceKlass::oop_pc_update_pointers(obj);
+
+  oop_oop_iterate_statics<true>(obj, PSParallelCompact::adjust_pointer_closure());
+}
+
+void InstanceClassLoaderKlass::oop_pc_update_pointers(oop obj) {
+  InstanceKlass::oop_pc_update_pointers(obj);
+}
+
+#ifdef ASSERT
+template <class T> static void trace_reference_gc(const char *s, oop obj,
+                                                  T* referent_addr,
+                                                  T* next_addr,
+                                                  T* discovered_addr) {
+  if(TraceReferenceGC && PrintGCDetails) {
+    gclog_or_tty->print_cr("%s obj " PTR_FORMAT, s, p2i(obj));
+    gclog_or_tty->print_cr("     referent_addr/* " PTR_FORMAT " / "
+                           PTR_FORMAT, p2i(referent_addr),
+                           referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
+    gclog_or_tty->print_cr("     next_addr/* " PTR_FORMAT " / "
+                           PTR_FORMAT, p2i(next_addr),
+                           next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
+    gclog_or_tty->print_cr("     discovered_addr/* " PTR_FORMAT " / "
+                           PTR_FORMAT, p2i(discovered_addr),
+                           discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
+  }
+}
+#endif
+
+template <class T>
+static void oop_pc_update_pointers_specialized(oop obj) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  PSParallelCompact::adjust_pointer(referent_addr);
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  PSParallelCompact::adjust_pointer(next_addr);
+  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+  PSParallelCompact::adjust_pointer(discovered_addr);
+  debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
+                                referent_addr, next_addr, discovered_addr);)
+}
+
+void InstanceRefKlass::oop_pc_update_pointers(oop obj) {
+  InstanceKlass::oop_pc_update_pointers(obj);
+
+  if (UseCompressedOops) {
+    oop_pc_update_pointers_specialized<narrowOop>(obj);
+  } else {
+    oop_pc_update_pointers_specialized<oop>(obj);
+  }
+}
+
+void ObjArrayKlass::oop_pc_update_pointers(oop obj) {
+  assert(obj->is_objArray(), "obj must be obj array");
+  oop_oop_iterate_elements<true>(objArrayOop(obj), PSParallelCompact::adjust_pointer_closure());
+}
+
+void TypeArrayKlass::oop_pc_update_pointers(oop obj) {
+  assert(obj->is_typeArray(),"must be a type array");
+}
+
 ParMarkBitMapClosure::IterationStatus
 MoveAndUpdateClosure::do_addr(HeapWord* addr, size_t words) {
   assert(destination() != NULL, "sanity");
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.hpp
@@ -951,12 +951,17 @@
     virtual void do_void();
   };
 
-  class AdjustPointerClosure: public OopClosure {
+  class AdjustPointerClosure: public ExtendedOopClosure {
    public:
+    template <typename T> void do_oop_nv(T* p);
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
+    // do not walk from thread stacks to the code cache on this phase
     virtual void do_code_blob(CodeBlob* cb) const { }
+
+    // This closure provides its own oop verification code.
+    debug_only(virtual bool should_verify_oops() { return false; })
   };
 
   class AdjustKlassClosure : public KlassClosure {
@@ -1139,13 +1144,18 @@
   static void reset_millis_since_last_gc();
 
  public:
-  class MarkAndPushClosure: public OopClosure {
+  class MarkAndPushClosure: public ExtendedOopClosure {
    private:
     ParCompactionManager* _compaction_manager;
    public:
     MarkAndPushClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
+
+    template <typename T> void do_oop_nv(T* p);
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
+
+    // This closure provides its own oop verification code.
+    debug_only(virtual bool should_verify_oops() { return false; })
   };
 
   // The one and only place to start following the classes.
@@ -1177,7 +1187,9 @@
   static bool initialize();
 
   // Closure accessors
-  static OopClosure* adjust_pointer_closure()      { return (OopClosure*)&_adjust_pointer_closure; }
+  static PSParallelCompact::AdjustPointerClosure* adjust_pointer_closure() {
+    return &_adjust_pointer_closure;
+  }
   static KlassClosure* adjust_klass_closure()      { return (KlassClosure*)&_adjust_klass_closure; }
   static BoolObjectClosure* is_alive_closure()     { return (BoolObjectClosure*)&_is_alive_closure; }
 
@@ -1333,39 +1345,6 @@
 }
 
 template <class T>
-inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
-      cm->push(obj);
-    }
-  }
-}
-
-template <class T>
-inline void PSParallelCompact::adjust_pointer(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
-    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
-    assert(new_obj != NULL,            // is forwarding ptr?
-           "should be forwarded");
-    // Just always do the update unconditionally?
-    if (new_obj != NULL) {
-      assert(Universe::heap()->is_in_reserved(new_obj),
-             "should be in object space");
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
-    }
-  }
-}
-
-inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
-  oop holder = klass->klass_holder();
-  PSParallelCompact::mark_and_push(cm, &holder);
-}
-
-template <class T>
 inline void PSParallelCompact::KeepAliveClosure::do_oop_work(T* p) {
   mark_and_push(_compaction_manager, p);
 }
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/gc_implementation/parallelScavenge/psParallelCompact.inline.hpp
@@ -0,0 +1,87 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
+#define SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
+
+#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
+#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
+#include "gc_interface/collectedHeap.hpp"
+#include "oops/klass.hpp"
+#include "oops/oop.inline.hpp"
+
+template <typename T>
+inline void PSParallelCompact::mark_and_push(ParCompactionManager* cm, T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(Universe::heap()->is_in(obj), "should be in heap");
+
+    if (mark_bitmap()->is_unmarked(obj) && mark_obj(obj)) {
+      cm->push(obj);
+    }
+  }
+}
+
+template <typename T>
+inline void PSParallelCompact::MarkAndPushClosure::do_oop_nv(T* p) {
+  mark_and_push(_compaction_manager, p);
+}
+
+inline void PSParallelCompact::MarkAndPushClosure::do_oop(oop* p)       { do_oop_nv(p); }
+inline void PSParallelCompact::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
+
+inline void PSParallelCompact::follow_klass(ParCompactionManager* cm, Klass* klass) {
+  oop holder = klass->klass_holder();
+  mark_and_push(cm, &holder);
+}
+
+template <typename T>
+inline void PSParallelCompact::adjust_pointer(T* p) {
+  T heap_oop = oopDesc::load_heap_oop(p);
+  if (!oopDesc::is_null(heap_oop)) {
+    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(Universe::heap()->is_in(obj), "should be in heap");
+
+    oop new_obj = (oop)summary_data().calc_new_pointer(obj);
+    assert(new_obj != NULL,            // is forwarding ptr?
+           "should be forwarded");
+    // Just always do the update unconditionally?
+    if (new_obj != NULL) {
+      assert(Universe::heap()->is_in_reserved(new_obj),
+             "should be in object space");
+      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    }
+  }
+}
+
+template <typename T>
+void PSParallelCompact::AdjustPointerClosure::do_oop_nv(T* p) {
+  adjust_pointer(p);
+}
+
+inline void PSParallelCompact::AdjustPointerClosure::do_oop(oop* p)       { do_oop_nv(p); }
+inline void PSParallelCompact::AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
+
+#endif // SHARE_VM_GC_IMPLEMENTATION_PARALLELSCAVENGE_PSPARALLELCOMPACT_INLINE_HPP
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.cpp
@@ -32,6 +32,9 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/padded.inline.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/stack.inline.hpp"
 
@@ -310,6 +313,118 @@
   }
 }
 
+class PushContentsClosure : public ExtendedOopClosure {
+  PSPromotionManager* _pm;
+ public:
+  PushContentsClosure(PSPromotionManager* pm) : _pm(pm) {}
+
+  template <typename T> void do_oop_nv(T* p) {
+    if (PSScavenge::should_scavenge(p)) {
+      _pm->claim_or_forward_depth(p);
+    }
+  }
+
+  virtual void do_oop(oop* p)       { do_oop_nv(p); }
+  virtual void do_oop(narrowOop* p) { do_oop_nv(p); }
+
+  // Don't use the oop verification code in the oop_oop_iterate framework.
+  debug_only(virtual bool should_verify_oops() { return false; })
+};
+
+void InstanceKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+  PushContentsClosure cl(pm);
+  oop_oop_iterate_oop_maps_reverse<true>(obj, &cl);
+}
+
+void InstanceMirrorKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+  // Note that we don't have to follow the mirror -> klass pointer, since all
+  // klasses that are dirty will be scavenged when we iterate over the
+  // ClassLoaderData objects.
+
+  InstanceKlass::oop_ps_push_contents(obj, pm);
+
+  PushContentsClosure cl(pm);
+  oop_oop_iterate_statics<true>(obj, &cl);
+}
+
+void InstanceClassLoaderKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+  InstanceKlass::oop_ps_push_contents(obj, pm);
+
+  // This is called by the young collector. It will already have taken care of
+  // all class loader data. So, we don't have to follow the class loader ->
+  // class loader data link.
+}
+
+template <class T>
+static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, PSPromotionManager* pm) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  if (PSScavenge::should_scavenge(referent_addr)) {
+    ReferenceProcessor* rp = PSScavenge::reference_processor();
+    if (rp->discover_reference(obj, klass->reference_type())) {
+      // reference already enqueued, referent and next will be traversed later
+      klass->InstanceKlass::oop_ps_push_contents(obj, pm);
+      return;
+    } else {
+      // treat referent as normal oop
+      pm->claim_or_forward_depth(referent_addr);
+    }
+  }
+  // Treat discovered as normal oop, if ref is not "active",
+  // i.e. if next is non-NULL.
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+    T next_oop = oopDesc::load_heap_oop(next_addr);
+    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("   Process discovered as normal "
+                                 PTR_FORMAT, p2i(discovered_addr));
+        }
+      )
+      if (PSScavenge::should_scavenge(discovered_addr)) {
+        pm->claim_or_forward_depth(discovered_addr);
+      }
+    }
+  } else {
+#ifdef ASSERT
+    // In the case of older JDKs which do not use the discovered
+    // field for the pending list, an inactive ref (next != NULL)
+    // must always have a NULL discovered field.
+    oop next = oopDesc::load_decode_heap_oop(next_addr);
+    oop discovered = java_lang_ref_Reference::discovered(obj);
+    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+                   (oopDesc*)obj));
+#endif
+  }
+
+  // Treat next as normal oop; next is a link in the reference queue.
+  if (PSScavenge::should_scavenge(next_addr)) {
+    pm->claim_or_forward_depth(next_addr);
+  }
+  klass->InstanceKlass::oop_ps_push_contents(obj, pm);
+}
+
+void InstanceRefKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+  if (UseCompressedOops) {
+    oop_ps_push_contents_specialized<narrowOop>(obj, this, pm);
+  } else {
+    oop_ps_push_contents_specialized<oop>(obj, this, pm);
+  }
+}
+
+void ObjArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+  assert(obj->is_objArray(), "obj must be obj array");
+  PushContentsClosure cl(pm);
+  oop_oop_iterate_elements<true>(objArrayOop(obj), &cl);
+}
+
+void TypeArrayKlass::oop_ps_push_contents(oop obj, PSPromotionManager* pm) {
+  assert(obj->is_typeArray(),"must be a type array");
+  ShouldNotReachHere();
+}
+
 oop PSPromotionManager::oop_promotion_failed(oop obj, markOop obj_mark) {
   assert(_old_gen_is_full || PromotionFailureALot, "Sanity");
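// ---------------------------------------------------------------------------
// Annotation (not part of the changeset): every push_contents path above is
// gated on PSScavenge::should_scavenge(p) before calling
// claim_or_forward_depth(p). Assuming the usual ParallelScavenge heap layout
// (young generation above a fixed boundary), the test amounts to:
//
//   template <class T>
//   bool should_scavenge_sketch(T* p, HeapWord* young_gen_boundary) {
//     T heap_oop = oopDesc::load_heap_oop(p);
//     if (oopDesc::is_null(heap_oop)) return false;
//     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
//     return (HeapWord*)obj >= young_gen_boundary;  // only young objects need copying
//   }
//
// (should_scavenge_sketch is an illustrative name; the real test lives in
// psScavenge.inline.hpp.) This is also why the solaris makefile change asks
// for extra inlining of psPromotionManager.o: the closure body is tiny and
// only performs well if it is inlined into the generated iterate functions.
// ---------------------------------------------------------------------------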
diff --git a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
--- a/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/psPromotionManager.inline.hpp
@@ -29,7 +29,7 @@
 #include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
 #include "gc_implementation/parallelScavenge/psPromotionLAB.inline.hpp"
 #include "gc_implementation/parallelScavenge/psScavenge.hpp"
-#include "oops/oop.psgc.inline.hpp"
+#include "oops/oop.inline.hpp"
 
 inline PSPromotionManager* PSPromotionManager::manager_array(int index) {
   assert(_manager_array != NULL, "access of NULL manager_array");
@@ -92,7 +92,7 @@
 }
 
 inline void PSPromotionManager::push_contents(oop obj) {
-  obj->push_contents(this);
+  obj->ps_push_contents(this);
 }
 //
 // This method is pretty bulky. It would be nice to split it up
diff --git a/src/share/vm/gc_implementation/shared/markSweep.cpp b/src/share/vm/gc_implementation/shared/markSweep.cpp
--- a/src/share/vm/gc_implementation/shared/markSweep.cpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.cpp
@@ -28,6 +28,8 @@
 #include "gc_implementation/shared/gcTrace.hpp"
 #include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/methodData.hpp"
 #include "oops/objArrayKlass.inline.hpp"
 #include "oops/oop.inline.hpp"
@@ -57,16 +59,183 @@
 CLDToOopClosure MarkSweep::follow_cld_closure(&mark_and_push_closure);
 CLDToOopClosure MarkSweep::adjust_cld_closure(&adjust_pointer_closure);
 
-void MarkSweep::MarkAndPushClosure::do_oop(oop* p)       { mark_and_push(p); }
-void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { mark_and_push(p); }
+template <typename T>
+void MarkSweep::MarkAndPushClosure::do_oop_nv(T* p)      { mark_and_push(p); }
+void MarkSweep::MarkAndPushClosure::do_oop(oop* p)       { do_oop_nv(p); }
+void MarkSweep::MarkAndPushClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
 
 void MarkSweep::follow_class_loader(ClassLoaderData* cld) {
   MarkSweep::follow_cld_closure.do_cld(cld);
 }
 
+void InstanceKlass::oop_ms_follow_contents(oop obj) {
+  assert(obj != NULL, "can't follow the content of NULL object");
+  MarkSweep::follow_klass(obj->klass());
+
+  oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::mark_and_push_closure);
+}
+
+void InstanceMirrorKlass::oop_ms_follow_contents(oop obj) {
+  InstanceKlass::oop_ms_follow_contents(obj);
+
+  // Follow the klass field in the mirror
+  Klass* klass = java_lang_Class::as_Klass(obj);
+  if (klass != NULL) {
+    // An anonymous class doesn't have its own class loader, so the call
+    // to follow_klass will mark and push its java mirror instead of the
+    // class loader. When handling the java mirror for an anonymous class
+    // we need to make sure its class loader data is claimed, this is done
+    // by calling follow_class_loader explicitly. For non-anonymous classes
+    // the call to follow_class_loader is made when the class loader itself
+    // is handled.
+    if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) {
+      MarkSweep::follow_class_loader(klass->class_loader_data());
+    } else {
+      MarkSweep::follow_klass(klass);
+    }
+  } else {
+    // If klass is NULL then this is a mirror for a primitive type.
+    // We don't have to follow them, since they are handled as strong
+    // roots in Universe::oops_do.
+    assert(java_lang_Class::is_primitive(obj), "Sanity check");
+  }
+
+  oop_oop_iterate_statics<true>(obj, &MarkSweep::mark_and_push_closure);
+}
+
+void InstanceClassLoaderKlass::oop_ms_follow_contents(oop obj) {
+  InstanceKlass::oop_ms_follow_contents(obj);
+
+  ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj);
+
+  // We must NULL check here, since the class loader
+  // can be found before the loader data has been set up.
+  if(loader_data != NULL) {
+    MarkSweep::follow_class_loader(loader_data);
+  }
+}
+
+template <class T>
+static void oop_ms_follow_contents_specialized(InstanceRefKlass* klass, oop obj) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  T heap_oop = oopDesc::load_heap_oop(referent_addr);
+  debug_only(
+    if(TraceReferenceGC && PrintGCDetails) {
+      gclog_or_tty->print_cr("InstanceRefKlass::oop_ms_follow_contents_specialized " PTR_FORMAT, p2i(obj));
+    }
+  )
+  if (!oopDesc::is_null(heap_oop)) {
+    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!referent->is_gc_marked() &&
+        MarkSweep::ref_processor()->discover_reference(obj, klass->reference_type())) {
+      // reference was discovered, referent will be traversed later
+      klass->InstanceKlass::oop_ms_follow_contents(obj);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("       Non NULL enqueued " PTR_FORMAT, p2i(obj));
+        }
+      )
+      return;
+    } else {
+      // treat referent as normal oop
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("       Non NULL normal " PTR_FORMAT, p2i(obj));
+        }
+      )
+      MarkSweep::mark_and_push(referent_addr);
+    }
+  }
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+    // Treat discovered as normal oop, if ref is not "active",
+    // i.e. if next is non-NULL.
+    T next_oop = oopDesc::load_heap_oop(next_addr);
+    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("   Process discovered as normal "
+                                 PTR_FORMAT, p2i(discovered_addr));
+        }
+      )
+      MarkSweep::mark_and_push(discovered_addr);
+    }
+  } else {
+#ifdef ASSERT
+    // In the case of older JDKs which do not use the discovered
+    // field for the pending list, an inactive ref (next != NULL)
+    // must always have a NULL discovered field.
+    oop next = oopDesc::load_decode_heap_oop(next_addr);
+    oop discovered = java_lang_ref_Reference::discovered(obj);
+    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
+           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
+                   p2i(obj)));
+#endif
+  }
+  // treat next as normal oop. next is a link in the reference queue.
+  debug_only(
+    if(TraceReferenceGC && PrintGCDetails) {
+      gclog_or_tty->print_cr("   Process next as normal " PTR_FORMAT, p2i(next_addr));
+    }
+  )
+  MarkSweep::mark_and_push(next_addr);
+  klass->InstanceKlass::oop_ms_follow_contents(obj);
+}
+
+void InstanceRefKlass::oop_ms_follow_contents(oop obj) {
+  if (UseCompressedOops) {
+    oop_ms_follow_contents_specialized<narrowOop>(this, obj);
+  } else {
+    oop_ms_follow_contents_specialized<oop>(this, obj);
+  }
+}
+
+template <class T>
+static void oop_ms_follow_contents_specialized(oop obj, int index) {
+  objArrayOop a = objArrayOop(obj);
+  const size_t len = size_t(a->length());
+  const size_t beg_index = size_t(index);
+  assert(beg_index < len || len == 0, "index too large");
+
+  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
+  const size_t end_index = beg_index + stride;
+  T* const base = (T*)a->base();
+  T* const beg = base + beg_index;
+  T* const end = base + end_index;
+
+  // Push the non-NULL elements of the next stride on the marking stack.
+  for (T* e = beg; e < end; e++) {
+    MarkSweep::mark_and_push(e);
+  }
+
+  if (end_index < len) {
+    MarkSweep::push_objarray(a, end_index); // Push the continuation.
+  }
+}
+
+void ObjArrayKlass::oop_ms_follow_contents(oop obj) {
+  assert (obj->is_array(), "obj must be array");
+  MarkSweep::follow_klass(this);
+  if (UseCompressedOops) {
+    oop_ms_follow_contents_specialized<narrowOop>(obj, 0);
+  } else {
+    oop_ms_follow_contents_specialized<oop>(obj, 0);
+  }
+}
+
+void TypeArrayKlass::oop_ms_follow_contents(oop obj) {
+  assert(obj->is_typeArray(),"must be a type array");
+  // Performance tweak: We skip iterating over the klass pointer since we
+  // know that Universe::TypeArrayKlass never moves.
+}
+
 void MarkSweep::follow_array(objArrayOop array, int index) {
-  ObjArrayKlass* k = (ObjArrayKlass*)array->klass();
-  k->oop_follow_contents(array, index);
+  if (UseCompressedOops) {
+    oop_ms_follow_contents_specialized<narrowOop>(array, index);
+  } else {
+    oop_ms_follow_contents_specialized<oop>(array, index);
+  }
 }
 
 void MarkSweep::follow_stack() {
@@ -114,8 +283,10 @@
 
 MarkSweep::AdjustPointerClosure MarkSweep::adjust_pointer_closure;
 
-void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { adjust_pointer(p); }
-void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { adjust_pointer(p); }
+template <typename T>
+void MarkSweep::AdjustPointerClosure::do_oop_nv(T* p)      { adjust_pointer(p); }
+void MarkSweep::AdjustPointerClosure::do_oop(oop* p)       { do_oop_nv(p); }
+void MarkSweep::AdjustPointerClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
 
 void MarkSweep::adjust_marks() {
   assert( _preserved_oop_stack.size() == _preserved_mark_stack.size(),
@@ -177,3 +348,84 @@
 }
 
 #endif
+
+int InstanceKlass::oop_ms_adjust_pointers(oop obj) {
+  int size = size_helper();
+  oop_oop_iterate_oop_maps<true>(obj, &MarkSweep::adjust_pointer_closure);
+  return size;
+}
+
+int InstanceMirrorKlass::oop_ms_adjust_pointers(oop obj) {
+  int size = oop_size(obj);
+  InstanceKlass::oop_ms_adjust_pointers(obj);
+
+  oop_oop_iterate_statics<true>(obj, &MarkSweep::adjust_pointer_closure);
+  return size;
+}
+
+int InstanceClassLoaderKlass::oop_ms_adjust_pointers(oop obj) {
+  return InstanceKlass::oop_ms_adjust_pointers(obj);
+}
+
+#ifdef ASSERT
+template <class T> static void trace_reference_gc(const char *s, oop obj,
+                                                  T* referent_addr,
+                                                  T* next_addr,
+                                                  T* discovered_addr) {
+  if(TraceReferenceGC && PrintGCDetails) {
+    gclog_or_tty->print_cr("%s obj " PTR_FORMAT, s, p2i(obj));
+    gclog_or_tty->print_cr("     referent_addr/* " PTR_FORMAT " / "
+                           PTR_FORMAT, p2i(referent_addr),
+                           p2i(referent_addr ?
+                               (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
+    gclog_or_tty->print_cr("     next_addr/* " PTR_FORMAT " / "
+                           PTR_FORMAT, p2i(next_addr),
+                           p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
+    gclog_or_tty->print_cr("     discovered_addr/* " PTR_FORMAT " / "
+                           PTR_FORMAT, p2i(discovered_addr),
+                           p2i(discovered_addr ?
+                               (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
+  }
+}
+#endif
+
+template <class T> void static adjust_object_specialized(oop obj) {
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  MarkSweep::adjust_pointer(referent_addr);
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  MarkSweep::adjust_pointer(next_addr);
+  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+  MarkSweep::adjust_pointer(discovered_addr);
+  debug_only(trace_reference_gc("InstanceRefKlass::oop_ms_adjust_pointers", obj,
+                                referent_addr, next_addr, discovered_addr);)
+}
+
+int InstanceRefKlass::oop_ms_adjust_pointers(oop obj) {
+  int size = size_helper();
+  InstanceKlass::oop_ms_adjust_pointers(obj);
+
+  if (UseCompressedOops) {
+    adjust_object_specialized<narrowOop>(obj);
+  } else {
+    adjust_object_specialized<oop>(obj);
+  }
+  return size;
+}
+
+int ObjArrayKlass::oop_ms_adjust_pointers(oop obj) {
+  assert(obj->is_objArray(), "obj must be obj array");
+  objArrayOop a = objArrayOop(obj);
+  // Get size before changing pointers.
+  // Don't call size() or oop_size() since that is a virtual call.
+  int size = a->object_size();
+  oop_oop_iterate_elements<true>(a, &MarkSweep::adjust_pointer_closure);
+  return size;
+}
+
+int TypeArrayKlass::oop_ms_adjust_pointers(oop obj) {
+  assert(obj->is_typeArray(), "must be a type array");
+  typeArrayOop t = typeArrayOop(obj);
+  // Performance tweak: We skip iterating over the klass pointer since we
+  // know that Universe::TypeArrayKlass never moves.
+  return t->object_size();
+}
diff --git a/src/share/vm/gc_implementation/shared/markSweep.hpp b/src/share/vm/gc_implementation/shared/markSweep.hpp
--- a/src/share/vm/gc_implementation/shared/markSweep.hpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.hpp
@@ -60,8 +60,9 @@
     virtual void do_oop(narrowOop* p);
   };
 
-  class MarkAndPushClosure: public OopClosure {
+  class MarkAndPushClosure: public ExtendedOopClosure {
    public:
+    template <typename T> void do_oop_nv(T* p);
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
   };
@@ -73,8 +74,12 @@
 
   class AdjustPointerClosure: public OopsInGenClosure {
    public:
+    template <typename T> void do_oop_nv(T* p);
     virtual void do_oop(oop* p);
     virtual void do_oop(narrowOop* p);
+
+    // This closure provides its own oop verification code.
+    debug_only(virtual bool should_verify_oops() { return false; })
   };
 
   // Used for java/lang/ref handling
diff --git a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
--- a/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
+++ b/src/share/vm/gc_implementation/shared/markSweep.inline.hpp
@@ -28,11 +28,15 @@
 #include "gc_implementation/shared/markSweep.hpp"
 #include "gc_interface/collectedHeap.hpp"
 #include "oops/markOop.inline.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceClassLoaderKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/instanceRefKlass.inline.hpp"
+#include "oops/objArrayKlass.inline.hpp"
 #include "utilities/stack.inline.hpp"
 #include "utilities/macros.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1StringDedup.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 #endif // INCLUDE_ALL_GCS
 
 inline void MarkSweep::mark_object(oop obj) {
@@ -59,7 +63,9 @@
 }
 
 inline void MarkSweep::follow_object(oop obj) {
-  obj->follow_contents();
+  assert(obj->is_gc_marked(), "should be marked");
+
+  obj->ms_follow_contents();
 }
 
 template <class T> inline void MarkSweep::follow_root(T* p) {
@@ -95,13 +101,15 @@
 }
 
 inline int MarkSweep::adjust_pointers(oop obj) {
-  return obj->adjust_pointers();
+  return obj->ms_adjust_pointers();
 }
 
 template <class T> inline void MarkSweep::adjust_pointer(T* p) {
   T heap_oop = oopDesc::load_heap_oop(p);
   if (!oopDesc::is_null(heap_oop)) {
     oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    assert(Universe::heap()->is_in(obj), "should be in heap");
+
     oop new_obj = oop(obj->mark()->decode_pointer());
     assert(new_obj != NULL ||                       // is forwarding ptr?
            obj->mark() == markOopDesc::prototype() || // not gc marked?
diff --git a/src/share/vm/memory/defNewGeneration.cpp b/src/share/vm/memory/defNewGeneration.cpp
--- a/src/share/vm/memory/defNewGeneration.cpp
+++ b/src/share/vm/memory/defNewGeneration.cpp
@@ -48,6 +48,9 @@
 #include "utilities/copy.hpp"
 #include "utilities/globalDefinitions.hpp"
 #include "utilities/stack.inline.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/parNew/parOopClosures.hpp"
+#endif
 
 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
 
diff --git a/src/share/vm/memory/genOopClosures.cpp b/src/share/vm/memory/genOopClosures.cpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/memory/genOopClosures.cpp
@@ -0,0 +1,30 @@
+/* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#include "precompiled.hpp"
+#include "memory/genOopClosures.inline.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/specialized_oop_closures.hpp"
+
+// Generate Serial GC specialized oop_oop_iterate functions.
+SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_S(ALL_KLASS_OOP_OOP_ITERATE_DEFN)
diff --git a/src/share/vm/memory/iterator.cpp b/src/share/vm/memory/iterator.cpp
--- a/src/share/vm/memory/iterator.cpp
+++ b/src/share/vm/memory/iterator.cpp
@@ -23,8 +23,11 @@
  */
 
 #include "precompiled.hpp"
-#include "memory/iterator.hpp"
+#include "memory/iterator.inline.hpp"
+#include "memory/universe.hpp"
 #include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
 
 void KlassToOopClosure::do_klass(Klass* k) {
   assert(_oop_closure != NULL, "Not initialized?");
@@ -77,3 +80,12 @@
     do_nmethod(nm);
   }
 }
+
+// Generate the *Klass::oop_oop_iterate functions for the base class
+// of the oop closures. These versions use the virtual do_oop calls,
+// instead of the devirtualized do_oop_nv version.
+ALL_KLASS_OOP_OOP_ITERATE_DEFN(ExtendedOopClosure,  _v)
+
+// Generate the *Klass::oop_oop_iterate functions
+// for the NoHeaderExtendedOopClosure helper class.
+ALL_KLASS_OOP_OOP_ITERATE_DEFN(NoHeaderExtendedOopClosure, _nv)
diff --git a/src/share/vm/memory/iterator.hpp b/src/share/vm/memory/iterator.hpp
--- a/src/share/vm/memory/iterator.hpp
+++ b/src/share/vm/memory/iterator.hpp
@@ -44,9 +44,7 @@
 class OopClosure : public Closure {
  public:
   virtual void do_oop(oop* o) = 0;
-  virtual void do_oop_v(oop* o) { do_oop(o); }
   virtual void do_oop(narrowOop* o) = 0;
-  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
 };
 
 // ExtendedOopClosure adds extra code to be run during oop iterations.
@@ -74,11 +72,9 @@
   // Currently, only CMS and G1 need these.
   virtual bool do_metadata() { return do_metadata_nv(); }
-  bool do_metadata_v()       { return do_metadata(); }
   bool do_metadata_nv()      { return false; }
 
   virtual void do_klass(Klass* k)   { do_klass_nv(k); }
-  void do_klass_v(Klass* k)         { do_klass(k); }
   void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }
 
   virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
@@ -87,6 +83,14 @@
   // location without an intervening "major reset" (like the end of a GC).
   virtual bool idempotent() { return false; }
   virtual bool apply_to_weak_ref_discovered_field() { return false; }
+
+#ifdef ASSERT
+  // Default verification of each visited oop field.
+  template <typename T> void verify(T* p);
+
+  // Can be used by subclasses to turn off the default verification of oop fields.
+  virtual bool should_verify_oops() { return true; }
+#endif
 };
 
 // Wrapper closure only used to implement oop_iterate_no_header().
@@ -364,16 +368,33 @@
   }
 };
 
+// The two class template specializations are used to dispatch calls
+// to the ExtendedOopClosure functions. If use_non_virtual_call is true,
+// the non-virtual versions are called (E.g. do_oop_nv), otherwise the
+// virtual versions are called (E.g. do_oop).
 
-// Helper defines for ExtendOopClosure
+template <bool use_non_virtual_call>
+class Devirtualizer {};
 
-#define if_do_metadata_checked(closure, nv_suffix)                    \
-  /* Make sure the non-virtual and the virtual versions match. */     \
-  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
-         "Inconsistency in do_metadata");                             \
-  if (closure->do_metadata##nv_suffix())
+// Dispatches to the non-virtual functions.
+template <> class Devirtualizer<true> {
+ public:
+  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
+  template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
+};
 
-#define assert_should_ignore_metadata(closure, nv_suffix) \
-  assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented")
+// Dispatches to the virtual functions.
+template <> class Devirtualizer<false> {
+ public:
+  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
+  template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
+  template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
+};
+
+// Helper to convert the oop iterate macro suffixes into bool values that can be used by template functions.
+#define nvs_nv_to_bool true
+#define nvs_v_to_bool  false
+#define nvs_to_bool(nv_suffix) nvs##nv_suffix##_to_bool
 
 #endif // SHARE_VM_MEMORY_ITERATOR_HPP
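// ---------------------------------------------------------------------------
// Annotation (not part of the changeset): how the Devirtualizer layer is
// meant to be used. A Klass iterator is written once as a function template,
// and the nv flag picks the dispatch form at compile time, replacing the old
// if_do_metadata_checked macro. ExampleKlass below is hypothetical:
//
//   template <bool nv, class OopClosureType>
//   int ExampleKlass_oop_oop_iterate(oop obj, OopClosureType* closure) {
//     if (Devirtualizer<nv>::do_metadata(closure)) {
//       Devirtualizer<nv>::do_klass(closure, obj->klass());  // metadata visit
//     }
//     // ... visit each oop field p via Devirtualizer<nv>::do_oop(closure, p) ...
//     return obj->size();
//   }
//
// The _nv/_v entry points then instantiate it with nvs_to_bool(nv_suffix):
// the _nv variant becomes <true> and statically calls do_oop_nv, while the
// _v variant becomes <false> and goes through the virtual do_oop.
// ---------------------------------------------------------------------------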
diff --git a/src/share/vm/memory/iterator.inline.hpp b/src/share/vm/memory/iterator.inline.hpp
--- a/src/share/vm/memory/iterator.inline.hpp
+++ b/src/share/vm/memory/iterator.inline.hpp
@@ -28,6 +28,12 @@
 #include "classfile/classLoaderData.hpp"
 #include "memory/iterator.hpp"
 #include "oops/klass.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.inline.hpp"
+#include "oops/instanceClassLoaderKlass.inline.hpp"
+#include "oops/instanceRefKlass.inline.hpp"
+#include "oops/objArrayKlass.inline.hpp"
+#include "oops/typeArrayKlass.inline.hpp"
 #include "utilities/debug.hpp"
 
 inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) {
@@ -44,4 +50,63 @@
 
 inline void MetadataAwareOopClosure::do_klass(Klass* k)       { do_klass_nv(k); }
 
+#ifdef ASSERT
+// This verification is applied to all visited oops.
+// The closures can turn it off by overriding should_verify_oops().
+template <typename T>
+void ExtendedOopClosure::verify(T* p) {
+  if (should_verify_oops()) {
+    T heap_oop = oopDesc::load_heap_oop(p);
+    if (!oopDesc::is_null(heap_oop)) {
+      oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
+      assert(Universe::heap()->is_in_closed_subset(o),
+             err_msg("should be in closed *p " PTR_FORMAT " " PTR_FORMAT, p2i(p), p2i(o)));
+    }
+  }
+}
+#endif
+
+// Implementation of the non-virtual do_oop dispatch.
+
+template <class OopClosureType, typename T>
+inline void Devirtualizer<true>::do_oop(OopClosureType* closure, T* p) {
+  debug_only(closure->verify(p));
+  closure->do_oop_nv(p);
+}
+template <class OopClosureType>
+inline void Devirtualizer<true>::do_klass(OopClosureType* closure, Klass* k) {
+  closure->do_klass_nv(k);
+}
+template <class OopClosureType>
+inline bool Devirtualizer<true>::do_metadata(OopClosureType* closure) {
+  // Make sure the non-virtual and the virtual versions match.
+  assert(closure->do_metadata_nv() == closure->do_metadata(), "Inconsistency in do_metadata");
+  return closure->do_metadata_nv();
+}
+
+// Implementation of the virtual do_oop dispatch.
+
+template <class OopClosureType, typename T>
+void Devirtualizer<false>::do_oop(OopClosureType* closure, T* p) {
+  debug_only(closure->verify(p));
+  closure->do_oop(p);
+}
+template <class OopClosureType>
+void Devirtualizer<false>::do_klass(OopClosureType* closure, Klass* k) {
+  closure->do_klass(k);
+}
+template <class OopClosureType>
+bool Devirtualizer<false>::do_metadata(OopClosureType* closure) {
+  return closure->do_metadata();
+}
+
+// The list of all "specializable" oop_oop_iterate function definitions.
+#define ALL_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                  \
+  ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(             OopClosureType, nv_suffix)  \
+  ALL_INSTANCE_REF_KLASS_OOP_OOP_ITERATE_DEFN(         OopClosureType, nv_suffix)  \
+  ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN(      OopClosureType, nv_suffix)  \
+  ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
+  ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(            OopClosureType, nv_suffix)  \
+  ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(           OopClosureType, nv_suffix)
+
 #endif // SHARE_VM_MEMORY_ITERATOR_INLINE_HPP
diff --git a/src/share/vm/memory/space.cpp b/src/share/vm/memory/space.cpp
--- a/src/share/vm/memory/space.cpp
+++ b/src/share/vm/memory/space.cpp
@@ -31,6 +31,7 @@
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/defNewGeneration.hpp"
 #include "memory/genCollectedHeap.hpp"
+#include "memory/genOopClosures.inline.hpp"
 #include "memory/space.hpp"
 #include "memory/space.inline.hpp"
 #include "memory/universe.inline.hpp"
diff --git a/src/share/vm/memory/tenuredGeneration.cpp b/src/share/vm/memory/tenuredGeneration.cpp
--- a/src/share/vm/memory/tenuredGeneration.cpp
+++ b/src/share/vm/memory/tenuredGeneration.cpp
@@ -36,6 +36,9 @@
 #include "oops/oop.inline.hpp"
 #include "runtime/java.hpp"
 #include "utilities/macros.hpp"
+#if INCLUDE_ALL_GCS
+#include "gc_implementation/parNew/parOopClosures.hpp"
+#endif
 
 TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                      size_t initial_byte_size, int level,
diff --git a/src/share/vm/oops/instanceClassLoaderKlass.cpp b/src/share/vm/oops/instanceClassLoaderKlass.cpp
deleted file mode 100644
--- a/src/share/vm/oops/instanceClassLoaderKlass.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (c) 2011, 2015, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#include "precompiled.hpp"
-#include "classfile/javaClasses.hpp"
-#include "classfile/systemDictionary.hpp"
-#include "gc_implementation/shared/markSweep.inline.hpp"
-#include "gc_interface/collectedHeap.inline.hpp"
-#include "memory/genOopClosures.inline.hpp"
-#include "memory/iterator.inline.hpp"
-#include "memory/oopFactory.hpp"
-#include "memory/specialized_oop_closures.hpp"
-#include "oops/instanceKlass.hpp"
-#include "oops/instanceClassLoaderKlass.hpp"
-#include "oops/instanceMirrorKlass.hpp"
-#include "oops/instanceOop.hpp"
-#include "oops/oop.inline.hpp"
-#include "oops/symbol.hpp"
-#include "runtime/handles.inline.hpp"
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/parNew/parOopClosures.inline.hpp"
-#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
-#endif // INCLUDE_ALL_GCS
-
-// Macro to define InstanceClassLoaderKlass::oop_oop_iterate for virtual/nonvirtual for
-// all closures. Macros calling macros above for each oop size.
-// Since ClassLoader objects have only a pointer to the loader_data, they are not
-// compressed nor does the pointer move.
-
-#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)\
-                                                                                \
-int InstanceClassLoaderKlass::                                                  \
-oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {                  \
-  /* Get size before changing pointers */                                      \
-  int size = InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure);          \
-                                                                                \
-  if_do_metadata_checked(closure, nv_suffix) {                                  \
-    ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);             \
-    /* cld can be null if we have a non-registered class loader. */             \
-    if (cld != NULL) {                                                          \
-      closure->do_class_loader_data(cld);                                       \
-    }                                                                           \
-  }                                                                             \
-                                                                                \
-  return size;                                                                  \
-}
-
-#if INCLUDE_ALL_GCS
-#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
-                                                                                \
-int InstanceClassLoaderKlass::                                                  \
-oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {        \
-  /* Get size before changing pointers */                                      \
-  int size = InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \
-  return size;                                                                  \
-}
-#endif // INCLUDE_ALL_GCS
-
-
-#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
-                                                                                \
-int InstanceClassLoaderKlass::                                                  \
-oop_oop_iterate##nv_suffix##_m(oop obj,                                         \
-                               OopClosureType* closure,                         \
-                               MemRegion mr) {                                  \
-  int size = InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr);   \
-                                                                                \
-  if_do_metadata_checked(closure, nv_suffix) {                                  \
-    if (mr.contains(obj)) {                                                     \
*/ \ - if (cld != NULL) { \ - closure->do_class_loader_data(cld); \ - } \ - } \ - } \ - \ - return size; \ -} - -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN) -#if INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -#endif // INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m) - -void InstanceClassLoaderKlass::oop_follow_contents(oop obj) { - InstanceKlass::oop_follow_contents(obj); - ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj); - - // We must NULL check here, since the class loader - // can be found before the loader data has been set up. - if(loader_data != NULL) { - MarkSweep::follow_class_loader(loader_data); - } -} - -#if INCLUDE_ALL_GCS -void InstanceClassLoaderKlass::oop_follow_contents(ParCompactionManager* cm, - oop obj) { - InstanceKlass::oop_follow_contents(cm, obj); - ClassLoaderData * const loader_data = java_lang_ClassLoader::loader_data(obj); - if (loader_data != NULL) { - PSParallelCompact::follow_class_loader(cm, loader_data); - } -} - -void InstanceClassLoaderKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { - InstanceKlass::oop_push_contents(pm, obj); - - // This is called by the young collector. It will already have taken care of - // all class loader data. So, we don't have to follow the class loader -> - // class loader data link. -} - -int InstanceClassLoaderKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { - InstanceKlass::oop_update_pointers(cm, obj); - return size_helper(); -} -#endif // INCLUDE_ALL_GCS - diff --git a/src/share/vm/oops/instanceClassLoaderKlass.hpp b/src/share/vm/oops/instanceClassLoaderKlass.hpp --- a/src/share/vm/oops/instanceClassLoaderKlass.hpp +++ b/src/share/vm/oops/instanceClassLoaderKlass.hpp @@ -48,34 +48,60 @@ InstanceClassLoaderKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); } - // Iterators - int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { - return oop_oop_iterate_v(obj, blk); - } - int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) { - return oop_oop_iterate_v_m(obj, blk, mr); - } + // GC specific object visitors + // + // Mark Sweep + void oop_ms_follow_contents(oop obj); + int oop_ms_adjust_pointers(oop obj); +#if INCLUDE_ALL_GCS + // Parallel Scavenge + void oop_ps_push_contents( oop obj, PSPromotionManager* pm); + // Parallel Compact + void oop_pc_follow_contents(oop obj, ParCompactionManager* cm); + void oop_pc_update_pointers(oop obj); +#endif -#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ - int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \ + // Oop fields (and metadata) iterators + // [nv = true] Use non-virtual calls to do_oop_nv. + // [nv = false] Use virtual calls to do_oop. + // + // The InstanceClassLoaderKlass iterators also visit the CLD pointer (or mirror of anonymous klasses.) + + private: + // Forward iteration + // Iterate over the oop fields and metadata. + template + inline int oop_oop_iterate(oop obj, OopClosureType* closure); + +#if INCLUDE_ALL_GCS + // Reverse iteration + // Iterate over the oop fields and metadata. 
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+#endif
+
+  // Bounded range iteration
+  // Iterate over the oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+ public:
+
+#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
+  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
   int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr);

 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_DECL)

 #if INCLUDE_ALL_GCS
-#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
+#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
   int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);

 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
 #endif // INCLUDE_ALL_GCS

-  // Garbage collection
-  void oop_follow_contents(oop obj);
-
-  // Parallel Scavenge and Parallel Old
-  PARALLEL_GC_DECLS
 };

 #endif // SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_HPP
diff --git a/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp b/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
+#define SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
+
+#include "classfile/javaClasses.hpp"
+#include "oops/instanceClassLoaderKlass.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+template <bool nv, class OopClosureType>
+inline int InstanceClassLoaderKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+
+  if (Devirtualizer<nv>::do_metadata(closure)) {
+    ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
+    // cld can be null if we have a non-registered class loader.
+    if (cld != NULL) {
+      closure->do_class_loader_data(cld);
+    }
+  }
+
+  return size;
+}
+
+#if INCLUDE_ALL_GCS
+template <bool nv, class OopClosureType>
+inline int InstanceClassLoaderKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+
+  assert(!Devirtualizer<nv>::do_metadata(closure),
+      "Code to handle metadata is not implemented");
+
+  return size;
+}
+#endif // INCLUDE_ALL_GCS
+
+
+template <bool nv, class OopClosureType>
+inline int InstanceClassLoaderKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+
+  if (Devirtualizer<nv>::do_metadata(closure)) {
+    if (mr.contains(obj)) {
+      ClassLoaderData* cld = java_lang_ClassLoader::loader_data(obj);
+      // cld can be null if we have a non-registered class loader.
+      if (cld != NULL) {
+        closure->do_class_loader_data(cld);
+      }
+    }
+  }
+
+  return size;
+}
+
+
+#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ \
+int InstanceClassLoaderKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
+  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
+}
+
+#if INCLUDE_ALL_GCS
+#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
+ \
+int InstanceClassLoaderKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
+  return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
+}
+#else
+#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif
+
+
+#define InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
+ \
+int InstanceClassLoaderKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
+  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
+}
+
+#define ALL_INSTANCE_CLASS_LOADER_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+  InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN(          OopClosureType, nv_suffix) \
+  InstanceClassLoaderKlass_OOP_OOP_ITERATE_DEFN_m(        OopClosureType, nv_suffix) \
+  InstanceClassLoaderKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+
+#endif // SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP
diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp
--- a/src/share/vm/oops/instanceKlass.cpp
+++ b/src/share/vm/oops/instanceKlass.cpp
@@ -28,12 +28,10 @@
 #include "classfile/verifier.hpp"
 #include "classfile/vmSymbols.hpp"
 #include "compiler/compileBroker.hpp"
-#include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "interpreter/oopMapCache.hpp"
 #include "interpreter/rewriter.hpp"
 #include "jvmtifiles/jvmti.h"
-#include "memory/genOopClosures.inline.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/metadataFactory.hpp"
@@ -41,7 +39,7 @@
 #include "memory/specialized_oop_closures.hpp"
 #include "oops/fieldStreams.hpp"
 #include "oops/instanceClassLoaderKlass.hpp"
-#include "oops/instanceKlass.hpp"
+#include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.hpp"
 #include "oops/instanceOop.hpp"
 #include "oops/klass.inline.hpp"
@@ -64,17 +62,6 @@
 #include "services/threadService.hpp"
 #include "utilities/dtrace.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1OopClosures.inline.hpp"
-#include
"gc_implementation/g1/g1RemSet.inline.hpp" -#include "gc_implementation/g1/heapRegionManager.inline.hpp" -#include "gc_implementation/parNew/parOopClosures.inline.hpp" -#include "gc_implementation/parallelScavenge/parallelScavengeHeap.inline.hpp" -#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" -#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" -#endif // INCLUDE_ALL_GCS #ifdef COMPILER1 #include "c1/c1_Compiler.hpp" #endif @@ -2010,288 +1997,6 @@ } #endif //PRODUCT - -// Garbage collection - -#ifdef ASSERT -template void assert_is_in(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in(o), "should be in heap"); - } -} -template void assert_is_in_closed_subset(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_closed_subset(o), - err_msg("should be in closed *p " INTPTR_FORMAT " " INTPTR_FORMAT, (address)p, (address)o)); - } -} -template void assert_is_in_reserved(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); - } -} -template void assert_nothing(T *p) {} - -#else -template void assert_is_in(T *p) {} -template void assert_is_in_closed_subset(T *p) {} -template void assert_is_in_reserved(T *p) {} -template void assert_nothing(T *p) {} -#endif // ASSERT - -// -// Macros that iterate over areas of oops which are specialized on type of -// oop pointer either narrow or wide, depending on UseCompressedOops -// -// Parameters are: -// T - type of oop to point to (either oop or narrowOop) -// start_p - starting pointer for region to iterate over -// count - number of oops or narrowOops to iterate over -// do_oop - action to perform on each oop (it's arbitrary C code which -// makes it more efficient to put in a macro rather than making -// it a template function) -// assert_fn - assert function which is template function because performance -// doesn't matter when enabled. -#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \ - T, start_p, count, do_oop, \ - assert_fn) \ -{ \ - T* p = (T*)(start_p); \ - T* const end = p + (count); \ - while (p < end) { \ - (assert_fn)(p); \ - do_oop; \ - ++p; \ - } \ -} - -#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \ - T, start_p, count, do_oop, \ - assert_fn) \ -{ \ - T* const start = (T*)(start_p); \ - T* p = start + (count); \ - while (start < p) { \ - --p; \ - (assert_fn)(p); \ - do_oop; \ - } \ -} - -#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ - T, start_p, count, low, high, \ - do_oop, assert_fn) \ -{ \ - T* const l = (T*)(low); \ - T* const h = (T*)(high); \ - assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ - mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ - "bounded region must be properly aligned"); \ - T* p = (T*)(start_p); \ - T* end = p + (count); \ - if (p < l) p = l; \ - if (end > h) end = h; \ - while (p < end) { \ - (assert_fn)(p); \ - do_oop; \ - ++p; \ - } \ -} - - -// The following macros call specialized macros, passing either oop or -// narrowOop as the specialization type. These test the UseCompressedOops -// flag. -#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn) \ -{ \ - /* Compute oopmap block range. The common case \ - is nonstatic_oop_map_size == 1. 
*/ \ - OopMapBlock* map = start_of_nonstatic_oop_maps(); \ - OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ - if (UseCompressedOops) { \ - while (map < end_map) { \ - InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - do_oop, assert_fn) \ - ++map; \ - } \ - } else { \ - while (map < end_map) { \ - InstanceKlass_SPECIALIZED_OOP_ITERATE(oop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - do_oop, assert_fn) \ - ++map; \ - } \ - } \ -} - -#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn) \ -{ \ - OopMapBlock* const start_map = start_of_nonstatic_oop_maps(); \ - OopMapBlock* map = start_map + nonstatic_oop_map_count(); \ - if (UseCompressedOops) { \ - while (start_map < map) { \ - --map; \ - InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - do_oop, assert_fn) \ - } \ - } else { \ - while (start_map < map) { \ - --map; \ - InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - do_oop, assert_fn) \ - } \ - } \ -} - -#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop, \ - assert_fn) \ -{ \ - /* Compute oopmap block range. The common case is \ - nonstatic_oop_map_size == 1, so we accept the \ - usually non-existent extra overhead of examining \ - all the maps. */ \ - OopMapBlock* map = start_of_nonstatic_oop_maps(); \ - OopMapBlock* const end_map = map + nonstatic_oop_map_count(); \ - if (UseCompressedOops) { \ - while (map < end_map) { \ - InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - low, high, \ - do_oop, assert_fn) \ - ++map; \ - } \ - } else { \ - while (map < end_map) { \ - InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ - obj->obj_field_addr(map->offset()), map->count(), \ - low, high, \ - do_oop, assert_fn) \ - ++map; \ - } \ - } \ -} - -void InstanceKlass::oop_follow_contents(oop obj) { - assert(obj != NULL, "can't follow the content of NULL object"); - MarkSweep::follow_klass(obj->klass()); - InstanceKlass_OOP_MAP_ITERATE( \ - obj, \ - MarkSweep::mark_and_push(p), \ - assert_is_in_closed_subset) -} - -#if INCLUDE_ALL_GCS -void InstanceKlass::oop_follow_contents(ParCompactionManager* cm, - oop obj) { - assert(obj != NULL, "can't follow the content of NULL object"); - PSParallelCompact::follow_klass(cm, obj->klass()); - // Only mark the header and let the scan of the meta-data mark - // everything else. - InstanceKlass_OOP_MAP_ITERATE( \ - obj, \ - PSParallelCompact::mark_and_push(cm, p), \ - assert_is_in) -} -#endif // INCLUDE_ALL_GCS - -// closure's do_metadata() method dictates whether the given closure should be -// applied to the klass ptr in the object header. 
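The deleted comment above states the contract that the replacement iterators keep: the closure, not the iterator, decides whether the klass pointer in the object header is visited. A minimal illustration of a closure opting in; the class name is hypothetical, but the hooks it overrides are the ExtendedOopClosure ones this code base defines:

  // Hypothetical closure: returning true from do_metadata() asks the
  // oop_oop_iterate functions to also hand us the object's Klass*.
  class ExampleMetadataClosure : public ExtendedOopClosure {
   public:
    virtual bool do_metadata()        { return true; }
    virtual void do_klass(Klass* k)   { /* e.g. mark the klass as live */ }
    virtual void do_oop(oop* p)       { /* visit the wide oop field at p */ }
    virtual void do_oop(narrowOop* p) { /* visit the compressed oop field at p */ }
  };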
- -#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - /* header */ \ - if_do_metadata_checked(closure, nv_suffix) { \ - closure->do_klass##nv_suffix(obj->klass()); \ - } \ - InstanceKlass_OOP_MAP_ITERATE( \ - obj, \ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return size_helper(); \ -} - -#if INCLUDE_ALL_GCS -#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, \ - OopClosureType* closure) { \ - assert_should_ignore_metadata(closure, nv_suffix); \ - \ - /* instance variables */ \ - InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ - obj, \ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return size_helper(); \ -} -#endif // INCLUDE_ALL_GCS - -#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, \ - OopClosureType* closure, \ - MemRegion mr) { \ - if_do_metadata_checked(closure, nv_suffix) { \ - if (mr.contains(obj)) { \ - closure->do_klass##nv_suffix(obj->klass()); \ - } \ - } \ - InstanceKlass_BOUNDED_OOP_MAP_ITERATE( \ - obj, mr.start(), mr.end(), \ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return size_helper(); \ -} - -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DEFN_m) -#if INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -#endif // INCLUDE_ALL_GCS - -int InstanceKlass::oop_adjust_pointers(oop obj) { - int size = size_helper(); - InstanceKlass_OOP_MAP_ITERATE( \ - obj, \ - MarkSweep::adjust_pointer(p), \ - assert_is_in) - return size; -} - -#if INCLUDE_ALL_GCS -void InstanceKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { - InstanceKlass_OOP_MAP_REVERSE_ITERATE( \ - obj, \ - if (PSScavenge::should_scavenge(p)) { \ - pm->claim_or_forward_depth(p); \ - }, \ - assert_nothing ) -} - -int InstanceKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) { - int size = size_helper(); - InstanceKlass_OOP_MAP_ITERATE( \ - obj, \ - PSParallelCompact::adjust_pointer(p), \ - assert_is_in) - return size; -} - -#endif // INCLUDE_ALL_GCS - void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) { assert(class_loader_data()->is_alive(is_alive), "this klass should be live"); if (is_interface()) { diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp +++ b/src/share/vm/oops/instanceKlass.hpp @@ -954,10 +954,6 @@ void adjust_default_methods(InstanceKlass* holder, bool* trace_name_printed); #endif // INCLUDE_JVMTI - // Garbage collection - void oop_follow_contents(oop obj); - int oop_adjust_pointers(oop obj); - void clean_implementors_list(BoolObjectClosure* is_alive); void clean_method_data(BoolObjectClosure* is_alive); void clean_dependent_nmethods(); @@ -981,32 +977,108 @@ static void notify_unload_class(InstanceKlass* ik); static void release_C_heap_structures(InstanceKlass* ik); - // Parallel Scavenge and Parallel Old - PARALLEL_GC_DECLS - // Naming const char* signature_name() const; - // 
Iterators
-  int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
-    return oop_oop_iterate_v(obj, blk);
-  }
+  // GC specific object visitors
+  //
+  // Mark Sweep
+  void oop_ms_follow_contents(oop obj);
+  int  oop_ms_adjust_pointers(oop obj);
+#if INCLUDE_ALL_GCS
+  // Parallel Scavenge
+  void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
+  // Parallel Compact
+  void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
+  void oop_pc_update_pointers(oop obj);
+#endif

-  int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
-    return oop_oop_iterate_v_m(obj, blk, mr);
-  }
+  // Oop fields (and metadata) iterators
+  //  [nv = true]  Use non-virtual calls to do_oop_nv.
+  //  [nv = false] Use virtual calls to do_oop.
+  //
+  // The InstanceKlass iterators also visit the Object's klass.
+
+  // Forward iteration
+ public:
+  // Iterate over all oop fields in the oop maps.
+  template <bool nv, class OopClosureType>
+  inline void oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure);

-#define InstanceKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
-  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
-  int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, \
-                                     MemRegion mr);
+ protected:
+  // Iterate over all oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+
+ private:
+  // Iterate over all oop fields in the oop maps.
+  // Specialized for [T = oop] or [T = narrowOop].
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure);
+
+  // Iterate over all oop fields in one oop map.
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure);
+
+
+  // Reverse iteration
+#if INCLUDE_ALL_GCS
+ public:
+  // Iterate over all oop fields in the oop maps.
+  template <bool nv, class OopClosureType>
+  inline void oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure);
+
+ protected:
+  // Iterate over all oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+
+ private:
+  // Iterate over all oop fields in the oop maps.
+  // Specialized for [T = oop] or [T = narrowOop].
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure);
+
+  // Iterate over all oop fields in one oop map.
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure);
+#endif
+
+
+  // Bounded range iteration
+ public:
+  // Iterate over all oop fields in the oop maps.
+  template <bool nv, class OopClosureType>
+  inline void oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+ protected:
+  // Iterate over all oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+ private:
+  // Iterate over all oop fields in the oop maps.
+  // Specialized for [T = oop] or [T = narrowOop].
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+  // Iterate over all oop fields in one oop map.
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr);
+
+
+ public:
+
+#define InstanceKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
+  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure); \
+  int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr);

 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_DECL)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_DECL)

 #if INCLUDE_ALL_GCS
-#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
+#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
+  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);

 ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
 ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
diff --git a/src/share/vm/oops/instanceKlass.inline.hpp b/src/share/vm/oops/instanceKlass.inline.hpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/oops/instanceKlass.inline.hpp
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
+#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
+
+#include "memory/iterator.hpp"
+#include "oops/instanceKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+// The iteration over the oops in objects is a hot path in the GC code.
+// By force inlining the following functions, we get similar GC performance
+// as the previous macro based implementation.
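The Devirtualizer<nv> calls used below are the replacement for the old nv_suffix token pasting: the boolean template parameter selects, at compile time, between the statically bound do_oop_nv and the virtual do_oop. A rough sketch of the idea only; the real definitions live in memory/iterator.inline.hpp, added elsewhere in this change:

  // nv = true:  call the non-virtual do_oop_nv, which the compiler can inline.
  // nv = false: fall back to the virtual do_oop dispatch.
  template <bool nv> class Devirtualizer;

  template <> class Devirtualizer<true> {
   public:
    template <class OopClosureType, typename T>
    static void do_oop(OopClosureType* closure, T* p) { closure->do_oop_nv(p); }
  };

  template <> class Devirtualizer<false> {
   public:
    template <class OopClosureType, typename T>
    static void do_oop(OopClosureType* closure, T* p) { closure->do_oop(p); }
  };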
+#ifdef TARGET_COMPILER_visCPP
+#define INLINE __forceinline
+#else
+#define INLINE inline
+#endif
+
+template <bool nv, typename T, class OopClosureType>
+INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
+  T* p         = (T*)obj->obj_field_addr<T>(map->offset());
+  T* const end = p + map->count();
+
+  for (; p < end; ++p) {
+    Devirtualizer<nv>::do_oop(closure, p);
+  }
+}
+
+#if INCLUDE_ALL_GCS
+template <bool nv, typename T, class OopClosureType>
+INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
+  T* const start = (T*)obj->obj_field_addr<T>(map->offset());
+  T*       p     = start + map->count();
+
+  while (start < p) {
+    --p;
+    Devirtualizer<nv>::do_oop(closure, p);
+  }
+}
+#endif
+
+template <bool nv, typename T, class OopClosureType>
+INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
+  T* p   = (T*)obj->obj_field_addr<T>(map->offset());
+  T* end = p + map->count();
+
+  T* const l   = (T*)mr.start();
+  T* const h   = (T*)mr.end();
+  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
+         mask_bits((intptr_t)h, sizeof(T)-1) == 0,
+         "bounded region must be properly aligned");
+
+  if (p < l) {
+    p = l;
+  }
+  if (end > h) {
+    end = h;
+  }
+
+  for (;p < end; ++p) {
+    Devirtualizer<nv>::do_oop(closure, p);
+  }
+}
+
+template <bool nv, typename T, class OopClosureType>
+INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
+  OopMapBlock* map           = start_of_nonstatic_oop_maps();
+  OopMapBlock* const end_map = map + nonstatic_oop_map_count();
+
+  for (; map < end_map; ++map) {
+    oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
+  }
+}
+
+#if INCLUDE_ALL_GCS
+template <bool nv, typename T, class OopClosureType>
+INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
+  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
+  OopMapBlock* map             = start_map + nonstatic_oop_map_count();
+
+  while (start_map < map) {
+    --map;
+    oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
+  }
+}
+#endif
+
+template <bool nv, typename T, class OopClosureType>
+INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  OopMapBlock* map           = start_of_nonstatic_oop_maps();
+  OopMapBlock* const end_map = map + nonstatic_oop_map_count();
+
+  for (;map < end_map; ++map) {
+    oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
+  }
+}
+
+template <bool nv, class OopClosureType>
+INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
+  if (UseCompressedOops) {
+    oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
+  } else {
+    oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
+  }
+}
+
+#if INCLUDE_ALL_GCS
+template <bool nv, class OopClosureType>
+INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
+  if (UseCompressedOops) {
+    oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
+  } else {
+    oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
+  }
+}
+#endif
+
+template <bool nv, class OopClosureType>
+INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  if (UseCompressedOops) {
+    oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
+  } else {
+    oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
+  }
+}
+
+template <bool nv, class OopClosureType>
+INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  if (Devirtualizer<nv>::do_metadata(closure)) {
+    Devirtualizer<nv>::do_klass(closure, obj->klass());
+  }
+
+  oop_oop_iterate_oop_maps<nv>(obj, closure);
+
+  return size_helper();
+}
+
+#if INCLUDE_ALL_GCS
+template <bool nv, class OopClosureType>
+INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  assert(!Devirtualizer<nv>::do_metadata(closure),
+      "Code to handle metadata is not implemented");
+
+  oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);
+
+  return size_helper();
+}
+#endif
+
+template <bool nv, class OopClosureType>
+INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  if (Devirtualizer<nv>::do_metadata(closure)) {
+    if (mr.contains(obj)) {
+      Devirtualizer<nv>::do_klass(closure, obj->klass());
+    }
+  }
+
+  oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);
+
+  return size_helper();
+}
+
+#undef INLINE
+
+
+#define InstanceKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+int InstanceKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
+  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
+}
+
+#if INCLUDE_ALL_GCS
+#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
+int InstanceKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
+  return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
+}
+#else
+#define InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+#endif
+
+#define InstanceKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
+int InstanceKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
+  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
+}
+
+#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+  InstanceKlass_OOP_OOP_ITERATE_DEFN(          OopClosureType, nv_suffix) \
+  InstanceKlass_OOP_OOP_ITERATE_DEFN_m(        OopClosureType, nv_suffix) \
+  InstanceKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+
+#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
diff --git a/src/share/vm/oops/instanceMirrorKlass.cpp b/src/share/vm/oops/instanceMirrorKlass.cpp
--- a/src/share/vm/oops/instanceMirrorKlass.cpp
+++ b/src/share/vm/oops/instanceMirrorKlass.cpp
@@ -25,9 +25,7 @@
 #include "precompiled.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/systemDictionary.hpp"
-#include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
-#include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/specialized_oop_closures.hpp"
@@ -38,313 +36,9 @@
 #include "oops/symbol.hpp"
 #include "runtime/handles.inline.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1OopClosures.inline.hpp"
-#include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionManager.inline.hpp"
-#include "gc_implementation/parNew/parOopClosures.inline.hpp"
-#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
-#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
-#endif // INCLUDE_ALL_GCS

 int InstanceMirrorKlass::_offset_of_static_fields = 0;

-#ifdef ASSERT
-template void assert_is_in(T *p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(Universe::heap()->is_in(o), "should be in heap");
-  }
-}
-template void assert_is_in_closed_subset(T *p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
-    assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
-  }
-}
-template void assert_is_in_reserved(T *p) {
-
T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); - } -} -template void assert_nothing(T *p) {} - -#else -template void assert_is_in(T *p) {} -template void assert_is_in_closed_subset(T *p) {} -template void assert_is_in_reserved(T *p) {} -template void assert_nothing(T *p) {} -#endif // ASSERT - -#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE( \ - T, start_p, count, do_oop, \ - assert_fn) \ -{ \ - T* p = (T*)(start_p); \ - T* const end = p + (count); \ - while (p < end) { \ - (assert_fn)(p); \ - do_oop; \ - ++p; \ - } \ -} - -#define InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \ - T, start_p, count, low, high, \ - do_oop, assert_fn) \ -{ \ - T* const l = (T*)(low); \ - T* const h = (T*)(high); \ - assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \ - mask_bits((intptr_t)h, sizeof(T)-1) == 0, \ - "bounded region must be properly aligned"); \ - T* p = (T*)(start_p); \ - T* end = p + (count); \ - if (p < l) p = l; \ - if (end > h) end = h; \ - while (p < end) { \ - (assert_fn)(p); \ - do_oop; \ - ++p; \ - } \ -} - - -#define InstanceMirrorKlass_OOP_ITERATE(start_p, count, \ - do_oop, assert_fn) \ -{ \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \ - start_p, count, \ - do_oop, assert_fn) \ - } else { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(oop, \ - start_p, count, \ - do_oop, assert_fn) \ - } \ -} - -// The following macros call specialized macros, passing either oop or -// narrowOop as the specialization type. These test the UseCompressedOops -// flag. -#define InstanceMirrorKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \ - do_oop, assert_fn) \ -{ \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop, \ - start_p, count, \ - low, high, \ - do_oop, assert_fn) \ - } else { \ - InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop, \ - start_p, count, \ - low, high, \ - do_oop, assert_fn) \ - } \ -} - - -void InstanceMirrorKlass::oop_follow_contents(oop obj) { - InstanceKlass::oop_follow_contents(obj); - - // Follow the klass field in the mirror. - Klass* klass = java_lang_Class::as_Klass(obj); - if (klass != NULL) { - // An anonymous class doesn't have its own class loader, so the call - // to follow_klass will mark and push its java mirror instead of the - // class loader. When handling the java mirror for an anonymous class - // we need to make sure its class loader data is claimed, this is done - // by calling follow_class_loader explicitly. For non-anonymous classes - // the call to follow_class_loader is made when the class loader itself - // is handled. - if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) { - MarkSweep::follow_class_loader(klass->class_loader_data()); - } else { - MarkSweep::follow_klass(klass); - } - } else { - // If klass is NULL then this a mirror for a primitive type. - // We don't have to follow them, since they are handled as strong - // roots in Universe::oops_do. 
- assert(java_lang_Class::is_primitive(obj), "Sanity check"); - } - - InstanceMirrorKlass_OOP_ITERATE( \ - start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ - MarkSweep::mark_and_push(p), \ - assert_is_in_closed_subset) -} - -#if INCLUDE_ALL_GCS -void InstanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm, - oop obj) { - InstanceKlass::oop_follow_contents(cm, obj); - - // Follow the klass field in the mirror. - Klass* klass = java_lang_Class::as_Klass(obj); - if (klass != NULL) { - // An anonymous class doesn't have its own class loader, so the call - // to follow_klass will mark and push its java mirror instead of the - // class loader. When handling the java mirror for an anonymous class - // we need to make sure its class loader data is claimed, this is done - // by calling follow_class_loader explicitly. For non-anonymous classes - // the call to follow_class_loader is made when the class loader itself - // is handled. - if (klass->oop_is_instance() && InstanceKlass::cast(klass)->is_anonymous()) { - PSParallelCompact::follow_class_loader(cm, klass->class_loader_data()); - } else { - PSParallelCompact::follow_klass(cm, klass); - } - } else { - // If klass is NULL then this a mirror for a primitive type. - // We don't have to follow them, since they are handled as strong - // roots in Universe::oops_do. - assert(java_lang_Class::is_primitive(obj), "Sanity check"); - } - - InstanceMirrorKlass_OOP_ITERATE( \ - start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ - PSParallelCompact::mark_and_push(cm, p), \ - assert_is_in) -} -#endif // INCLUDE_ALL_GCS - -int InstanceMirrorKlass::oop_adjust_pointers(oop obj) { - int size = oop_size(obj); - InstanceKlass::oop_adjust_pointers(obj); - - InstanceMirrorKlass_OOP_ITERATE( \ - start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ - MarkSweep::adjust_pointer(p), \ - assert_nothing) - return size; -} - -#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(T, nv_suffix) \ - InstanceMirrorKlass_OOP_ITERATE( \ - start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return oop_size(obj); \ - -#define InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(T, nv_suffix, mr) \ - InstanceMirrorKlass_BOUNDED_OOP_ITERATE( \ - start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \ - mr.start(), mr.end(), \ - (closure)->do_oop##nv_suffix(p), \ - assert_is_in_closed_subset) \ - return oop_size(obj); \ - - -// Macro to define InstanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual for -// all closures. Macros calling macros above for each oop size. - -#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceMirrorKlass:: \ -oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \ - /* Get size before changing pointers */ \ - InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure); \ - \ - if_do_metadata_checked(closure, nv_suffix) { \ - Klass* klass = java_lang_Class::as_Klass(obj); \ - /* We'll get NULL for primitive mirrors. 
*/ \ - if (klass != NULL) { \ - closure->do_klass##nv_suffix(klass); \ - } \ - } \ - \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \ - } else { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \ - } \ -} - -#if INCLUDE_ALL_GCS -#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -int InstanceMirrorKlass:: \ -oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \ - /* Get size before changing pointers */ \ - InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure); \ - \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix); \ - } else { \ - InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix); \ - } \ -} -#endif // INCLUDE_ALL_GCS - - -#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \ - \ -int InstanceMirrorKlass:: \ -oop_oop_iterate##nv_suffix##_m(oop obj, \ - OopClosureType* closure, \ - MemRegion mr) { \ - InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr); \ - \ - if_do_metadata_checked(closure, nv_suffix) { \ - if (mr.contains(obj)) { \ - Klass* klass = java_lang_Class::as_Klass(obj); \ - /* We'll get NULL for primitive mirrors. */ \ - if (klass != NULL) { \ - closure->do_klass##nv_suffix(klass); \ - } \ - } \ - } \ - \ - if (UseCompressedOops) { \ - InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr); \ - } else { \ - InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr); \ - } \ -} - -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN) -#if INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN) -#endif // INCLUDE_ALL_GCS -ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m) -ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m) - -#if INCLUDE_ALL_GCS -void InstanceMirrorKlass::oop_push_contents(PSPromotionManager* pm, oop obj) { - // Note that we don't have to follow the mirror -> klass pointer, since all - // klasses that are dirty will be scavenged when we iterate over the - // ClassLoaderData objects. 
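The Parallel Scavenge code being deleted around this point (and reintroduced by this patch as oop_ps_push_contents) is built on one young-collector idiom, shown in isolation below with an illustrative helper name:

  // A field is claimed for the young collection only if its referent can be
  // scavenged; should_scavenge filters out pointers that do not point into
  // the young generation.
  template <class T>
  static void example_push_field(PSPromotionManager* pm, T* p) {
    if (PSScavenge::should_scavenge(p)) {
      pm->claim_or_forward_depth(p);
    }
  }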
-
-  InstanceKlass::oop_push_contents(pm, obj);
-  InstanceMirrorKlass_OOP_ITERATE( \
-    start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
-    if (PSScavenge::should_scavenge(p)) { \
-      pm->claim_or_forward_depth(p); \
-    }, \
-    assert_nothing )
-}
-
-int InstanceMirrorKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
-  int size = oop_size(obj);
-  InstanceKlass::oop_update_pointers(cm, obj);
-
-  InstanceMirrorKlass_OOP_ITERATE( \
-    start_of_static_fields(obj), java_lang_Class::static_oop_field_count(obj), \
-    PSParallelCompact::adjust_pointer(p), \
-    assert_nothing)
-  return size;
-}
-#endif // INCLUDE_ALL_GCS
-
 int InstanceMirrorKlass::instance_size(KlassHandle k) {
   if (k() != NULL && k->oop_is_instance()) {
     return align_object_size(size_helper() + InstanceKlass::cast(k())->static_field_size());
diff --git a/src/share/vm/oops/instanceMirrorKlass.hpp b/src/share/vm/oops/instanceMirrorKlass.hpp
--- a/src/share/vm/oops/instanceMirrorKlass.hpp
+++ b/src/share/vm/oops/instanceMirrorKlass.hpp
@@ -88,19 +88,66 @@
   // allocation
   instanceOop allocate_instance(KlassHandle k, TRAPS);

-  // Garbage collection
-  int  oop_adjust_pointers(oop obj);
-  void oop_follow_contents(oop obj);
+  // GC specific object visitors
+  //
+  // Mark Sweep
+  void oop_ms_follow_contents(oop obj);
+  int  oop_ms_adjust_pointers(oop obj);
+#if INCLUDE_ALL_GCS
+  // Parallel Scavenge
+  void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
+  // Parallel Compact
+  void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
+  void oop_pc_update_pointers(oop obj);
+#endif

-  // Parallel Scavenge and Parallel Old
-  PARALLEL_GC_DECLS
+  // Oop fields (and metadata) iterators
+  //  [nv = true]  Use non-virtual calls to do_oop_nv.
+  //  [nv = false] Use virtual calls to do_oop.
+  //
+  // The InstanceMirrorKlass iterators also visit the hidden Klass pointer.

-  int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
-    return oop_oop_iterate_v(obj, blk);
-  }
-  int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
-    return oop_oop_iterate_v_m(obj, blk, mr);
-  }
+ public:
+  // Iterate over the static fields.
+  template <bool nv, class OopClosureType>
+  inline void oop_oop_iterate_statics(oop obj, OopClosureType* closure);
+
+ private:
+  // Iterate over the static fields.
+  // Specialized for [T = oop] or [T = narrowOop].
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_statics_specialized(oop obj, OopClosureType* closure);
+
+  // Forward iteration
+  // Iterate over the oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+
+
+  // Reverse iteration
+#if INCLUDE_ALL_GCS
+  // Iterate over the oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+#endif
+
+
+  // Bounded range iteration
+  // Iterate over the oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+  // Iterate over the static fields.
+  template <bool nv, class OopClosureType>
+  inline void oop_oop_iterate_statics_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+  // Iterate over the static fields.
+  // Specialized for [T = oop] or [T = narrowOop].
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_statics_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+
+ public:

 #define InstanceMirrorKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix) \
   int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk); \
diff --git a/src/share/vm/oops/instanceMirrorKlass.inline.hpp b/src/share/vm/oops/instanceMirrorKlass.inline.hpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/oops/instanceMirrorKlass.inline.hpp
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP
+#define SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP
+
+#include "classfile/javaClasses.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/instanceMirrorKlass.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+template <bool nv, typename T, class OopClosureType>
+void InstanceMirrorKlass::oop_oop_iterate_statics_specialized(oop obj, OopClosureType* closure) {
+  T* p         = (T*)start_of_static_fields(obj);
+  T* const end = p + java_lang_Class::static_oop_field_count(obj);
+
+  for (; p < end; ++p) {
+    Devirtualizer<nv>::do_oop(closure, p);
+  }
+}
+
+template <bool nv, class OopClosureType>
+void InstanceMirrorKlass::oop_oop_iterate_statics(oop obj, OopClosureType* closure) {
+  if (UseCompressedOops) {
+    oop_oop_iterate_statics_specialized<nv, narrowOop>(obj, closure);
+  } else {
+    oop_oop_iterate_statics_specialized<nv, oop>(obj, closure);
+  }
+}
+
+template <bool nv, class OopClosureType>
+int InstanceMirrorKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+
+  if (Devirtualizer<nv>::do_metadata(closure)) {
+    Klass* klass = java_lang_Class::as_Klass(obj);
+    // We'll get NULL for primitive mirrors.
+    if (klass != NULL) {
+      Devirtualizer<nv>::do_klass(closure, klass);
+    }
+  }
+
+  oop_oop_iterate_statics<nv>(obj, closure);
+
+  return oop_size(obj);
+}
+
+#if INCLUDE_ALL_GCS
+template <bool nv, class OopClosureType>
+int InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+
+  InstanceMirrorKlass::oop_oop_iterate_statics<nv>(obj, closure);
+
+  return oop_size(obj);
+}
+#endif
+
+template <bool nv, typename T, class OopClosureType>
+void InstanceMirrorKlass::oop_oop_iterate_statics_specialized_bounded(oop obj,
+                                                                      OopClosureType* closure,
+                                                                      MemRegion mr) {
+  T* p   = (T*)start_of_static_fields(obj);
+  T* end = p + java_lang_Class::static_oop_field_count(obj);
+
+  T* const l = (T*)mr.start();
+  T* const h = (T*)mr.end();
+  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
+         mask_bits((intptr_t)h, sizeof(T)-1) == 0,
+         "bounded region must be properly aligned");
+
+  if (p < l) {
+    p = l;
+  }
+  if (end > h) {
+    end = h;
+  }
+
+  for (;p < end; ++p) {
+    Devirtualizer<nv>::do_oop(closure, p);
+  }
+}
+
+template <bool nv, class OopClosureType>
+void InstanceMirrorKlass::oop_oop_iterate_statics_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  if (UseCompressedOops) {
+    oop_oop_iterate_statics_specialized_bounded<nv, narrowOop>(obj, closure, mr);
+  } else {
+    oop_oop_iterate_statics_specialized_bounded<nv, oop>(obj, closure, mr);
+  }
+}
+
+template <bool nv, class OopClosureType>
+int InstanceMirrorKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+
+  if (Devirtualizer<nv>::do_metadata(closure)) {
+    if (mr.contains(obj)) {
+      Klass* klass = java_lang_Class::as_Klass(obj);
+      // We'll get NULL for primitive mirrors.
+      if (klass != NULL) {
+        Devirtualizer<nv>::do_klass(closure, klass);
+      }
+    }
+  }
+
+  oop_oop_iterate_statics_bounded<nv>(obj, closure, mr);
+
+  return oop_size(obj);
+}
+
+
+#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+ \
+int InstanceMirrorKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) { \
+  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure); \
+}
+
+#if INCLUDE_ALL_GCS
+#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
+ \
+int InstanceMirrorKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
+  return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure); \
+}
+#else
+#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+#endif
+
+
+#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix) \
+ \
+int InstanceMirrorKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
+  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr); \
+}
+
+#define ALL_INSTANCE_MIRROR_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
+  InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(          OopClosureType, nv_suffix) \
+  InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(        OopClosureType, nv_suffix) \
+  InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+
+#endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP
diff --git a/src/share/vm/oops/instanceRefKlass.cpp b/src/share/vm/oops/instanceRefKlass.cpp
--- a/src/share/vm/oops/instanceRefKlass.cpp
+++ b/src/share/vm/oops/instanceRefKlass.cpp
@@ -25,421 +25,16 @@
 #include "precompiled.hpp"
 #include "classfile/javaClasses.hpp"
 #include "classfile/systemDictionary.hpp"
-#include "gc_implementation/shared/markSweep.inline.hpp"
-#include "gc_interface/collectedHeap.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/genCollectedHeap.hpp"
-#include
"memory/genOopClosures.inline.hpp" #include "memory/specialized_oop_closures.hpp" -#include "oops/instanceRefKlass.hpp" +#include "oops/instanceRefKlass.inline.hpp" #include "oops/oop.inline.hpp" #include "utilities/preserveException.hpp" #include "utilities/macros.hpp" -#if INCLUDE_ALL_GCS -#include "gc_implementation/g1/g1CollectedHeap.inline.hpp" -#include "gc_implementation/g1/g1OopClosures.inline.hpp" -#include "gc_implementation/g1/g1RemSet.inline.hpp" -#include "gc_implementation/g1/heapRegionManager.inline.hpp" -#include "gc_implementation/parNew/parOopClosures.inline.hpp" -#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp" -#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp" -#endif // INCLUDE_ALL_GCS PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC -template -void specialized_oop_follow_contents(InstanceRefKlass* ref, oop obj) { - T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); - T heap_oop = oopDesc::load_heap_oop(referent_addr); - debug_only( - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj); - } - ) - if (!oopDesc::is_null(heap_oop)) { - oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); - if (!referent->is_gc_marked() && - MarkSweep::ref_processor()->discover_reference(obj, ref->reference_type())) { - // reference was discovered, referent will be traversed later - ref->InstanceKlass::oop_follow_contents(obj); - debug_only( - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (void *)obj); - } - ) - return; - } else { - // treat referent as normal oop - debug_only( - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (void *)obj); - } - ) - MarkSweep::mark_and_push(referent_addr); - } - } - T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); - if (ReferenceProcessor::pending_list_uses_discovered_field()) { - // Treat discovered as normal oop, if ref is not "active", - // i.e. if next is non-NULL. - T next_oop = oopDesc::load_heap_oop(next_addr); - if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active" - T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); - debug_only( - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Process discovered as normal " - INTPTR_FORMAT, discovered_addr); - } - ) - MarkSweep::mark_and_push(discovered_addr); - } - } else { -#ifdef ASSERT - // In the case of older JDKs which do not use the discovered - // field for the pending list, an inactive ref (next != NULL) - // must always have a NULL discovered field. - oop next = oopDesc::load_decode_heap_oop(next_addr); - oop discovered = java_lang_ref_Reference::discovered(obj); - assert(oopDesc::is_null(next) || oopDesc::is_null(discovered), - err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field", - (oopDesc*)obj)); -#endif - } - // treat next as normal oop. next is a link in the reference queue. 
- debug_only( - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Process next as normal " INTPTR_FORMAT, next_addr); - } - ) - MarkSweep::mark_and_push(next_addr); - ref->InstanceKlass::oop_follow_contents(obj); -} - -void InstanceRefKlass::oop_follow_contents(oop obj) { - if (UseCompressedOops) { - specialized_oop_follow_contents(this, obj); - } else { - specialized_oop_follow_contents(this, obj); - } -} - -#if INCLUDE_ALL_GCS -template -void specialized_oop_follow_contents(InstanceRefKlass* ref, - ParCompactionManager* cm, - oop obj) { - T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj); - T heap_oop = oopDesc::load_heap_oop(referent_addr); - debug_only( - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("InstanceRefKlass::oop_follow_contents " INTPTR_FORMAT, (void *)obj); - } - ) - if (!oopDesc::is_null(heap_oop)) { - oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); - if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) && - PSParallelCompact::ref_processor()-> - discover_reference(obj, ref->reference_type())) { - // reference already enqueued, referent will be traversed later - ref->InstanceKlass::oop_follow_contents(cm, obj); - debug_only( - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL enqueued " INTPTR_FORMAT, (void *)obj); - } - ) - return; - } else { - // treat referent as normal oop - debug_only( - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Non NULL normal " INTPTR_FORMAT, (void *)obj); - } - ) - PSParallelCompact::mark_and_push(cm, referent_addr); - } - } - T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj); - if (ReferenceProcessor::pending_list_uses_discovered_field()) { - // Treat discovered as normal oop, if ref is not "active", - // i.e. if next is non-NULL. - T next_oop = oopDesc::load_heap_oop(next_addr); - if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active" - T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj); - debug_only( - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr(" Process discovered as normal " - INTPTR_FORMAT, discovered_addr); - } - ) - PSParallelCompact::mark_and_push(cm, discovered_addr); - } - } else { -#ifdef ASSERT - // In the case of older JDKs which do not use the discovered - // field for the pending list, an inactive ref (next != NULL) - // must always have a NULL discovered field. - T next = oopDesc::load_heap_oop(next_addr); - oop discovered = java_lang_ref_Reference::discovered(obj); - assert(oopDesc::is_null(next) || oopDesc::is_null(discovered), - err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field", - (oopDesc*)obj)); -#endif - } - PSParallelCompact::mark_and_push(cm, next_addr); - ref->InstanceKlass::oop_follow_contents(cm, obj); -} - -void InstanceRefKlass::oop_follow_contents(ParCompactionManager* cm, - oop obj) { - if (UseCompressedOops) { - specialized_oop_follow_contents(this, cm, obj); - } else { - specialized_oop_follow_contents(this, cm, obj); - } -} -#endif // INCLUDE_ALL_GCS - -#ifdef ASSERT -template void trace_reference_gc(const char *s, oop obj, - T* referent_addr, - T* next_addr, - T* discovered_addr) { - if(TraceReferenceGC && PrintGCDetails) { - gclog_or_tty->print_cr("%s obj " INTPTR_FORMAT, s, (address)obj); - gclog_or_tty->print_cr(" referent_addr/* " INTPTR_FORMAT " / " - INTPTR_FORMAT, referent_addr, - referent_addr ? 
-         (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL);
-    gclog_or_tty->print_cr("     next_addr/* " INTPTR_FORMAT " / "
-         INTPTR_FORMAT, next_addr,
-         next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL);
-    gclog_or_tty->print_cr("     discovered_addr/* " INTPTR_FORMAT " / "
-         INTPTR_FORMAT, discovered_addr,
-         discovered_addr ?
-         (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL);
-  }
-}
-#endif
-
-template <class T> void specialized_oop_adjust_pointers(InstanceRefKlass *ref, oop obj) {
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  MarkSweep::adjust_pointer(referent_addr);
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  MarkSweep::adjust_pointer(next_addr);
-  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-  MarkSweep::adjust_pointer(discovered_addr);
-  debug_only(trace_reference_gc("InstanceRefKlass::oop_adjust_pointers", obj,
-                                referent_addr, next_addr, discovered_addr);)
-}
-
-int InstanceRefKlass::oop_adjust_pointers(oop obj) {
-  int size = size_helper();
-  InstanceKlass::oop_adjust_pointers(obj);
-
-  if (UseCompressedOops) {
-    specialized_oop_adjust_pointers<narrowOop>(this, obj);
-  } else {
-    specialized_oop_adjust_pointers<oop>(this, obj);
-  }
-  return size;
-}
-
-#define InstanceRefKlass_SPECIALIZED_OOP_ITERATE(T, nv_suffix, contains)          \
-  T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);               \
-  if (closure->apply_to_weak_ref_discovered_field()) {                            \
-    closure->do_oop##nv_suffix(disc_addr);                                        \
-  }                                                                               \
-                                                                                  \
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);             \
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);                             \
-  ReferenceProcessor* rp = closure->_ref_processor;                               \
-  if (!oopDesc::is_null(heap_oop)) {                                              \
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);                   \
-    if (!referent->is_gc_marked() && (rp != NULL) &&                              \
-        rp->discover_reference(obj, reference_type())) {                          \
-      return size;                                                                \
-    } else if (contains(referent_addr)) {                                         \
-      /* treat referent as normal oop */                                          \
-      closure->do_oop##nv_suffix(referent_addr);                                  \
-    }                                                                             \
-  }                                                                               \
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);                     \
-  if (ReferenceProcessor::pending_list_uses_discovered_field()) {                 \
-    T next_oop = oopDesc::load_heap_oop(next_addr);                               \
-    /* Treat discovered as normal oop, if ref is not "active" (next non-NULL) */  \
-    if (!oopDesc::is_null(next_oop) && contains(disc_addr)) {                     \
-      /* i.e. ref is not "active" */                                              \
-      debug_only(                                                                 \
-        if(TraceReferenceGC && PrintGCDetails) {                                  \
-          gclog_or_tty->print_cr("   Process discovered as normal "               \
-                                 INTPTR_FORMAT, disc_addr);                       \
-        }                                                                         \
-      )                                                                           \
-      closure->do_oop##nv_suffix(disc_addr);                                      \
-    }                                                                             \
-  } else {                                                                        \
-    /* In the case of older JDKs which do not use the discovered field for */     \
-    /* the pending list, an inactive ref (next != NULL) must always have a */     \
-    /* NULL discovered field. */                                                  \
-    debug_only(                                                                   \
-      T next_oop = oopDesc::load_heap_oop(next_addr);                             \
-      T disc_oop = oopDesc::load_heap_oop(disc_addr);                             \
-      assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop),            \
-             err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL" \
-                     "discovered field", (oopDesc*)obj));                         \
-    )                                                                             \
-  }                                                                               \
-  /* treat next as normal oop */                                                  \
-  if (contains(next_addr)) {                                                      \
-    closure->do_oop##nv_suffix(next_addr);                                        \
-  }                                                                               \
-  return size;                                                                    \
-
-
-template <class T> bool contains(T *t) { return true; }
-
-// Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
-// all closures.  Macros calling macros above for each oop size.
-
-#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)        \
-                                                                                \
-int InstanceRefKlass::                                                          \
-oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {                  \
-  /* Get size before changing pointers */                                       \
-  int size = InstanceKlass::oop_oop_iterate##nv_suffix(obj, closure);           \
-                                                                                \
-  if (UseCompressedOops) {                                                      \
-    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains);   \
-  } else {                                                                      \
-    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains);         \
-  }                                                                             \
-}
-
-#if INCLUDE_ALL_GCS
-#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
-                                                                                 \
-int InstanceRefKlass::                                                           \
-oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {         \
-  /* Get size before changing pointers */                                        \
-  int size = InstanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure);  \
-                                                                                 \
-  if (UseCompressedOops) {                                                       \
-    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, contains);    \
-  } else {                                                                       \
-    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, contains);          \
-  }                                                                              \
-}
-#endif // INCLUDE_ALL_GCS
-
-
-#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)        \
-                                                                                  \
-int InstanceRefKlass::                                                            \
-oop_oop_iterate##nv_suffix##_m(oop obj,                                           \
-                               OopClosureType* closure,                           \
-                               MemRegion mr) {                                    \
-  int size = InstanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr);     \
-  if (UseCompressedOops) {                                                        \
-    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr.contains);  \
-  } else {                                                                        \
-    InstanceRefKlass_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr.contains);        \
-  }                                                                               \
-}
-
-ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
-ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN)
-#if INCLUDE_ALL_GCS
-ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
-ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
-#endif // INCLUDE_ALL_GCS
-ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
-ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m)
-
-#if INCLUDE_ALL_GCS
-template <class T>
-void specialized_oop_push_contents(InstanceRefKlass *ref,
-                                   PSPromotionManager* pm, oop obj) {
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  if (PSScavenge::should_scavenge(referent_addr)) {
-    ReferenceProcessor* rp = PSScavenge::reference_processor();
-    if (rp->discover_reference(obj, ref->reference_type())) {
-      // reference already enqueued, referent and next will be traversed later
-      ref->InstanceKlass::oop_push_contents(pm, obj);
-      return;
-    } else {
-      // treat referent as normal oop
-      pm->claim_or_forward_depth(referent_addr);
-    }
-  }
-  // Treat discovered as normal oop, if ref is not "active",
-  // i.e. if next is non-NULL.
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
-    T next_oop = oopDesc::load_heap_oop(next_addr);
-    if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
-      T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-      debug_only(
-        if(TraceReferenceGC && PrintGCDetails) {
-          gclog_or_tty->print_cr("   Process discovered as normal "
-                                 INTPTR_FORMAT, discovered_addr);
-        }
-      )
-      if (PSScavenge::should_scavenge(discovered_addr)) {
-        pm->claim_or_forward_depth(discovered_addr);
-      }
-    }
-  } else {
-#ifdef ASSERT
-    // In the case of older JDKs which do not use the discovered
-    // field for the pending list, an inactive ref (next != NULL)
-    // must always have a NULL discovered field.
-    oop next = oopDesc::load_decode_heap_oop(next_addr);
-    oop discovered = java_lang_ref_Reference::discovered(obj);
-    assert(oopDesc::is_null(next) || oopDesc::is_null(discovered),
-           err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field",
-                   (oopDesc*)obj));
-#endif
-  }
-
-  // Treat next as normal oop;  next is a link in the reference queue.
-  if (PSScavenge::should_scavenge(next_addr)) {
-    pm->claim_or_forward_depth(next_addr);
-  }
-  ref->InstanceKlass::oop_push_contents(pm, obj);
-}
-
-void InstanceRefKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
-  if (UseCompressedOops) {
-    specialized_oop_push_contents<narrowOop>(this, pm, obj);
-  } else {
-    specialized_oop_push_contents<oop>(this, pm, obj);
-  }
-}
-
-template <class T>
-void specialized_oop_update_pointers(InstanceRefKlass *ref,
-                                     ParCompactionManager* cm, oop obj) {
-  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
-  PSParallelCompact::adjust_pointer(referent_addr);
-  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
-  PSParallelCompact::adjust_pointer(next_addr);
-  T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
-  PSParallelCompact::adjust_pointer(discovered_addr);
-  debug_only(trace_reference_gc("InstanceRefKlass::oop_update_ptrs", obj,
-                                referent_addr, next_addr, discovered_addr);)
-}
-
-int InstanceRefKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
-  InstanceKlass::oop_update_pointers(cm, obj);
-  if (UseCompressedOops) {
-    specialized_oop_update_pointers<narrowOop>(this, cm, obj);
-  } else {
-    specialized_oop_update_pointers<oop>(this, cm, obj);
-  }
-  return size_helper();
-}
-#endif // INCLUDE_ALL_GCS
-
 void InstanceRefKlass::update_nonstatic_oop_maps(Klass* k) {
   // Clear the nonstatic oop-map entries corresponding to referent
   // and nextPending field.  They are treated specially by the
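The specialized_oop_* walkers removed above were per-collector copies of one reference-field traversal, differing only in the callback applied to each field and in the pointer width picked by UseCompressedOops. The replacement keeps a single templated body (see instanceRefKlass.inline.hpp below). A standalone sketch of that dispatch shape, with illustrative names only, not HotSpot code:

// sketch.cpp -- standalone model of the "one body, two pointer widths"
// dispatch that replaces the per-collector copies (names illustrative).
#include <cstdio>

typedef unsigned int narrowSlot; // stand-in for HotSpot's narrowOop
typedef void*        wideSlot;   // stand-in for an uncompressed oop

// One templated walker; the callback supplies the collector-specific step
// (mark_and_push, claim_or_forward_depth, adjust_pointer, ...).
template <class T, class Callback>
void walk_reference_fields(T* fields, int count, Callback cb) {
  for (int i = 0; i < count; i++) {
    cb(&fields[i]);
  }
}

static void print_narrow(narrowSlot* p) { std::printf("narrow slot %u\n", *p); }
static void print_wide(wideSlot* p)     { std::printf("wide slot %p\n", *p); }

int main() {
  narrowSlot compressed[2]   = {1, 2};
  wideSlot   uncompressed[2] = {0, 0};
  // Mirrors the UseCompressedOops branch: same body, two instantiations.
  walk_reference_fields(compressed, 2, print_narrow);
  walk_reference_fields(uncompressed, 2, print_wide);
  return 0;
}

The collector-specific step becomes an argument instead of a third near-identical function body.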
diff --git a/src/share/vm/oops/instanceRefKlass.hpp b/src/share/vm/oops/instanceRefKlass.hpp
--- a/src/share/vm/oops/instanceRefKlass.hpp
+++ b/src/share/vm/oops/instanceRefKlass.hpp
@@ -64,30 +64,71 @@
     return (InstanceRefKlass*) k;
   }
 
-  // Garbage collection
-  int  oop_adjust_pointers(oop obj);
-  void oop_follow_contents(oop obj);
+  // GC specific object visitors
+  //
+  // Mark Sweep
+  void oop_ms_follow_contents(oop obj);
+  int  oop_ms_adjust_pointers(oop obj);
+#if INCLUDE_ALL_GCS
+  // Parallel Scavenge
+  void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
+  // Parallel Compact
+  void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
+  void oop_pc_update_pointers(oop obj);
+#endif
 
-  // Parallel Scavenge and Parallel Old
-  PARALLEL_GC_DECLS
+  // Oop fields (and metadata) iterators
+  //  [nv = true]  Use non-virtual calls to do_oop_nv.
+  //  [nv = false] Use virtual calls to do_oop.
+  //
+  // The InstanceRefKlass iterators also support reference processing.
+
+
+  // Forward iteration
+private:
+  // Iterate over all oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
 
-  int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
-    return oop_oop_iterate_v(obj, blk);
-  }
-  int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
-    return oop_oop_iterate_v_m(obj, blk, mr);
-  }
+  // Reverse iteration
+#if INCLUDE_ALL_GCS
+  // Iterate over all oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_reverse(oop obj, OopClosureType* closure);
+#endif // INCLUDE_ALL_GCS
 
-#define InstanceRefKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)       \
-  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk);                \
-  int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk, MemRegion mr);
+  // Bounded range iteration
+  // Iterate over all oop fields and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+  // Reference processing part of the iterators.
+
+  // Specialized for [T = oop] or [T = narrowOop].
+  template <bool nv, typename T, class OopClosureType, class Contains>
+  inline void oop_oop_iterate_ref_processing_specialized(oop obj, OopClosureType* closure, Contains& contains);
+
+  // Only perform reference processing if the referent object is within mr.
+  template <bool nv, class OopClosureType>
+  inline void oop_oop_iterate_ref_processing_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+  // Reference processing
+  template <bool nv, class OopClosureType>
+  inline void oop_oop_iterate_ref_processing(oop obj, OopClosureType* closure);
+
+
+ public:
+
+#define InstanceRefKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)       \
+  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);            \
+  int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_DECL)
 
 #if INCLUDE_ALL_GCS
-#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
-  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
+#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
+  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
 
   ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
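The nv template parameter declared above takes over the role of the old nv_suffix token pasting: a compile-time bool now selects between the virtual do_oop and the statically bound do_oop_nv. A standalone model of that selection, simplified from and only assumed to mirror the Devirtualizer helper in memory/iterator.inline.hpp:

// sketch.cpp -- standalone model of compile-time closure devirtualization.
#include <cstdio>

struct Closure {
  virtual void do_oop(void** p) { std::printf("virtual do_oop\n"); }
  void do_oop_nv(void** p)      { std::printf("non-virtual do_oop_nv\n"); }
};

template <bool nv> struct Dispatch;

template <> struct Dispatch<true> {   // nv = true: statically bound call
  template <class C, class T> static void do_oop(C* cl, T* p) { cl->do_oop_nv(p); }
};
template <> struct Dispatch<false> {  // nv = false: virtual call preserved
  template <class C, class T> static void do_oop(C* cl, T* p) { cl->do_oop(p); }
};

int main() {
  Closure cl;
  void* slot = 0;
  Dispatch<true>::do_oop(&cl, &slot);   // inlined, no vtable lookup
  Dispatch<false>::do_oop(&cl, &slot);  // dynamic dispatch kept
  return 0;
}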
diff --git a/src/share/vm/oops/instanceRefKlass.inline.hpp b/src/share/vm/oops/instanceRefKlass.inline.hpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/oops/instanceRefKlass.inline.hpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP
+#define SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP
+
+#include "classfile/javaClasses.hpp"
+#include "memory/referenceProcessor.hpp"
+#include "oops/instanceRefKlass.hpp"
+#include "oops/instanceKlass.inline.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/debug.hpp"
+#include "utilities/globalDefinitions.hpp"
+#include "utilities/macros.hpp"
+
+template <bool nv, typename T, class OopClosureType, class Contains>
+void InstanceRefKlass::oop_oop_iterate_ref_processing_specialized(oop obj, OopClosureType* closure, Contains& contains) {
+  T* disc_addr = (T*)java_lang_ref_Reference::discovered_addr(obj);
+  if (closure->apply_to_weak_ref_discovered_field()) {
+    Devirtualizer<nv>::do_oop(closure, disc_addr);
+  }
+
+  T* referent_addr = (T*)java_lang_ref_Reference::referent_addr(obj);
+  T heap_oop = oopDesc::load_heap_oop(referent_addr);
+  ReferenceProcessor* rp = closure->_ref_processor;
+  if (!oopDesc::is_null(heap_oop)) {
+    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!referent->is_gc_marked() && (rp != NULL) &&
+        rp->discover_reference(obj, reference_type())) {
+      return;
+    } else if (contains(referent_addr)) {
+      // treat referent as normal oop
+      Devirtualizer<nv>::do_oop(closure, referent_addr);
+    }
+  }
+  T* next_addr = (T*)java_lang_ref_Reference::next_addr(obj);
+  if (ReferenceProcessor::pending_list_uses_discovered_field()) {
+    T next_oop = oopDesc::load_heap_oop(next_addr);
+    // Treat discovered as normal oop, if ref is not "active" (next non-NULL)
+    if (!oopDesc::is_null(next_oop) && contains(disc_addr)) {
+      // i.e. ref is not "active"
+      debug_only(
+        if(TraceReferenceGC && PrintGCDetails) {
+          gclog_or_tty->print_cr("   Process discovered as normal "
+                                 PTR_FORMAT, p2i(disc_addr));
+        }
+      )
+      Devirtualizer<nv>::do_oop(closure, disc_addr);
+    }
+  } else {
+    // In the case of older JDKs which do not use the discovered field for
+    // the pending list, an inactive ref (next != NULL) must always have a
+    // NULL discovered field.
+    debug_only(
+      T next_oop = oopDesc::load_heap_oop(next_addr);
+      T disc_oop = oopDesc::load_heap_oop(disc_addr);
+      assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop),
+             err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL"
+                     "discovered field", p2i(obj)));
+    )
+  }
+  // treat next as normal oop
+  if (contains(next_addr)) {
+    Devirtualizer<nv>::do_oop(closure, next_addr);
+  }
+}
+
+class AlwaysContains {
+ public:
+  template <typename T> bool operator()(T* p) const { return true; }
+};
+static AlwaysContains always_contains;
+
+template <bool nv, class OopClosureType>
+void InstanceRefKlass::oop_oop_iterate_ref_processing(oop obj, OopClosureType* closure) {
+  if (UseCompressedOops) {
+    oop_oop_iterate_ref_processing_specialized<nv, narrowOop>(obj, closure, always_contains);
+  } else {
+    oop_oop_iterate_ref_processing_specialized<nv, oop>(obj, closure, always_contains);
+  }
+}
+
+class MrContains {
+  const MemRegion _mr;
+ public:
+  MrContains(MemRegion mr) : _mr(mr) {}
+  template <typename T> bool operator()(T* p) const { return _mr.contains(p); }
+};
+
+template <bool nv, class OopClosureType>
+void InstanceRefKlass::oop_oop_iterate_ref_processing_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  const MrContains contains(mr);
+  if (UseCompressedOops) {
+    oop_oop_iterate_ref_processing_specialized<nv, narrowOop>(obj, closure, contains);
+  } else {
+    oop_oop_iterate_ref_processing_specialized<nv, oop>(obj, closure, contains);
+  }
+}
+
+template <bool nv, class OopClosureType>
+int InstanceRefKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  // Get size before changing pointers
+  int size = InstanceKlass::oop_oop_iterate<nv>(obj, closure);
+
+  oop_oop_iterate_ref_processing<nv>(obj, closure);
+
+  return size;
+}
+
+#if INCLUDE_ALL_GCS
+template <bool nv, class OopClosureType>
+int InstanceRefKlass::
+oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
+  // Get size before changing pointers
+  int size = InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);
+
+  oop_oop_iterate_ref_processing<nv>(obj, closure);
+
+  return size;
+}
+#endif // INCLUDE_ALL_GCS
+
+
+template <bool nv, class OopClosureType>
+int InstanceRefKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  // Get size before changing pointers
+  int size = InstanceKlass::oop_oop_iterate_bounded<nv>(obj, closure, mr);
+
+  oop_oop_iterate_ref_processing_bounded<nv>(obj, closure, mr);
+
+  return size;
+}
+
+// Macro to define InstanceRefKlass::oop_oop_iterate for virtual/nonvirtual for
+// all closures.  Macros calling macros above for each oop size.
+
+#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)              \
+                                                                                      \
+int InstanceRefKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
+  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                       \
+}
+
+#if INCLUDE_ALL_GCS
+#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)    \
+                                                                                      \
+int InstanceRefKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
+  return oop_oop_iterate_reverse<nvs_to_bool(nv_suffix)>(obj, closure);               \
+}
+#else
+#define InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+#endif
+
+
+#define InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)            \
+                                                                                      \
+int InstanceRefKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
+  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);           \
+}
+
+#define ALL_INSTANCE_REF_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
+  InstanceRefKlass_OOP_OOP_ITERATE_DEFN(          OopClosureType, nv_suffix)    \
+  InstanceRefKlass_OOP_OOP_ITERATE_DEFN_m(        OopClosureType, nv_suffix)    \
+  InstanceRefKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+
+
+#endif // SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP
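The AlwaysContains/MrContains functors defined above let one reference-processing body serve both the unbounded and the MemRegion-bounded iterators; the predicate is a template argument, so the always-true case inlines away. A standalone model of the pattern, with illustrative names:

// sketch.cpp -- standalone model of the Contains-functor pattern.
#include <cstdio>

struct AlwaysTrue {
  template <typename T> bool operator()(T* p) const { return true; }
};

struct InRange {
  char* _lo; char* _hi;
  InRange(char* lo, char* hi) : _lo(lo), _hi(hi) {}
  template <typename T> bool operator()(T* p) const {
    return (char*)p >= _lo && (char*)p < _hi;
  }
};

template <typename T, class Contains>
int count_contained(T* fields, int n, const Contains& contains) {
  int hits = 0;
  for (int i = 0; i < n; i++) {
    if (contains(&fields[i])) hits++;  // predicate decides if the slot is visited
  }
  return hits;
}

int main() {
  int slots[4] = {0, 0, 0, 0};
  std::printf("%d\n", count_contained(slots, 4, AlwaysTrue()));  // 4
  std::printf("%d\n", count_contained(slots, 4,
              InRange((char*)&slots[1], (char*)&slots[3])));     // 2
  return 0;
}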
diff --git a/src/share/vm/oops/klass.cpp b/src/share/vm/oops/klass.cpp
--- a/src/share/vm/oops/klass.cpp
+++ b/src/share/vm/oops/klass.cpp
@@ -27,7 +27,6 @@
 #include "classfile/dictionary.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/heapInspection.hpp"
 #include "memory/metadataFactory.hpp"
@@ -43,9 +42,6 @@
 #include "utilities/stack.inline.hpp"
 #if INCLUDE_ALL_GCS
 #include "gc_implementation/g1/g1SATBCardTableModRefBS.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
-#include "gc_implementation/parallelScavenge/psPromotionManager.hpp"
-#include "gc_implementation/parallelScavenge/psScavenge.hpp"
 #endif // INCLUDE_ALL_GCS
 
 void Klass::set_name(Symbol* n) {
diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp
--- a/src/share/vm/oops/klass.hpp
+++ b/src/share/vm/oops/klass.hpp
@@ -25,21 +25,14 @@
 #ifndef SHARE_VM_OOPS_KLASS_HPP
 #define SHARE_VM_OOPS_KLASS_HPP
 
-#include "memory/genOopClosures.hpp"
 #include "memory/iterator.hpp"
 #include "memory/memRegion.hpp"
 #include "memory/specialized_oop_closures.hpp"
-#include "oops/klassPS.hpp"
 #include "oops/metadata.hpp"
 #include "oops/oop.hpp"
 #include "trace/traceMacros.hpp"
 #include "utilities/accessFlags.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.hpp"
-#include "gc_implementation/g1/g1OopClosures.hpp"
-#include "gc_implementation/parNew/parOopClosures.hpp"
-#endif // INCLUDE_ALL_GCS
 
 //
 // A Klass provides:
@@ -61,6 +54,7 @@
 class ClassLoaderData;
 class klassVtable;
 class ParCompactionManager;
+class PSPromotionManager;
 class KlassSizeStats;
 class fieldDescriptor;
 
@@ -478,13 +472,6 @@
   // and the package separators as '/'.
   virtual const char* signature_name() const;
 
-  // garbage collection support
-  virtual void oop_follow_contents(oop obj) = 0;
-  virtual int  oop_adjust_pointers(oop obj) = 0;
-
-  // Parallel Scavenge and Parallel Old
-  PARALLEL_GC_DECLS_PV
-
   // type testing operations
  protected:
   virtual bool oop_is_instance_slow()       const { return false; }
@@ -581,60 +568,35 @@
     clean_weak_klass_links(is_alive, false /* clean_alive_klasses */);
   }
 
-  // iterators
-  virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0;
-  virtual int oop_oop_iterate_v(oop obj, ExtendedOopClosure* blk) {
-    return oop_oop_iterate(obj, blk);
-  }
+  // GC specific object visitors
+  //
+  // Mark Sweep
+  virtual void oop_ms_follow_contents(oop obj) = 0;
+  virtual int  oop_ms_adjust_pointers(oop obj) = 0;
+#if INCLUDE_ALL_GCS
+  // Parallel Scavenge
+  virtual void oop_ps_push_contents(  oop obj, PSPromotionManager* pm)   = 0;
+  // Parallel Compact
+  virtual void oop_pc_follow_contents(oop obj, ParCompactionManager* cm) = 0;
+  virtual void oop_pc_update_pointers(oop obj) = 0;
+#endif
+
+  // Iterators specialized to particular subtypes
+  // of ExtendedOopClosure, to avoid closure virtual calls.
+#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                               \
+  virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) = 0;             \
+  /* Iterates "closure" over all the oops in "obj" (of type "this") within "mr". */         \
+  virtual int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) = 0;
+
+  ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
+  ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
 
 #if INCLUDE_ALL_GCS
-  // In case we don't have a specialized backward scanner use forward
-  // iteration.
-  virtual int oop_oop_iterate_backwards_v(oop obj, ExtendedOopClosure* blk) {
-    return oop_oop_iterate_v(obj, blk);
-  }
-#endif // INCLUDE_ALL_GCS
+#define Klass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)                     \
+  virtual int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) = 0;
 
-  // Iterates "blk" over all the oops in "obj" (of type "this") within "mr".
-  // (I don't see why the _m should be required, but without it the Solaris
-  // C++ gives warning messages about overridings of the "oop_oop_iterate"
-  // defined above "hiding" this virtual function.  (DLD, 6/20/00)) */
-  virtual int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) = 0;
-  virtual int oop_oop_iterate_v_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
-    return oop_oop_iterate_m(obj, blk, mr);
-  }
-
-  // Versions of the above iterators specialized to particular subtypes
-  // of OopClosure, to avoid closure virtual calls.
-#define Klass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)                \
-  virtual int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk) {    \
-    /* Default implementation reverts to general version. */                \
-    return oop_oop_iterate(obj, blk);                                       \
-  }                                                                         \
-                                                                            \
-  /* Iterates "blk" over all the oops in "obj" (of type "this") within "mr". \
-     (I don't see why the _m should be required, but without it the Solaris \
-     C++ gives warning messages about overridings of the "oop_oop_iterate"  \
-     defined above "hiding" this virtual function.  (DLD, 6/20/00)) */     \
-  virtual int oop_oop_iterate##nv_suffix##_m(oop obj,                       \
-                                             OopClosureType* blk,           \
-                                             MemRegion mr) {                \
-    return oop_oop_iterate_m(obj, blk, mr);                                 \
-  }
-
-  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_DECL)
-  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_DECL)
-
-#if INCLUDE_ALL_GCS
-#define Klass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)     \
-  virtual int oop_oop_iterate_backwards##nv_suffix(oop obj,                 \
-                                                   OopClosureType* blk) {   \
-    /* Default implementation reverts to general version. */                \
-    return oop_oop_iterate_backwards_v(obj, blk);                           \
-  }
-
-  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
-  SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
+  ALL_OOP_OOP_ITERATE_CLOSURES_1(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
+  ALL_OOP_OOP_ITERATE_CLOSURES_2(Klass_OOP_OOP_ITERATE_BACKWARDS_DECL)
 #endif // INCLUDE_ALL_GCS
 
   virtual void array_klasses_do(void f(Klass* k)) {}
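With the default macro bodies gone, every concrete Klass now has to provide each specialized entry point itself, and the per-class *_OOP_OOP_ITERATE_DEFN macros route all of them to one shared template. A standalone model of that stamping scheme, structure only, with invented closure names:

// sketch.cpp -- standalone model: a macro stamps one virtual entry point
// per closure type; subclasses route each entry to a shared template.
#include <cstdio>

struct ClosureA { void apply() { std::printf("A\n"); } };
struct ClosureB { void apply() { std::printf("B\n"); } };

#define ITERATE_DECL(ClosureType, suffix) \
  virtual void iterate##suffix(ClosureType* cl) = 0;

#define ITERATE_DEFN(Subclass, ClosureType, suffix) \
  void Subclass::iterate##suffix(ClosureType* cl) { iterate_impl(cl); }

struct Base {
  ITERATE_DECL(ClosureA, _a)  // expands to: virtual void iterate_a(ClosureA*) = 0;
  ITERATE_DECL(ClosureB, _b)
  virtual ~Base() {}
};

struct Sub : Base {
  template <class C> void iterate_impl(C* cl) { cl->apply(); }  // one shared body
  void iterate_a(ClosureA* cl);
  void iterate_b(ClosureB* cl);
};

ITERATE_DEFN(Sub, ClosureA, _a)
ITERATE_DEFN(Sub, ClosureB, _b)

int main() {
  ClosureA a; ClosureB b; Sub s;
  Base* base = &s;
  base->iterate_a(&a);  // one virtual hop, then the template body
  base->iterate_b(&b);
  return 0;
}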
diff --git a/src/share/vm/oops/klassPS.hpp b/src/share/vm/oops/klassPS.hpp
deleted file mode 100644
--- a/src/share/vm/oops/klassPS.hpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (c) 2007, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_OOPS_KLASSPS_HPP
-#define SHARE_VM_OOPS_KLASSPS_HPP
-
-  // Expands to Parallel Scavenge and Parallel Old declarations
-
-#include "utilities/macros.hpp"
-
-#if INCLUDE_ALL_GCS
-#define PARALLEL_GC_DECLS \
-  virtual void oop_push_contents(PSPromotionManager* pm, oop obj); \
-  /* Parallel Old GC support \
-  \
-   The 2-arg version of oop_update_pointers is for objects that are \
-   known not to cross chunk boundaries.  The 4-arg version is for \
-   objects that do (or may) cross chunk boundaries; it updates only those \
-   oops that are in the region [beg_addr, end_addr). */ \
-  virtual void oop_follow_contents(ParCompactionManager* cm, oop obj); \
-  virtual int  oop_update_pointers(ParCompactionManager* cm, oop obj);
-
-// Pure virtual version for klass.hpp
-#define PARALLEL_GC_DECLS_PV \
-  virtual void oop_push_contents(PSPromotionManager* pm, oop obj) = 0; \
-  virtual void oop_follow_contents(ParCompactionManager* cm, oop obj) = 0; \
-  virtual int  oop_update_pointers(ParCompactionManager* cm, oop obj) = 0;
-#else  // INCLUDE_ALL_GCS
-#define PARALLEL_GC_DECLS
-#define PARALLEL_GC_DECLS_PV
-#endif // INCLUDE_ALL_GCS
-
-#endif // SHARE_VM_OOPS_KLASSPS_HPP
diff --git a/src/share/vm/oops/objArrayKlass.cpp b/src/share/vm/oops/objArrayKlass.cpp
--- a/src/share/vm/oops/objArrayKlass.cpp
+++ b/src/share/vm/oops/objArrayKlass.cpp
@@ -26,9 +26,7 @@
 #include "classfile/symbolTable.hpp"
 #include "classfile/systemDictionary.hpp"
 #include "classfile/vmSymbols.hpp"
-#include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
-#include "memory/genOopClosures.inline.hpp"
 #include "memory/iterator.inline.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/resourceArea.hpp"
@@ -45,17 +43,6 @@
 #include "runtime/orderAccess.inline.hpp"
 #include "utilities/copy.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/concurrentMarkSweep/cmsOopClosures.inline.hpp"
-#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
-#include "gc_implementation/g1/g1OopClosures.inline.hpp"
-#include "gc_implementation/g1/g1RemSet.inline.hpp"
-#include "gc_implementation/g1/heapRegionManager.inline.hpp"
-#include "gc_implementation/parNew/parOopClosures.inline.hpp"
-#include "gc_implementation/parallelScavenge/psCompactionManager.hpp"
-#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
-#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
-#endif // INCLUDE_ALL_GCS
 
 ObjArrayKlass* ObjArrayKlass::allocate(ClassLoaderData* loader_data, int n, KlassHandle klass_handle, Symbol* name, TRAPS) {
   assert(ObjArrayKlass::header_size() <= InstanceKlass::header_size(),
@@ -410,179 +397,6 @@
   bottom_klass()->initialize(THREAD);  // dispatches to either InstanceKlass or TypeArrayKlass
 }
 
-#define ObjArrayKlass_SPECIALIZED_OOP_ITERATE(T, a, p, do_oop) \
-{                                   \
-  T* p         = (T*)(a)->base();   \
-  T* const end = p + (a)->length(); \
-  while (p < end) {                 \
-    do_oop;                         \
-    p++;                            \
-  }                                 \
-}
-
-#define ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(T, a, p, low, high, do_oop) \
-{                                   \
-  T* const l    = (T*)(low);        \
-  T* const h    = (T*)(high);       \
-  T* p          = (T*)(a)->base();  \
-  T* end        = p + (a)->length();\
-  if (p < l) p = l;                 \
-  if (end > h) end = h;             \
-  while (p < end) {                 \
-    do_oop;                         \
-    ++p;                            \
-  }                                 \
-}
-
-#define ObjArrayKlass_OOP_ITERATE(a, p, do_oop)      \
-  if (UseCompressedOops) {                           \
-    ObjArrayKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
-      a, p, do_oop)                                  \
-  } else {                                           \
-    ObjArrayKlass_SPECIALIZED_OOP_ITERATE(oop,       \
-      a, p, do_oop)                                  \
-  }
-
-#define ObjArrayKlass_BOUNDED_OOP_ITERATE(a, p, low, high, do_oop)  \
-  if (UseCompressedOops) {                                          \
-    ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,        \
-      a, p, low, high, do_oop)                                      \
-  } else {                                                          \
-    ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,              \
-      a, p, low, high, do_oop)                                      \
-  }
-
-void ObjArrayKlass::oop_follow_contents(oop obj) {
-  assert (obj->is_array(), "obj must be array");
-  MarkSweep::follow_klass(obj->klass());
-  if (UseCompressedOops) {
-    objarray_follow_contents<narrowOop>(obj, 0);
-  } else {
-    objarray_follow_contents<oop>(obj, 0);
-  }
-}
-
-#if INCLUDE_ALL_GCS
-void ObjArrayKlass::oop_follow_contents(ParCompactionManager* cm,
-                                        oop obj) {
-  assert(obj->is_array(), "obj must be array");
-  PSParallelCompact::follow_klass(cm, obj->klass());
-  if (UseCompressedOops) {
-    objarray_follow_contents<narrowOop>(cm, obj, 0);
-  } else {
-    objarray_follow_contents<oop>(cm, obj, 0);
-  }
-}
-#endif // INCLUDE_ALL_GCS
-
-#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)         \
-                                                                              \
-int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj,                        \
-                                              OopClosureType* closure) {      \
-  assert (obj->is_array(), "obj must be array");                              \
-  objArrayOop a = objArrayOop(obj);                                           \
-  /* Get size before changing pointers. */                                    \
-  /* Don't call size() or oop_size() since that is a virtual call. */         \
-  int size = a->object_size();                                                \
-  if_do_metadata_checked(closure, nv_suffix) {                                \
-    closure->do_klass##nv_suffix(obj->klass());                               \
-  }                                                                           \
-  ObjArrayKlass_OOP_ITERATE(a, p, (closure)->do_oop##nv_suffix(p))            \
-  return size;                                                                \
-}
-
-#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)       \
-                                                                              \
-int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj,                    \
-                                                  OopClosureType* closure,    \
-                                                  MemRegion mr) {             \
-  assert(obj->is_array(), "obj must be array");                               \
-  objArrayOop a = objArrayOop(obj);                                           \
-  /* Get size before changing pointers. */                                    \
-  /* Don't call size() or oop_size() since that is a virtual call */          \
-  int size = a->object_size();                                                \
-  if_do_metadata_checked(closure, nv_suffix) {                                \
-    /* SSS: Do we need to pass down mr here? */                               \
-    closure->do_klass##nv_suffix(a->klass());                                 \
-  }                                                                           \
-  ObjArrayKlass_BOUNDED_OOP_ITERATE(                                          \
-    a, p, mr.start(), mr.end(), (closure)->do_oop##nv_suffix(p))              \
-  return size;                                                                \
-}
-
-// Like oop_oop_iterate but only iterates over a specified range and only used
-// for objArrayOops.
-#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix)       \
-                                                                              \
-int ObjArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj,                  \
-                                                    OopClosureType* closure,  \
-                                                    int start, int end) {     \
-  assert(obj->is_array(), "obj must be array");                               \
-  objArrayOop a  = objArrayOop(obj);                                          \
-  /* Get size before changing pointers. */                                    \
-  /* Don't call size() or oop_size() since that is a virtual call */          \
-  int size = a->object_size();                                                \
-  if (UseCompressedOops) {                                                    \
-    HeapWord* low = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<narrowOop>(start); \
-    /* this might be wierd if end needs to be aligned on HeapWord boundary */ \
-    HeapWord* high = (HeapWord*)((narrowOop*)a->base() + end);                \
-    MemRegion mr(low, high);                                                  \
-    if_do_metadata_checked(closure, nv_suffix) {                              \
-      /* SSS: Do we need to pass down mr here? */                             \
-      closure->do_klass##nv_suffix(a->klass());                               \
-    }                                                                         \
-    ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,                  \
-      a, p, low, high, (closure)->do_oop##nv_suffix(p))                       \
-  } else {                                                                    \
-    HeapWord* low  = start == 0 ? (HeapWord*)a : (HeapWord*)a->obj_at_addr<oop>(start); \
-    HeapWord* high = (HeapWord*)((oop*)a->base() + end);                      \
-    MemRegion mr(low, high);                                                  \
-    if_do_metadata_checked(closure, nv_suffix) {                              \
-      /* SSS: Do we need to pass down mr here? */                             \
-      closure->do_klass##nv_suffix(a->klass());                               \
-    }                                                                         \
-    ObjArrayKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                        \
-      a, p, low, high, (closure)->do_oop##nv_suffix(p))                       \
-  }                                                                           \
-  return size;                                                                \
-}
-
-ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN)
-ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN)
-ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
-ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m)
-ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
-ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r)
-
-int ObjArrayKlass::oop_adjust_pointers(oop obj) {
-  assert(obj->is_objArray(), "obj must be obj array");
-  objArrayOop a = objArrayOop(obj);
-  // Get size before changing pointers.
-  // Don't call size() or oop_size() since that is a virtual call.
-  int size = a->object_size();
-  ObjArrayKlass_OOP_ITERATE(a, p, MarkSweep::adjust_pointer(p))
-  return size;
-}
-
-#if INCLUDE_ALL_GCS
-void ObjArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
-  assert(obj->is_objArray(), "obj must be obj array");
-  ObjArrayKlass_OOP_ITERATE( \
-    objArrayOop(obj), p, \
-    if (PSScavenge::should_scavenge(p)) { \
-      pm->claim_or_forward_depth(p); \
-    })
-}
-
-int ObjArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
-  assert (obj->is_objArray(), "obj must be obj array");
-  objArrayOop a = objArrayOop(obj);
-  int size = a->object_size();
-  ObjArrayKlass_OOP_ITERATE(a, p, PSParallelCompact::adjust_pointer(p))
-  return size;
-}
-#endif // INCLUDE_ALL_GCS
-
 // JVM support
 
 jint ObjArrayKlass::compute_modifier_flags(TRAPS) const {
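The removed ObjArrayKlass_SPECIALIZED_OOP_ITERATE macro becomes a plain templated loop over the element slots (oop_oop_iterate_elements_specialized in objArrayKlass.inline.hpp below). A standalone sketch of the loop shape, with illustrative names:

// sketch.cpp -- standalone model: the element-walk macro as a template.
#include <cstdio>

template <typename T, class Closure>
void iterate_elements(T* base, int length, Closure* cl) {
  T* p         = base;
  T* const end = p + length;
  for (; p < end; p++) {
    cl->do_slot(p);  // one callback per element slot
  }
}

struct PrintClosure {
  template <typename T> void do_slot(T* p) { std::printf("%p\n", (void*)p); }
};

int main() {
  unsigned int narrow_elems[4] = {0, 0, 0, 0};
  PrintClosure cl;
  iterate_elements(narrow_elems, 4, &cl);
  return 0;
}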
diff --git a/src/share/vm/oops/objArrayKlass.hpp b/src/share/vm/oops/objArrayKlass.hpp
--- a/src/share/vm/oops/objArrayKlass.hpp
+++ b/src/share/vm/oops/objArrayKlass.hpp
@@ -26,7 +26,6 @@
 #define SHARE_VM_OOPS_OBJARRAYKLASS_HPP
 
 #include "classfile/classLoaderData.hpp"
-#include "memory/specialized_oop_closures.hpp"
 #include "oops/arrayKlass.hpp"
 #include "utilities/macros.hpp"
 
@@ -103,28 +102,68 @@
   // Initialization (virtual from Klass)
   void initialize(TRAPS);
 
-  // Garbage collection
-  void oop_follow_contents(oop obj);
-  inline void oop_follow_contents(oop obj, int index);
-  template <class T> inline void objarray_follow_contents(oop obj, int index);
+  // GC specific object visitors
+  //
+  // Mark Sweep
+  void oop_ms_follow_contents(oop obj);
+  int  oop_ms_adjust_pointers(oop obj);
+#if INCLUDE_ALL_GCS
+  // Parallel Scavenge
+  void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
+  // Parallel Compact
+  void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
+  void oop_pc_update_pointers(oop obj);
+#endif
 
-  int oop_adjust_pointers(oop obj);
+  // Oop fields (and metadata) iterators
+  //  [nv = true]  Use non-virtual calls to do_oop_nv.
+  //  [nv = false] Use virtual calls to do_oop.
+  //
+  // The ObjArrayKlass iterators also visits the Object's klass.
 
-  // Parallel Scavenge and Parallel Old
-  PARALLEL_GC_DECLS
-#if INCLUDE_ALL_GCS
-  inline void oop_follow_contents(ParCompactionManager* cm, oop obj, int index);
-  template <class T> inline void
-    objarray_follow_contents(ParCompactionManager* cm, oop obj, int index);
-#endif // INCLUDE_ALL_GCS
+ private:
 
-  // Iterators
-  int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
-    return oop_oop_iterate_v(obj, blk);
-  }
-  int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
-    return oop_oop_iterate_v_m(obj, blk, mr);
-  }
+  // Iterate over oop elements and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate(oop obj, OopClosureType* closure);
+
+  // Iterate over oop elements within mr, and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr);
+
+  // Iterate over oop elements with indices within [start, end), and metadata.
+  template <bool nv, class OopClosureType>
+  inline int oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end);
+
+  // Iterate over oop elements within [start, end), and metadata.
+  // Specialized for [T = oop] or [T = narrowOop].
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end);
+
+ public:
+  // Iterate over all oop elements.
+  template <bool nv, class OopClosureType>
+  inline void oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure);
+
+ private:
+  // Iterate over all oop elements.
+  // Specialized for [T = oop] or [T = narrowOop].
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_elements_specialized(objArrayOop a, OopClosureType* closure);
+
+  // Iterate over all oop elements with indices within mr.
+  template <bool nv, class OopClosureType>
+  inline void oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureType* closure, MemRegion mr);
+
+  // Iterate over oop elements within [low, high)..
+  // Specialized for [T = oop] or [T = narrowOop].
+  template <bool nv, typename T, class OopClosureType>
+  inline void oop_oop_iterate_elements_specialized_bounded(objArrayOop a, OopClosureType* closure, void* low, void* high);
+
+
+
+ public:
+
 #define ObjArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)   \
   int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* blk);         \
   int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* blk,      \
@@ -135,6 +174,14 @@
   ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
   ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_DECL)
 
+#if INCLUDE_ALL_GCS
+#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix) \
+  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* blk);
+
+  ALL_OOP_OOP_ITERATE_CLOSURES_1(ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
+  ALL_OOP_OOP_ITERATE_CLOSURES_2(ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
+#endif // INCLUDE_ALL_GCS
+
   // JVM support
   jint compute_modifier_flags(TRAPS) const;
 
diff --git a/src/share/vm/oops/objArrayKlass.inline.hpp b/src/share/vm/oops/objArrayKlass.inline.hpp
--- a/src/share/vm/oops/objArrayKlass.inline.hpp
+++ b/src/share/vm/oops/objArrayKlass.inline.hpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2010, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2010, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,78 +25,165 @@
 #ifndef SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP
 #define SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP
 
-#include "gc_implementation/shared/markSweep.inline.hpp"
+#include "memory/memRegion.hpp"
+#include "memory/iterator.inline.hpp"
 #include "oops/objArrayKlass.hpp"
+#include "oops/objArrayOop.inline.hpp"
+#include "oops/oop.inline.hpp"
 #include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/parallelScavenge/psCompactionManager.inline.hpp"
-#include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
-#endif // INCLUDE_ALL_GCS
 
-void ObjArrayKlass::oop_follow_contents(oop obj, int index) {
-  if (UseCompressedOops) {
-    objarray_follow_contents<narrowOop>(obj, index);
-  } else {
-    objarray_follow_contents<oop>(obj, index);
+template <bool nv, typename T, class OopClosureType>
+void ObjArrayKlass::oop_oop_iterate_elements_specialized(objArrayOop a, OopClosureType* closure) {
+  T* p         = (T*)a->base();
+  T* const end = p + a->length();
+
+  for (;p < end; p++) {
+    Devirtualizer<nv>::do_oop(closure, p);
   }
 }
 
-template <class T>
-void ObjArrayKlass::objarray_follow_contents(oop obj, int index) {
-  objArrayOop a = objArrayOop(obj);
-  const size_t len = size_t(a->length());
-  const size_t beg_index = size_t(index);
-  assert(beg_index < len || len == 0, "index too large");
+template <bool nv, typename T, class OopClosureType>
+void ObjArrayKlass::oop_oop_iterate_elements_specialized_bounded(
+    objArrayOop a, OopClosureType* closure, void* low, void* high) {
 
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
-  const size_t end_index = beg_index + stride;
-  T* const base = (T*)a->base();
-  T* const beg = base + beg_index;
-  T* const end = base + end_index;
+  T* const l = (T*)low;
+  T* const h = (T*)high;
 
-  // Push the non-NULL elements of the next stride on the marking stack.
-  for (T* e = beg; e < end; e++) {
-    MarkSweep::mark_and_push(e);
+  T* p   = (T*)a->base();
+  T* end = p + a->length();
+
+  if (p < l) {
+    p = l;
+  }
+  if (end > h) {
+    end = h;
   }
 
-  if (end_index < len) {
-    MarkSweep::push_objarray(a, end_index); // Push the continuation.
+  for (;p < end; ++p) {
+    Devirtualizer<nv>::do_oop(closure, p);
   }
 }
 
-#if INCLUDE_ALL_GCS
-void ObjArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj,
-                                        int index) {
+template <bool nv, class OopClosureType>
+void ObjArrayKlass::oop_oop_iterate_elements(objArrayOop a, OopClosureType* closure) {
   if (UseCompressedOops) {
-    objarray_follow_contents<narrowOop>(cm, obj, index);
+    oop_oop_iterate_elements_specialized<nv, narrowOop>(a, closure);
   } else {
-    objarray_follow_contents<oop>(cm, obj, index);
+    oop_oop_iterate_elements_specialized<nv, oop>(a, closure);
   }
 }
 
-template <class T>
-void ObjArrayKlass::objarray_follow_contents(ParCompactionManager* cm, oop obj,
-                                             int index) {
-  objArrayOop a = objArrayOop(obj);
-  const size_t len = size_t(a->length());
-  const size_t beg_index = size_t(index);
-  assert(beg_index < len || len == 0, "index too large");
-
-  const size_t stride = MIN2(len - beg_index, ObjArrayMarkingStride);
-  const size_t end_index = beg_index + stride;
-  T* const base = (T*)a->base();
-  T* const beg = base + beg_index;
-  T* const end = base + end_index;
-
-  // Push the non-NULL elements of the next stride on the marking stack.
-  for (T* e = beg; e < end; e++) {
-    PSParallelCompact::mark_and_push(cm, e);
+template <bool nv, class OopClosureType>
+void ObjArrayKlass::oop_oop_iterate_elements_bounded(objArrayOop a, OopClosureType* closure, MemRegion mr) {
+  if (UseCompressedOops) {
+    oop_oop_iterate_elements_specialized_bounded<nv, narrowOop>(a, closure, mr.start(), mr.end());
+  } else {
+    oop_oop_iterate_elements_specialized_bounded<nv, oop>(a, closure, mr.start(), mr.end());
+  }
+}
+
+template <bool nv, class OopClosureType>
+int ObjArrayKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
+  assert (obj->is_array(), "obj must be array");
+  objArrayOop a = objArrayOop(obj);
+
+  // Get size before changing pointers.
+  // Don't call size() or oop_size() since that is a virtual call.
+  int size = a->object_size();
+  if (Devirtualizer<nv>::do_metadata(closure)) {
+    Devirtualizer<nv>::do_klass(closure, obj->klass());
   }
 
-  if (end_index < len) {
-    cm->push_objarray(a, end_index); // Push the continuation.
+  oop_oop_iterate_elements<nv>(a, closure);
+
+  return size;
+}
+
+template <bool nv, class OopClosureType>
+int ObjArrayKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
+  assert(obj->is_array(), "obj must be array");
+  objArrayOop a = objArrayOop(obj);
+
+  // Get size before changing pointers.
+  // Don't call size() or oop_size() since that is a virtual call
+  int size = a->object_size();
+
+  if (Devirtualizer<nv>::do_metadata(closure)) {
+    Devirtualizer<nv>::do_klass(closure, a->klass());
   }
+
+  oop_oop_iterate_elements_bounded<nv>(a, closure, mr);
+
+  return size;
 }
-#endif // INCLUDE_ALL_GCS
+
+template <bool nv, typename T, class OopClosureType>
+void ObjArrayKlass::oop_oop_iterate_range_specialized(objArrayOop a, OopClosureType* closure, int start, int end) {
+  if (Devirtualizer<nv>::do_metadata(closure)) {
+    Devirtualizer<nv>::do_klass(closure, a->klass());
+  }
+
+  T* low  = start == 0 ? cast_from_oop<T*>(a) : a->obj_at_addr<T>(start);
+  T* high = (T*)a->base() + end;
+
+  oop_oop_iterate_elements_specialized_bounded<nv, T>(a, closure, low, high);
+}
+
+// Like oop_oop_iterate but only iterates over a specified range and only used
+// for objArrayOops.
+template <bool nv, class OopClosureType>
+int ObjArrayKlass::oop_oop_iterate_range(oop obj, OopClosureType* closure, int start, int end) {
+  assert(obj->is_array(), "obj must be array");
+  objArrayOop a  = objArrayOop(obj);
+
+  // Get size before changing pointers.
+  // Don't call size() or oop_size() since that is a virtual call
+  int size = a->object_size();
+
+  if (UseCompressedOops) {
+    oop_oop_iterate_range_specialized<nv, narrowOop>(a, closure, start, end);
+  } else {
+    oop_oop_iterate_range_specialized<nv, oop>(a, closure, start, end);
+  }
+
+  return size;
+}
+
+
+#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)              \
+                                                                                   \
+int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {  \
+  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                    \
+}
+
+#if INCLUDE_ALL_GCS
+#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)    \
+int ObjArrayKlass::oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) { \
+  /* No reverse implementation ATM. */                                             \
+  return oop_oop_iterate<nvs_to_bool(nv_suffix)>(obj, closure);                    \
+}
+#else
+#define ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif
+
+#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)            \
+                                                                                   \
+int ObjArrayKlass::oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) { \
+  return oop_oop_iterate_bounded<nvs_to_bool(nv_suffix)>(obj, closure, mr);        \
+}
+
+#define ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(OopClosureType, nv_suffix)            \
+                                                                                   \
+int ObjArrayKlass::oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure, int start, int end) { \
+  return oop_oop_iterate_range<nvs_to_bool(nv_suffix)>(obj, closure, start, end);  \
+}
+
+
+#define ALL_OBJ_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
+  ObjArrayKlass_OOP_OOP_ITERATE_DEFN(          OopClosureType, nv_suffix)    \
+  ObjArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)    \
+  ObjArrayKlass_OOP_OOP_ITERATE_DEFN_m(        OopClosureType, nv_suffix)    \
+  ObjArrayKlass_OOP_OOP_ITERATE_DEFN_r(        OopClosureType, nv_suffix)
+
 #endif // SHARE_VM_OOPS_OBJARRAYKLASS_INLINE_HPP
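The bounded variants above clamp the element walk to [low, high) before visiting anything, which is how the MemRegion form and the [start, end) index form share one loop. A standalone model of the clamping, assuming nothing beyond the visible pointer arithmetic:

// sketch.cpp -- standalone model of the bounded element walk.
#include <cstdio>

template <typename T>
int count_in_bounds(T* base, int length, T* low, T* high) {
  T* p   = base;
  T* end = base + length;
  if (p < low)    p = low;    // clamp the start up to the region
  if (end > high) end = high; // clamp the end down to the region
  int n = 0;
  for (; p < end; ++p) n++;   // a real closure would visit each slot here
  return n;
}

int main() {
  int elems[8] = {0};
  // Only elements 2..5 fall inside the clamped window.
  std::printf("%d\n", count_in_bounds(elems, 8, &elems[2], &elems[6]));
  return 0;
}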
diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp
--- a/src/share/vm/oops/oop.hpp
+++ b/src/share/vm/oops/oop.hpp
@@ -298,19 +298,6 @@
   // garbage collection
   bool is_gc_marked() const;
 
-  // Apply "MarkSweep::mark_and_push" to (the address of) every non-NULL
-  // reference field in "this".
-  void follow_contents(void);
-
-#if INCLUDE_ALL_GCS
-  // Parallel Scavenge
-  void push_contents(PSPromotionManager* pm);
-
-  // Parallel Old
-  void update_contents(ParCompactionManager* cm);
-
-  void follow_contents(ParCompactionManager* cm);
-#endif // INCLUDE_ALL_GCS
 
   bool is_scavengable() const;
 
@@ -334,9 +321,6 @@
   uint age() const;
   void incr_age();
 
-  // Adjust all pointers in this object to point at it's forwarded location and
-  // return the size of this oop.  This is used by the MarkSweep collector.
-  int adjust_pointers();
 
   // mark-sweep support
   void follow_body(int begin, int end);
@@ -345,6 +329,22 @@
   static BarrierSet* bs()            { return _bs; }
   static void set_bs(BarrierSet* bs) { _bs = bs; }
 
+  // Garbage Collection support
+
+  // Mark Sweep
+  void ms_follow_contents();
+  // Adjust all pointers in this object to point at it's forwarded location and
+  // return the size of this oop.  This is used by the MarkSweep collector.
+  int  ms_adjust_pointers();
+#if INCLUDE_ALL_GCS
+  // Parallel Compact
+  void pc_follow_contents(ParCompactionManager* pc);
+  void pc_update_contents();
+  // Parallel Scavenge
+  void ps_push_contents(PSPromotionManager* pm);
+#endif
+
+
   // iterators, returns size of object
 #define OOP_ITERATE_DECL(OopClosureType, nv_suffix)                      \
   int oop_iterate(OopClosureType* blk);                                  \
diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp
--- a/src/share/vm/oops/oop.inline.hpp
+++ b/src/share/vm/oops/oop.inline.hpp
@@ -26,13 +26,11 @@
 #define SHARE_VM_OOPS_OOP_INLINE_HPP
 
 #include "gc_implementation/shared/ageTable.hpp"
-#include "gc_implementation/shared/markSweep.inline.hpp"
 #include "gc_interface/collectedHeap.inline.hpp"
 #include "memory/barrierSet.inline.hpp"
 #include "memory/cardTableModRefBS.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/generation.hpp"
-#include "memory/specialized_oop_closures.hpp"
 #include "oops/arrayKlass.hpp"
 #include "oops/arrayOop.hpp"
 #include "oops/klass.inline.hpp"
@@ -592,11 +590,6 @@
 }
 #endif // PRODUCT
 
-inline void oopDesc::follow_contents(void) {
-  assert (is_gc_marked(), "should be marked");
-  klass()->oop_follow_contents(this);
-}
-
 inline bool oopDesc::is_scavengable() const {
   return Universe::heap()->is_scavengable(this);
 }
@@ -706,21 +699,49 @@
   }
 }
 
-inline int oopDesc::adjust_pointers() {
+inline void oopDesc::ms_follow_contents() {
+  klass()->oop_ms_follow_contents(this);
+}
+
+inline int oopDesc::ms_adjust_pointers() {
   debug_only(int check_size = size());
-  int s = klass()->oop_adjust_pointers(this);
+  int s = klass()->oop_ms_adjust_pointers(this);
   assert(s == check_size, "should be the same");
   return s;
 }
 
-#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                      \
-                                                                         \
-inline int oopDesc::oop_iterate(OopClosureType* blk) {                   \
-  return klass()->oop_oop_iterate##nv_suffix(this, blk);                 \
-}                                                                        \
-                                                                         \
-inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {     \
-  return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);         \
+#if INCLUDE_ALL_GCS
+inline void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
+  klass()->oop_pc_follow_contents(this, cm);
+}
+
+inline void oopDesc::pc_update_contents() {
+  Klass* k = klass();
+  if (!k->oop_is_typeArray()) {
+    // It might contain oops beyond the header, so take the virtual call.
+    k->oop_pc_update_pointers(this);
+  }
+  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
+}
+
+inline void oopDesc::ps_push_contents(PSPromotionManager* pm) {
+  Klass* k = klass();
+  if (!k->oop_is_typeArray()) {
+    // It might contain oops beyond the header, so take the virtual call.
+    k->oop_ps_push_contents(this, pm);
+  }
+  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
+}
+#endif
+
+#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                      \
+                                                                         \
+inline int oopDesc::oop_iterate(OopClosureType* blk) {                   \
+  return klass()->oop_oop_iterate##nv_suffix(this, blk);                 \
+}                                                                        \
+                                                                         \
+inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) {     \
+  return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr);         \
 }
 
@@ -736,18 +757,21 @@
   return oop_iterate(&cl, mr);
 }
 
-ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN)
-ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN)
+#if INCLUDE_ALL_GCS
+#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)            \
+                                                                         \
+inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {         \
+  return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);       \
+}
+#else
+#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+#endif
 
-#if INCLUDE_ALL_GCS
-#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)            \
-                                                                         \
-inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) {         \
-  return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk);       \
-}
+#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix)               \
+  OOP_ITERATE_DEFN(OopClosureType, nv_suffix)                            \
+  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
 
-ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN)
-ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DEFN)
-#endif // INCLUDE_ALL_GCS
+ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
+ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
 
 #endif // SHARE_VM_OOPS_OOP_INLINE_HPP
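After this change an oop_iterate call still pays exactly one virtual hop, from the object's Klass to the stamped-out entry point; everything behind that hop is a template instantiation. A standalone model of the call path, with types invented for illustration:

// sketch.cpp -- standalone model: one virtual hop, template body behind it.
#include <cstdio>

struct Closure { void do_oop_nv(void** p) { std::printf("visited\n"); } };

struct KlassModel {
  virtual int iterate_nv(void* obj, Closure* cl) = 0;  // stamped by macro
  virtual ~KlassModel() {}
};

struct InstanceKlassModel : KlassModel {
  template <bool nv> int iterate(void* obj, Closure* cl) {
    void* slot = 0;
    cl->do_oop_nv(&slot);  // nv = true: statically bound visit
    return 1;              // size of the object, as in HotSpot
  }
  int iterate_nv(void* obj, Closure* cl) { return iterate<true>(obj, cl); }
};

int main() {
  InstanceKlassModel k;
  Closure cl;
  KlassModel* klass = &k;
  klass->iterate_nv(0, &cl);  // the single virtual call per object
  return 0;
}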
-  DEBUG_ONLY(Klass* original_klass = klass());
-
-  Klass* new_klass = klass();
-  if (!new_klass->oop_is_typeArray()) {
-    // It might contain oops beyond the header, so take the virtual call.
-    new_klass->oop_update_pointers(cm, this);
-  }
-  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
-}
-
-inline void oopDesc::follow_contents(ParCompactionManager* cm) {
-  assert (PSParallelCompact::mark_bitmap()->is_marked(this),
-          "should be marked");
-  klass()->oop_follow_contents(cm, this);
-}
-
-#endif // SHARE_VM_OOPS_OOP_PCGC_INLINE_HPP
diff --git a/src/share/vm/oops/oop.psgc.inline.hpp b/src/share/vm/oops/oop.psgc.inline.hpp
deleted file mode 100644
--- a/src/share/vm/oops/oop.psgc.inline.hpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (c) 2002, 2013, Oracle and/or its affiliates. All rights reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
- * or visit www.oracle.com if you need additional information or have any
- * questions.
- *
- */
-
-#ifndef SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP
-#define SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP
-
-#include "utilities/macros.hpp"
-#if INCLUDE_ALL_GCS
-#include "gc_implementation/parallelScavenge/parallelScavengeHeap.hpp"
-#include "gc_implementation/parallelScavenge/psScavenge.hpp"
-#endif // INCLUDE_ALL_GCS
-
-// ParallelScavengeHeap methods
-
-inline void oopDesc::push_contents(PSPromotionManager* pm) {
-  Klass* k = klass();
-  if (!k->oop_is_typeArray()) {
-    // It might contain oops beyond the header, so take the virtual call.
-    k->oop_push_contents(pm, this);
-  }
-  // Else skip it.  The TypeArrayKlass in the header never needs scavenging.
-}
-
-#endif // SHARE_VM_OOPS_OOP_PSGC_INLINE_HPP
diff --git a/src/share/vm/oops/typeArrayKlass.cpp b/src/share/vm/oops/typeArrayKlass.cpp
--- a/src/share/vm/oops/typeArrayKlass.cpp
+++ b/src/share/vm/oops/typeArrayKlass.cpp
@@ -36,7 +36,7 @@
 #include "oops/klass.inline.hpp"
 #include "oops/objArrayKlass.hpp"
 #include "oops/oop.inline.hpp"
-#include "oops/typeArrayKlass.hpp"
+#include "oops/typeArrayKlass.inline.hpp"
 #include "oops/typeArrayOop.hpp"
 #include "runtime/handles.inline.hpp"
 #include "runtime/orderAccess.inline.hpp"
@@ -204,57 +204,6 @@
   return t->object_size();
 }
 
-void TypeArrayKlass::oop_follow_contents(oop obj) {
-  assert(obj->is_typeArray(),"must be a type array");
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-}
-
-#if INCLUDE_ALL_GCS
-void TypeArrayKlass::oop_follow_contents(ParCompactionManager* cm, oop obj) {
-  assert(obj->is_typeArray(),"must be a type array");
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-}
-#endif // INCLUDE_ALL_GCS
-
-int TypeArrayKlass::oop_adjust_pointers(oop obj) {
-  assert(obj->is_typeArray(),"must be a type array");
-  typeArrayOop t = typeArrayOop(obj);
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-  return t->object_size();
-}
-
-int TypeArrayKlass::oop_oop_iterate(oop obj, ExtendedOopClosure* blk) {
-  assert(obj->is_typeArray(),"must be a type array");
-  typeArrayOop t = typeArrayOop(obj);
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-  return t->object_size();
-}
-
-int TypeArrayKlass::oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr) {
-  assert(obj->is_typeArray(),"must be a type array");
-  typeArrayOop t = typeArrayOop(obj);
-  // Performance tweak: We skip iterating over the klass pointer since we
-  // know that Universe::TypeArrayKlass never moves.
-  return t->object_size();
-}
-
-#if INCLUDE_ALL_GCS
-void TypeArrayKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
-  ShouldNotReachHere();
-  assert(obj->is_typeArray(),"must be a type array");
-}
-
-int
-TypeArrayKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
-  assert(obj->is_typeArray(),"must be a type array");
-  return typeArrayOop(obj)->object_size();
-}
-#endif // INCLUDE_ALL_GCS
-
 void TypeArrayKlass::initialize(TRAPS) {
   // Nothing to do. Having this function is handy since objArrayKlasses can be
   // initialized by calling initialize on their bottom_klass, see ObjArrayKlass::initialize
diff --git a/src/share/vm/oops/typeArrayKlass.hpp b/src/share/vm/oops/typeArrayKlass.hpp
--- a/src/share/vm/oops/typeArrayKlass.hpp
+++ b/src/share/vm/oops/typeArrayKlass.hpp
@@ -72,17 +72,47 @@
   // Copying
   void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS);
 
-  // Iteration
-  int oop_oop_iterate(oop obj, ExtendedOopClosure* blk);
-  int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr);
+  // GC specific object visitors
+  //
+  // Mark Sweep
+  void oop_ms_follow_contents(oop obj);
+  int  oop_ms_adjust_pointers(oop obj);
+#if INCLUDE_ALL_GCS
+  // Parallel Scavenge
+  void oop_ps_push_contents(  oop obj, PSPromotionManager* pm);
+  // Parallel Compact
+  void oop_pc_follow_contents(oop obj, ParCompactionManager* cm);
+  void oop_pc_update_pointers(oop obj);
+#endif
 
-  // Garbage collection
-  void oop_follow_contents(oop obj);
-  int  oop_adjust_pointers(oop obj);
+  // Oop iterators. Since there are no oops in TypeArrayKlasses,
+  // these functions only return the size of the object.
 
-  // Parallel Scavenge and Parallel Old
-  PARALLEL_GC_DECLS
+ private:
+  // The implementation used by all oop_oop_iterate functions in TypeArrayKlasses.
+  inline int oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure);
+ public:
+
+#define TypeArrayKlass_OOP_OOP_ITERATE_DECL(OopClosureType, nv_suffix)    \
+  int oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure);       \
+  int oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure,    \
+                                     MemRegion mr);                       \
+  int oop_oop_iterate_range##nv_suffix(oop obj, OopClosureType* closure,  \
+                                       int start, int end);
+
+  ALL_OOP_OOP_ITERATE_CLOSURES_1(TypeArrayKlass_OOP_OOP_ITERATE_DECL)
+  ALL_OOP_OOP_ITERATE_CLOSURES_2(TypeArrayKlass_OOP_OOP_ITERATE_DECL)
+
+#if INCLUDE_ALL_GCS
+#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL(OopClosureType, nv_suffix)  \
+  int oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure);
+
+  ALL_OOP_OOP_ITERATE_CLOSURES_1(TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
+  ALL_OOP_OOP_ITERATE_CLOSURES_2(TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DECL)
+#endif // INCLUDE_ALL_GCS
+
+
 protected:
  // Find n'th dimensional array
  virtual Klass* array_klass_impl(bool or_null, int n, TRAPS);
diff --git a/src/share/vm/oops/typeArrayKlass.inline.hpp b/src/share/vm/oops/typeArrayKlass.inline.hpp
new file mode 100644
--- /dev/null
+++ b/src/share/vm/oops/typeArrayKlass.inline.hpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ *
+ */
+
+#ifndef SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP
+#define SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP
+
+#include "oops/oop.inline.hpp"
+#include "oops/typeArrayKlass.hpp"
+#include "oops/typeArrayOop.hpp"
+
+class ExtendedOopClosure;
+
+inline int TypeArrayKlass::oop_oop_iterate_impl(oop obj, ExtendedOopClosure* closure) {
+  assert(obj->is_typeArray(),"must be a type array");
+  typeArrayOop t = typeArrayOop(obj);
+  // Performance tweak: We skip iterating over the klass pointer since we
+  // know that Universe::TypeArrayKlass never moves.
+  return t->object_size();
+}
+
+#define TypeArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
+                                                                        \
+int TypeArrayKlass::                                                    \
+oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {          \
+  return oop_oop_iterate_impl(obj, closure);                            \
+}
+
+#if INCLUDE_ALL_GCS
+#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)  \
+                                                                                  \
+int TypeArrayKlass::                                                              \
+oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {          \
+  return oop_oop_iterate_impl(obj, closure);                                      \
+}
+#else
+#define TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+#endif
+
+
+#define TypeArrayKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)          \
+                                                                                   \
+int TypeArrayKlass::                                                               \
+oop_oop_iterate##nv_suffix##_m(oop obj, OopClosureType* closure, MemRegion mr) {   \
+  return oop_oop_iterate_impl(obj, closure);                                       \
+}
+
+#define ALL_TYPE_ARRAY_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
+  TypeArrayKlass_OOP_OOP_ITERATE_DEFN(          OopClosureType, nv_suffix)    \
+  TypeArrayKlass_OOP_OOP_ITERATE_DEFN_m(        OopClosureType, nv_suffix)    \
+  TypeArrayKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
+
+#endif // SHARE_VM_OOPS_TYPEARRAYKLASS_INLINE_HPP
diff --git a/src/share/vm/precompiled/precompiled.hpp b/src/share/vm/precompiled/precompiled.hpp
--- a/src/share/vm/precompiled/precompiled.hpp
+++ b/src/share/vm/precompiled/precompiled.hpp
@@ -122,7 +122,6 @@
 # include "memory/defNewGeneration.hpp"
 # include "memory/gcLocker.hpp"
 # include "memory/genCollectedHeap.hpp"
-# include "memory/genOopClosures.hpp"
 # include "memory/genRemSet.hpp"
 # include "memory/generation.hpp"
 # include "memory/heap.hpp"
@@ -147,7 +146,6 @@
 # include "oops/instanceOop.hpp"
 # include "oops/instanceRefKlass.hpp"
 # include "oops/klass.hpp"
-# include "oops/klassPS.hpp"
 # include "oops/klassVtable.hpp"
 # include "oops/markOop.hpp"
 # include "oops/markOop.inline.hpp"
@@ -319,7 +317,6 @@
 # include "gc_implementation/parallelScavenge/psGCAdaptivePolicyCounters.hpp"
 # include "gc_implementation/parallelScavenge/psGenerationCounters.hpp"
 # include "gc_implementation/parallelScavenge/psOldGen.hpp"
-# include "gc_implementation/parallelScavenge/psParallelCompact.hpp"
 # include "gc_implementation/parallelScavenge/psVirtualspace.hpp"
 # include "gc_implementation/parallelScavenge/psYoungGen.hpp"
 # include "gc_implementation/shared/gcAdaptivePolicyCounters.hpp"
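
Note for reviewers (not part of the patch): the oop_oop_iterate##nv_suffix machinery above generates, for every closure type listed in ALL_OOP_OOP_ITERATE_CLOSURES_1/2, a non-virtual ("_nv") iterate entry point whose do_oop calls bind statically and can be inlined, alongside the virtual fallback. The snippet below is a minimal, self-contained C++ model of that X-macro pattern; FakeKlass, CountClosure and ALL_CLOSURES are invented stand-ins for Klass, the specialized closures, and the registry macros in memory/specialized_oop_closures.hpp.

  #include <cstdio>

  // Registry macro: invokes f(ClosureType, nv_suffix) once per specialized
  // closure, analogous to ALL_OOP_OOP_ITERATE_CLOSURES_1/2.
  #define ALL_CLOSURES(f) \
    f(CountClosure, _nv)

  class CountClosure {
   public:
    int count;
    CountClosure() : count(0) {}
    // Non-virtual do_oop: calls through a CountClosure* bind statically
    // and can be inlined, which is the point of the _nv variants.
    void do_oop(int* p) { if (*p != 0) count++; }
  };

  struct FakeKlass {
    int fields[3];

    // Generates one oop_oop_iterate<suffix> body per (closure, suffix) pair,
    // like TypeArrayKlass_OOP_OOP_ITERATE_DEFN above.
    #define ITERATE_DEFN(ClosureType, nv_suffix)                 \
      int oop_oop_iterate##nv_suffix(ClosureType* closure) {     \
        for (int i = 0; i < 3; i++) closure->do_oop(&fields[i]); \
        return 3; /* stands in for the object size */            \
      }
    ALL_CLOSURES(ITERATE_DEFN)
  };

  int main() {
    FakeKlass k = { {1, 0, 2} };
    CountClosure cl;
    k.oop_oop_iterate_nv(&cl);      // expanded from ALL_CLOSURES(ITERATE_DEFN)
    std::printf("%d\n", cl.count);  // prints 2
  }

In the real patch the same pattern is stamped out per Klass subtype (here, TypeArrayKlass, whose shared oop_oop_iterate_impl merely returns the object size since type arrays contain no oops), which is what lets oopDesc::oop_iterate dispatch to a devirtualized body for each specialized closure.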