# HG changeset patch # User eosterlund # Date 1412762400 -7200 # Wed Oct 08 12:00:00 2014 +0200 # Node ID 675c881bd17c7a4009f08af223224388dcb344f3 # Parent d8b17079b783c32428e05ac7ba7d019a9c07ae45 autospecialized oop_iterate using SFINAE and templates diff --git a/src/share/vm/classfile/classFileParser.hpp b/src/share/vm/classfile/classFileParser.hpp --- a/src/share/vm/classfile/classFileParser.hpp +++ b/src/share/vm/classfile/classFileParser.hpp @@ -27,9 +27,9 @@ #include "classfile/classFileStream.hpp" #include "memory/resourceArea.hpp" -#include "oops/oop.inline.hpp" +#include "oops/oop.hpp" #include "oops/typeArrayOop.hpp" -#include "runtime/handles.inline.hpp" +#include "runtime/handles.hpp" #include "utilities/accessFlags.hpp" #include "classfile/symbolTable.hpp" diff --git a/src/share/vm/memory/iterator.hpp b/src/share/vm/memory/iterator.hpp --- a/src/share/vm/memory/iterator.hpp +++ b/src/share/vm/memory/iterator.hpp @@ -47,6 +47,11 @@ virtual void do_oop_v(oop* o) { do_oop(o); } virtual void do_oop(narrowOop* o) = 0; virtual void do_oop_v(narrowOop* o) { do_oop(o); } + + // Use SFINAE to dispatch to the "most appropriate" do_oop using OopClosureDispatcher. + // Read the specialized_oop_closures.hpp file how this works + template + void do_oop_auto(OopType* o); }; // ExtendedOopClosure adds extra code to be run during oop iterations. @@ -76,12 +81,24 @@ virtual bool do_metadata() { return do_metadata_nv(); } bool do_metadata_v() { return do_metadata(); } bool do_metadata_nv() { return false; } + // Use SFINAE to dispatch to the "most appropriate" do_metadata using OopClosureDispatcher. + // Read the specialized_oop_closures.hpp file how this works + template + bool do_metadata_auto(); virtual void do_klass(Klass* k) { do_klass_nv(k); } void do_klass_v(Klass* k) { do_klass(k); } void do_klass_nv(Klass* k) { ShouldNotReachHere(); } + // Use SFINAE to dispatch to the "most appropriate" do_klass using OopClosureDispatcher. + // Read the specialized_oop_closures.hpp file how this works + template + void do_klass_auto(Klass* klass); virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); } + // Use SFINAE to dispatch to the "most appropriate" do_class_loader_data using OopClosureDispatcher. + // Read the specialized_oop_closures.hpp file how this works + template + void do_class_loader_data_auto(ClassLoaderData* cld); // True iff this closure may be safely applied more than once to an oop // location without an intervening "major reset" (like the end of a GC). @@ -89,6 +106,18 @@ virtual bool apply_to_weak_ref_discovered_field() { return false; } }; +// Autospecialization uses an OopClosure rather than ExtendedOopClosure +// for oop_iterate_no_header to make sure metadata methods are not called +// in the first place using SFINAE type checks +template +class NoHeaderOopClosure : public OopClosure { + OopClosureType* _cl; + public: + NoHeaderOopClosure(OopClosureType *cl) : _cl(cl) {} + void do_oop(oop *p) { _cl->template do_oop_auto(p); } + void do_oop(narrowOop *p) { _cl->template do_oop_auto(p); } +}; + // Wrapper closure only used to implement oop_iterate_no_header(). 
class NoHeaderExtendedOopClosure : public ExtendedOopClosure { OopClosure* _wrapped_closure; diff --git a/src/share/vm/memory/iterator.inline.hpp b/src/share/vm/memory/iterator.inline.hpp --- a/src/share/vm/memory/iterator.inline.hpp +++ b/src/share/vm/memory/iterator.inline.hpp @@ -29,6 +29,27 @@ #include "memory/iterator.hpp" #include "oops/klass.hpp" #include "utilities/debug.hpp" +#include "specialized_oop_closures.inline.hpp" + +template +inline void OopClosure::do_oop_auto(OopType* o) { + OopClosureDispatcher::template do_oop(static_cast(this), o); +} + +template +inline bool ExtendedOopClosure::do_metadata_auto() { + return OopClosureDispatcher::template do_metadata(static_cast(this)); +} + +template +inline void ExtendedOopClosure::do_klass_auto(Klass* klass) { + OopClosureDispatcher::template do_klass(static_cast(this), klass); +} + +template +inline void ExtendedOopClosure::do_class_loader_data_auto(ClassLoaderData* cld) { + OopClosureType::template do_class_loader_data(static_cast(this), cld); +} inline void MetadataAwareOopClosure::do_class_loader_data(ClassLoaderData* cld) { assert(_klass_closure._oop_closure == this, "Must be"); diff --git a/src/share/vm/memory/specialized_oop_closures.hpp b/src/share/vm/memory/specialized_oop_closures.hpp --- a/src/share/vm/memory/specialized_oop_closures.hpp +++ b/src/share/vm/memory/specialized_oop_closures.hpp @@ -26,6 +26,7 @@ #define SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_HPP #include "utilities/macros.hpp" +#include "utilities/templateIdioms.hpp" #if INCLUDE_ALL_GCS #include "gc_implementation/g1/g1_specialized_oop_closures.hpp" #endif // INCLUDE_ALL_GCS @@ -69,6 +70,25 @@ // This is split into several because of a Visual C++ 6.0 compiler bug // where very long macros cause the compiler to crash +// These two macros are for explicitly declaring base classes for closures that +// have no do_oop implementation and hence need virtual calls. +// It should never be necessary to add more closure types except the base +// classes here as auto specialization mechanism automatically +// checks that the declared closure type also has its own declaration +// of the specialized calls. +#define UNSPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \ + f(ExtendedOopClosure) \ + f(OopClosure) + +#define UNSPECIALIZED_DO_METADATA_CLOSURES(f) \ + f(ExtendedOopClosure) + +#define FORWARD_DECLARE_CLOSURE(OopClosureType) \ +class OopClosureType; + +UNSPECIALIZED_OOP_OOP_ITERATE_CLOSURES(FORWARD_DECLARE_CLOSURE) +UNSPECIALIZED_DO_METADATA_CLOSURES(FORWARD_DECLARE_CLOSURE) + // Some other heap might define further specialized closures. #ifndef FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES #define FURTHER_SPECIALIZED_OOP_OOP_ITERATE_CLOSURES(f) \ @@ -194,6 +214,120 @@ // For keeping stats on effectiveness. #define ENABLE_SPECIALIZATION_STATS 0 +enum DispatchTag { + _unknown_klass = 1, + _instance_mirror_klass, + _instance_class_loader_klass, + _instance_ref_klass, +}; + +/** + * The OopClosureDispatcher is a proxy class that automatically figures out + * which OopClosure member function to call. It first checks for overridden + * specializations using macros (only needed for OopClosure and + * ExtendedOopClosure. These will result in virtual calls. + * Otherwise if it's a subclass of these two, it will first try to call + * the corresponding _nv member function for backward compatibility. + * Otherwise, it will check for a a normal non nv declaration, in the derived + * class (not the super class). 
If it exists, it will be called, otherwise + * it resorts to a normal virtual call. + */ +class OopClosureDispatcher : AllStatic { + template + static typename enable_if::value, void>::type + do_oop_internal_try_nv(OopClosureType *cl, OopType *obj); + + template + static typename enable_if::value, void>::type + do_oop_internal_try_nv(OopClosureType *cl, OopType *obj); + + template + static typename enable_if::value, void>::type + do_oop_internal_try_v(OopClosureType *cl, OopType *obj); + + template + static typename enable_if::value, void>::type + do_oop_internal_try_v(OopClosureType *cl, OopType *obj); + + template + static typename enable_if::value, bool>::type + do_metadata_internal_try_nv(OopClosureType *cl); + + template + static typename enable_if::value, bool>::type + do_metadata_internal_try_nv(OopClosureType *cl); + + template + static typename enable_if::value, bool>::type + do_metadata_internal_try_v(OopClosureType *cl); + + template + static typename enable_if::value, bool>::type + do_metadata_internal_try_v(OopClosureType *cl); + + template + static typename enable_if::value, void>::type + do_klass_internal_try_nv(OopClosureType *cl, Klass *klass); + + template + static typename enable_if::value, void>::type + do_klass_internal_try_nv(OopClosureType *cl, Klass *klass); + + template + static typename enable_if::value, void>::type + do_klass_internal_try_v(OopClosureType *cl, Klass *klass); + + template + static typename enable_if::value, void>::type + do_klass_internal_try_v(OopClosureType *cl, Klass *klass); + + template + static typename enable_if::value, void>::type + do_class_loader_data_internal_try_nv(OopClosureType *cl, ClassLoaderData *cld); + + template + static typename enable_if::value, void>::type + do_class_loader_data_internal_try_nv(OopClosureType *cl, ClassLoaderData *cld); + + template + static typename enable_if::value, void>::type + do_class_loader_data_internal_try_v(OopClosureType *cl, ClassLoaderData *cld); + + template + static typename enable_if::value, void>::type + do_class_loader_data_internal_try_v(OopClosureType *cl, ClassLoaderData *cld); + + template + static void do_oop_internal(OopClosureType *cl, OopType *obj); + + template + static bool do_metadata_internal(OopClosureType *cl); + + template + static void do_klass_internal(OopClosureType *cl, Klass *klass); + + template + static void do_class_loader_data_internal(OopClosureType *cl, ClassLoaderData *cld); + +public: + // Make sure we only dispatch to OopClosure subtypes, otherwise compiler error + template + static typename enable_if::value, void>::type + do_oop(OopClosureType *cl, OopType *obj); + + // Only do metadata stuff on ExtendedOopClosure, otherwise compiler error + template + static typename enable_if::value, bool>::type + do_metadata(OopClosureType *cl); + + template + static typename enable_if::value, void>::type + do_klass(OopClosureType *cl, Klass *klass); + + template + static typename enable_if::value, void>::type + do_class_loader_data(OopClosureType *cl, ClassLoaderData* cld); +}; class SpecializationStats { public: diff --git a/src/share/vm/memory/specialized_oop_closures.inline.hpp b/src/share/vm/memory/specialized_oop_closures.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/memory/specialized_oop_closures.inline.hpp @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2011, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
+ * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_INLINE_HPP +#define SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_INLINE_HPP + +#include "utilities/templateIdioms.hpp" +#include "memory/specialized_oop_closures.hpp" + +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_oop_internal_try_v(OopClosureType *cl, OopType *obj) { + cl->do_oop(obj); +} + +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_oop_internal_try_v(OopClosureType *cl, OopType *obj) { + cl->OopClosureType::do_oop(obj); +} + +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_oop_internal_try_nv(OopClosureType *cl, OopType *obj) { + do_oop_internal_try_v(cl, obj); +} + +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_oop_internal_try_nv(OopClosureType *cl, OopType *obj) { + // Backward-compatibility - if do_oop_nv is declared, use it + cl->do_oop_nv(obj); +} + +template +inline void OopClosureDispatcher::do_oop_internal(OopClosureType *cl, OopType *obj) { + do_oop_internal_try_nv(cl, obj); +} + +#define OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_OOP_DEF(OopClosureType, OopType) \ +template <> \ +inline void \ +OopClosureDispatcher::do_oop_internal(OopClosureType *cl, OopType *obj) { \ + reinterpret_cast(cl)->do_oop(obj); \ +} + +#define OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_OOP_ALL(OopClosureType) \ +OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_OOP_DEF(OopClosureType, narrowOop) \ +OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_OOP_DEF(OopClosureType, oop) + +UNSPECIALIZED_OOP_OOP_ITERATE_CLOSURES(OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_OOP_ALL) + +template +inline typename enable_if::value, bool>::type +OopClosureDispatcher::do_metadata_internal_try_v(OopClosureType *cl) { + return cl->OopClosureType::do_metadata(); +} + +template +inline typename enable_if::value, bool>::type +OopClosureDispatcher::do_metadata_internal_try_v(OopClosureType *cl) { + return cl->do_metadata(); +} + +// Use _nv call if declared for backward compatibility +template +inline typename enable_if::value, bool>::type +OopClosureDispatcher::do_metadata_internal_try_nv(OopClosureType *cl) { + return cl->OopClosureType::do_metadata_nv(); +} + +// Non-virtualize virtual call if _nv is not declared +template +inline typename enable_if::value, bool>::type +OopClosureDispatcher::do_metadata_internal_try_nv(OopClosureType *cl) { + return do_metadata_internal_try_v(cl); +} + +template +inline bool OopClosureDispatcher::do_metadata_internal(OopClosureType *cl) { + return do_metadata_internal_try_nv(cl); +} + +#define 
OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_METADATA_DEF(OopClosureType) \ +template <> \ +inline bool \ +OopClosureDispatcher::do_metadata_internal(OopClosureType *cl) { \ + return reinterpret_cast(cl)->do_metadata(); \ +} + +UNSPECIALIZED_DO_METADATA_CLOSURES(OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_METADATA_DEF) + +// call _nv for backward compatibility - it is expected to be called if it exists +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_klass_internal_try_v(OopClosureType *cl, Klass *klass) { + cl->OopClosureType::do_klass(klass); +} + +// non-virtualize the virtual call if _nv is not expected +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_klass_internal_try_v(OopClosureType *cl, Klass *klass) { + cl->do_klass(klass); +} + +// call _nv for backward compatibility - it is expected to be called if it exists +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_klass_internal_try_nv(OopClosureType *cl, Klass *klass) { + cl->do_klass_nv(klass); +} + +// non-virtualize the virtual call if _nv is not expected +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_klass_internal_try_nv(OopClosureType *cl, Klass *klass) { + do_klass_internal_try_v(cl, klass); +} + +template +inline void OopClosureDispatcher::do_klass_internal(OopClosureType *cl, Klass *klass) { + do_klass_internal_try_nv(cl, klass); +} + +#define OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_KLASS_DEF(OopClosureType) \ +template <> \ +inline void \ +OopClosureDispatcher::do_klass_internal(OopClosureType *cl, Klass *klass) { \ + reinterpret_cast(cl)->do_klass(klass); \ +} + +UNSPECIALIZED_DO_METADATA_CLOSURES(OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_KLASS_DEF) + +// non-virtualize the virtual call if the declared type also declared the member function +// note that if a super class declared it, we can't safely remove virtual call +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_class_loader_data_internal_try_v(OopClosureType *cl, ClassLoaderData *cld) { + cl->OopClosureType::do_class_loader_data(cld); +} + +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_class_loader_data_internal_try_v(OopClosureType *cl, ClassLoaderData *cld) { + cl->do_class_loader_data(cld); +} + +// call _nv for backward compatibility - it is expected to be called if it exists +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_class_loader_data_internal_try_nv(OopClosureType *cl, ClassLoaderData *cld) { + cl->do_class_loader_data_nv(cld); +} + +// non-virtualize the virtual call if _nv is not expected +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_class_loader_data_internal_try_nv(OopClosureType *cl, ClassLoaderData *cld) { + do_class_loader_data_internal_try_v(cl, cld); +} + +template +inline void OopClosureDispatcher::do_class_loader_data_internal(OopClosureType *cl, ClassLoaderData *cld) { + do_class_loader_data_internal_try_nv(cl, cld); +} + +#define OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_CLD_DEF(OopClosureType) \ +template <> \ +inline void \ +OopClosureDispatcher::do_class_loader_data_internal(OopClosureType *cl, ClassLoaderData *cld) { \ + reinterpret_cast(cl)->do_class_loader_data(cld); \ +} + +UNSPECIALIZED_DO_METADATA_CLOSURES(OOP_CLOSURE_DISPATCHER_UNSPECIALIZED_DO_CLD_DEF) + +// Make sure we only dispatch to OopClosure subtypes, otherwise compiler error +template +inline typename 
enable_if::value, void>::type +OopClosureDispatcher::do_oop(OopClosureType *cl, OopType *obj) { + do_oop_internal(cl, obj); +} + +// Only do metadata stuff on ExtendedOopClosure, otherwise compiler error +template +inline typename enable_if::value, bool>::type +OopClosureDispatcher::do_metadata(OopClosureType *cl) { + return do_metadata_internal(cl); +} + +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_klass(OopClosureType *cl, Klass *klass) { + do_klass_internal(cl, klass); +} + +template +inline typename enable_if::value, void>::type +OopClosureDispatcher::do_class_loader_data(OopClosureType *cl, ClassLoaderData *cld) { + do_class_loader_data_internal(cl, cld); +} + +#endif // SHARE_VM_MEMORY_SPECIALIZED_OOP_CLOSURES_INLINE_HPP + diff --git a/src/share/vm/oops/instanceClassLoaderKlass.hpp b/src/share/vm/oops/instanceClassLoaderKlass.hpp --- a/src/share/vm/oops/instanceClassLoaderKlass.hpp +++ b/src/share/vm/oops/instanceClassLoaderKlass.hpp @@ -47,6 +47,22 @@ InstanceClassLoaderKlass() { assert(DumpSharedSpaces || UseSharedSpaces, "only for CDS"); } + +private: + template + typename enable_if::value, void>::type + do_metadata_if_applicable(oop obj, OopClosureType *cl); + + template + typename enable_if::value, void>::type + do_metadata_if_applicable(oop obj, OopClosureType *cl); + +public: + template + int oop_iterate_and_dispatch(oop obj, OopClosureType *cl); + + int get_linear_oop_intervals(oop obj, OopInterval* &start, int &size) { return -_instance_class_loader_klass; } + // Iterators int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { return oop_oop_iterate_v(obj, blk); diff --git a/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp b/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/oops/instanceClassLoaderKlass.inline.hpp @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2011, 2013, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP +#define SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP + +#include "utilities/templateIdioms.hpp" +#include "instanceClassLoaderKlass.hpp" + +template +inline typename enable_if::value, void>::type +InstanceClassLoaderKlass::do_metadata_if_applicable(oop obj, OopClosureType *cl) {} + +template +inline typename enable_if::value, void>::type +InstanceClassLoaderKlass::do_metadata_if_applicable(oop obj, OopClosureType *cl) { + if (OopClosureDispatcher::do_metadata(cl)) { + ClassLoaderData *cld = java_lang_ClassLoader::loader_data(obj); + if (cld != NULL) { + // Only true for registered class loaders + OopClosureDispatcher::do_class_loader_data(cl, cld); + } + } +} + +template +inline int InstanceClassLoaderKlass::oop_iterate_and_dispatch(oop obj, OopClosureType *cl) { + SpecializationStats::record_iterate_call_nv(SpecializationStats::irk); + int size = InstanceKlass::oop_iterate_and_dispatch(obj, cl); + + do_metadata_if_applicable(obj, cl); + + return size; +} + +#endif // SHARE_VM_OOPS_INSTANCECLASSLOADERKLASS_INLINE_HPP + diff --git a/src/share/vm/oops/instanceKlass.cpp b/src/share/vm/oops/instanceKlass.cpp --- a/src/share/vm/oops/instanceKlass.cpp +++ b/src/share/vm/oops/instanceKlass.cpp @@ -40,7 +40,7 @@ #include "memory/oopFactory.hpp" #include "oops/fieldStreams.hpp" #include "oops/instanceClassLoaderKlass.hpp" -#include "oops/instanceKlass.hpp" +#include "oops/instanceKlass.inline.hpp" #include "oops/instanceMirrorKlass.hpp" #include "oops/instanceOop.hpp" #include "oops/klass.inline.hpp" @@ -1966,37 +1966,12 @@ // Garbage collection -#ifdef ASSERT -template void assert_is_in(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in(o), "should be in heap"); - } +int InstanceKlass::get_linear_oop_intervals(oop obj, OopInterval* &start, int &size) { + assert(sizeof(OopMapBlock) == sizeof(OopInterval), "auto closure specialization assumes same data layout for speed"); + start = (OopInterval*)start_of_nonstatic_oop_maps(); + size = size_helper(); + return nonstatic_oop_map_count(); } -template void assert_is_in_closed_subset(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_closed_subset(o), - err_msg("should be in closed *p " INTPTR_FORMAT " " INTPTR_FORMAT, (address)p, (address)o)); - } -} -template void assert_is_in_reserved(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); - } -} -template void assert_nothing(T *p) {} - -#else -template void assert_is_in(T *p) {} -template void assert_is_in_closed_subset(T *p) {} -template void assert_is_in_reserved(T *p) {} -template void assert_nothing(T *p) {} -#endif // ASSERT // // Macros that iterate over areas of oops which are specialized on type of diff --git a/src/share/vm/oops/instanceKlass.hpp b/src/share/vm/oops/instanceKlass.hpp --- a/src/share/vm/oops/instanceKlass.hpp +++ b/src/share/vm/oops/instanceKlass.hpp @@ -953,6 +953,21 @@ // Naming const char* signature_name() const; +protected: + template static void assert_is_in(T *p); + template static void assert_is_in_closed_subset(T *p); + template static void assert_is_in_reserved(T *p); + template static void 
assert_nothing(T *p); + + template + void oop_iterate_and_dispatch_helper(oop obj, OopClosureType *closure, OopType *start, int count); + +public: + template + int oop_iterate_and_dispatch(oop obj, OopClosureType *closure); + + int get_linear_oop_intervals(oop obj, OopInterval* &start, int &size); + // Iterators int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { return oop_oop_iterate_v(obj, blk); diff --git a/src/share/vm/oops/instanceKlass.inline.hpp b/src/share/vm/oops/instanceKlass.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/oops/instanceKlass.inline.hpp @@ -0,0 +1,100 @@ +/* + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP +#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP + +#include "oops/instanceKlass.hpp" +#include "classfile/javaClasses.hpp" +#include "classfile/systemDictionary.hpp" + +#ifdef ASSERT +template inline void InstanceKlass::assert_is_in(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in(o), "should be in heap"); + } +} +template inline void InstanceKlass::assert_is_in_closed_subset(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in_closed_subset(o), + err_msg("should be in closed *p " INTPTR_FORMAT " " INTPTR_FORMAT, (address)p, (address)o)); + } +} +template inline void InstanceKlass::assert_is_in_reserved(T *p) { + T heap_oop = oopDesc::load_heap_oop(p); + if (!oopDesc::is_null(heap_oop)) { + oop o = oopDesc::decode_heap_oop_not_null(heap_oop); + assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); + } +} +template inline void InstanceKlass::assert_nothing(T *p) {} + +#else +template inline void InstanceKlass::assert_is_in(T *p) {} +template inline void InstanceKlass::assert_is_in_closed_subset(T *p) {} +template inline void InstanceKlass::assert_is_in_reserved(T *p) {} +template inline void InstanceKlass::assert_nothing(T *p) {} +#endif // ASSERT + + +template +void InstanceKlass::oop_iterate_and_dispatch_helper(oop obj, OopClosureType *closure, OopType *start, int count) { + OopType *current = start; + OopType* const end = current + count; + while (current < end) { + assert_is_in_closed_subset(current); + OopClosureDispatcher::do_oop(closure, current); + current++; + } +} + +template +int 
InstanceKlass::oop_iterate_and_dispatch(oop obj, OopClosureType *closure) { + SpecializationStats::record_iterate_call_nv(SpecializationStats::ik); + + // Don't do metadata, let subclasses as only subclasses use this method. + + OopMapBlock* map = start_of_nonstatic_oop_maps(); + OopMapBlock* const end_map = map + nonstatic_oop_map_count(); + if (UseCompressedOops) { + while (map < end_map) { + oop_iterate_and_dispatch_helper(obj, closure, (narrowOop*)obj->obj_field_addr(map->offset()), map->count()); + ++map; + } + } else { + while (map < end_map) { + oop_iterate_and_dispatch_helper(obj, closure, (oop*)obj->obj_field_addr(map->offset()), map->count()); + ++map; + } + } + + return size_helper(); +} + +#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP + diff --git a/src/share/vm/oops/instanceMirrorKlass.cpp b/src/share/vm/oops/instanceMirrorKlass.cpp --- a/src/share/vm/oops/instanceMirrorKlass.cpp +++ b/src/share/vm/oops/instanceMirrorKlass.cpp @@ -51,37 +51,6 @@ int InstanceMirrorKlass::_offset_of_static_fields = 0; -#ifdef ASSERT -template void assert_is_in(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in(o), "should be in heap"); - } -} -template void assert_is_in_closed_subset(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_closed_subset(o), "should be in closed"); - } -} -template void assert_is_in_reserved(T *p) { - T heap_oop = oopDesc::load_heap_oop(p); - if (!oopDesc::is_null(heap_oop)) { - oop o = oopDesc::decode_heap_oop_not_null(heap_oop); - assert(Universe::heap()->is_in_reserved(o), "should be in reserved"); - } -} -template void assert_nothing(T *p) {} - -#else -template void assert_is_in(T *p) {} -template void assert_is_in_closed_subset(T *p) {} -template void assert_is_in_reserved(T *p) {} -template void assert_nothing(T *p) {} -#endif // ASSERT - #define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE( \ T, start_p, count, do_oop, \ assert_fn) \ diff --git a/src/share/vm/oops/instanceMirrorKlass.hpp b/src/share/vm/oops/instanceMirrorKlass.hpp --- a/src/share/vm/oops/instanceMirrorKlass.hpp +++ b/src/share/vm/oops/instanceMirrorKlass.hpp @@ -29,6 +29,7 @@ #include "oops/instanceKlass.hpp" #include "runtime/handles.hpp" #include "utilities/macros.hpp" +#include "utilities/templateIdioms.hpp" // An InstanceMirrorKlass is a specialized InstanceKlass for // java.lang.Class instances. 
These instances are special because @@ -69,11 +70,7 @@ return (HeapWord*)(cast_from_oop(obj) + offset_of_static_fields()); } - static void init_offset_of_static_fields() { - // Cache the offset of the static fields in the Class instance - assert(_offset_of_static_fields == 0, "once"); - _offset_of_static_fields = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->size_helper() << LogHeapWordSize; - } + static void init_offset_of_static_fields(); static int offset_of_static_fields() { return _offset_of_static_fields; @@ -91,6 +88,26 @@ int oop_adjust_pointers(oop obj); void oop_follow_contents(oop obj); +private: + // Some dispatch functions for auto dispatching + template + void oop_iterate_and_dispatch_helper(oop obj, OopClosureType *closure); + + // Make sure metadata only gets dispatched for ExtendedOopClosure using SFINAE + template + typename enable_if::value, void>::type + do_metadata_if_applicable(oop obj, OopClosureType *cl); + + template + typename enable_if::value, void>::type + do_metadata_if_applicable(oop obj, OopClosureType *cl) {} + +public: + template + int oop_iterate_and_dispatch(oop obj, OopClosureType *cl); + + int get_linear_oop_intervals(oop obj, OopInterval* &start, int &size) { return _instance_mirror_klass; } + // Parallel Scavenge and Parallel Old PARALLEL_GC_DECLS diff --git a/src/share/vm/oops/instanceMirrorKlass.inline.hpp b/src/share/vm/oops/instanceMirrorKlass.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/oops/instanceMirrorKlass.inline.hpp @@ -0,0 +1,78 @@ +/* + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). + * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. 
+ * + */ + +#ifndef SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP +#define SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP + +#include "utilities/templateIdioms.hpp" +#include "oops/instanceMirrorKlass.hpp" +#include "oops/instanceKlass.inline.hpp" + +inline void InstanceMirrorKlass::init_offset_of_static_fields() { + // Cache the offset of the static fields in the Class instance + assert(_offset_of_static_fields == 0, "once"); + _offset_of_static_fields = InstanceMirrorKlass::cast(SystemDictionary::Class_klass())->size_helper() << LogHeapWordSize; +} + +// Make sure metadata only gets dispatched for ExtendedOopClosure using SFINAE +template +inline typename enable_if::value, void>::type +InstanceMirrorKlass::do_metadata_if_applicable(oop obj, OopClosureType *cl) { + if (OopClosureDispatcher::do_metadata(cl)) { + OopClosureDispatcher::do_klass(cl, obj->klass()); + } +} + +// Some dispatch functions for auto dispatching +template +inline void InstanceMirrorKlass::oop_iterate_and_dispatch_helper(oop obj, OopClosureType *closure) { + OopType *current = (OopType*)start_of_static_fields(obj); + OopType* const end = current + java_lang_Class::static_oop_field_count(obj); + while (current < end) { + assert_is_in_closed_subset(current); + OopClosureDispatcher::do_oop(closure, current); + current++; + } +} + + +// Auto specailization +template +inline int InstanceMirrorKlass::oop_iterate_and_dispatch(oop obj, OopClosureType *cl) { + SpecializationStats::record_iterate_call_nv(SpecializationStats::irk); + InstanceKlass::oop_iterate_and_dispatch(obj, cl); + + do_metadata_if_applicable(obj, cl); + + if (UseCompressedOops) { + oop_iterate_and_dispatch_helper(obj, cl); + return oop_size(obj); + } else { + oop_iterate_and_dispatch_helper(obj, cl); + return oop_size(obj); + } +} + +#endif // SHARE_VM_OOPS_INSTANCEMIRRORKLASS_INLINE_HPP + diff --git a/src/share/vm/oops/instanceRefKlass.hpp b/src/share/vm/oops/instanceRefKlass.hpp --- a/src/share/vm/oops/instanceRefKlass.hpp +++ b/src/share/vm/oops/instanceRefKlass.hpp @@ -70,6 +70,16 @@ // Parallel Scavenge and Parallel Old PARALLEL_GC_DECLS +private: + template + void do_metadata_if_applicable(oop obj, OopClosureType *cl); + +public: + template + int oop_iterate_and_dispatch(oop obj, OopClosureType *cl); + + int get_linear_oop_intervals(oop obj, OopInterval* &start, int &size) { return -_instance_ref_klass; } + int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { return oop_oop_iterate_v(obj, blk); } diff --git a/src/share/vm/oops/instanceRefKlass.inline.hpp b/src/share/vm/oops/instanceRefKlass.inline.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/oops/instanceRefKlass.inline.hpp @@ -0,0 +1,98 @@ +/* + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP +#define SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP + +#include "utilities/templateIdioms.hpp" +#include "memory/specialized_oop_closures.inline.hpp" +#include "oops/instanceRefKlass.hpp" +#include "oops/instanceKlass.inline.hpp" + +template +inline int InstanceRefKlass::oop_iterate_and_dispatch(oop obj, OopClosureType *cl) { + SpecializationStats::record_iterate_call_nv(SpecializationStats::irk); + int size = InstanceKlass::oop_iterate_and_dispatch(obj, cl); + + if (UseCompressedOops) { + do_metadata_if_applicable::value>(obj, cl); + } else { + do_metadata_if_applicable::value>(obj, cl); + } + + return size; +} + + +template +inline void InstanceRefKlass::do_metadata_if_applicable(oop obj, OopClosureType *cl) { + OopType *disc_addr = (OopType*)java_lang_ref_Reference::discovered_addr(obj); + if (is_extended && reinterpret_cast(cl)->apply_to_weak_ref_discovered_field()) { + OopClosureDispatcher::do_oop(cl, disc_addr); + } + OopType *referent_addr = (OopType*)java_lang_ref_Reference::discovered_addr(obj); + OopType heap_oop = oopDesc::load_heap_oop(referent_addr); + ReferenceProcessor *rp = is_extended ? reinterpret_cast(cl)->_ref_processor : NULL; + if (!oopDesc::is_null(heap_oop)) { + oop referent = oopDesc::decode_heap_oop_not_null(heap_oop); + if (!referent->is_gc_marked() && rp != NULL && rp->discover_reference(obj, reference_type())) { + return; + } else { + /* treat referent as normal oop */ + SpecializationStats::record_do_oop_call_nv(SpecializationStats::irk); + OopClosureDispatcher::do_oop(cl, referent_addr); + } + } + + OopType *next_addr = (OopType*)java_lang_ref_Reference::next_addr(obj); + if (ReferenceProcessor::pending_list_uses_discovered_field()) { + OopType next_oop = oopDesc::load_heap_oop(next_addr); + /* Treat discovered as normal oop, if ref is not "active" (next non-NULL) */ + if (!oopDesc::is_null(next_oop)) { + /* i.e. ref is not "active" */ + debug_only( + if (TraceReferenceGC && PrintGCDetails) { + gclog_or_tty->print_cr(" Process discovered as normal ", INTPTR_FORMAT, disc_addr); + } + ) + } + } else { + /* In the case of older JDKs which do not use the discovered field for */ + /* the pending list, an inactive ref (next != NULL) must always have a */ + /* NULL discovered field. 
*/ + debug_only( + OopType next_oop = oopDesc::load_heap_oop(next_addr); + OopType disc_oop = oopDesc::load_heap_oop(disc_addr); + assert(oopDesc::is_null(next_oop) || oopDesc::is_null(disc_oop), + err_msg("Found an inactive reference " PTR_FORMAT " with a non-NULL discovered field", (oopDesc*)obj)); + ) + } + + /* treat next as normal oop */ + SpecializationStats::record_do_oop_call_nv(SpecializationStats::irk); + OopClosureDispatcher::do_oop(cl, next_addr); +} + +#endif // SHARE_VM_OOPS_INSTANCEREFKLASS_INLINE_HPP + diff --git a/src/share/vm/oops/klass.hpp b/src/share/vm/oops/klass.hpp --- a/src/share/vm/oops/klass.hpp +++ b/src/share/vm/oops/klass.hpp @@ -28,7 +28,7 @@ #include "memory/genOopClosures.hpp" #include "memory/iterator.hpp" #include "memory/memRegion.hpp" -#include "memory/specialized_oop_closures.hpp" +#include "memory/specialized_oop_closures.inline.hpp" #include "oops/klassPS.hpp" #include "oops/metadata.hpp" #include "oops/oop.hpp" @@ -577,6 +577,15 @@ clean_weak_klass_links(is_alive, false /* clean_alive_klasses */); } + struct OopInterval { + int _offset; + uint _size; + }; + + // Returns positive value if oop maps could be returned and the klass doesn't need to know closure type. + // Otherwise returns a negative value corresponding to which klass implementation is being used (DispatchTag). + virtual int get_linear_oop_intervals(oop obj, OopInterval* &start, int &size) { return _unknown_klass; } + // iterators virtual int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) = 0; virtual int oop_oop_iterate_v(oop obj, ExtendedOopClosure* blk) { diff --git a/src/share/vm/oops/objArrayKlass.cpp b/src/share/vm/oops/objArrayKlass.cpp --- a/src/share/vm/oops/objArrayKlass.cpp +++ b/src/share/vm/oops/objArrayKlass.cpp @@ -477,6 +477,19 @@ } #endif // INCLUDE_ALL_GCS +int ObjArrayKlass::get_linear_oop_intervals(oop obj, OopInterval* &start, int &size) +{ + assert(obj->is_array(), "obj must be array"); + objArrayOop a = objArrayOop(obj); + + start->_offset = arrayOopDesc::base_offset_in_bytes(T_OBJECT); //base_offset_in_bytes(); + start->_size = a->length(); + + size = a->object_size(); + + return 1; +} + #define ObjArrayKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ \ int ObjArrayKlass::oop_oop_iterate##nv_suffix(oop obj, \ diff --git a/src/share/vm/oops/objArrayKlass.hpp b/src/share/vm/oops/objArrayKlass.hpp --- a/src/share/vm/oops/objArrayKlass.hpp +++ b/src/share/vm/oops/objArrayKlass.hpp @@ -26,7 +26,7 @@ #define SHARE_VM_OOPS_OBJARRAYKLASS_HPP #include "classfile/classLoaderData.hpp" -#include "memory/specialized_oop_closures.hpp" +#include "memory/specialized_oop_closures.inline.hpp" #include "oops/arrayKlass.hpp" #include "utilities/macros.hpp" @@ -118,6 +118,8 @@ objarray_follow_contents(ParCompactionManager* cm, oop obj, int index); #endif // INCLUDE_ALL_GCS + int get_linear_oop_intervals(oop obj, OopInterval* &start, int &size); + // Iterators int oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { return oop_oop_iterate_v(obj, blk); diff --git a/src/share/vm/oops/oop.hpp b/src/share/vm/oops/oop.hpp --- a/src/share/vm/oops/oop.hpp +++ b/src/share/vm/oops/oop.hpp @@ -27,10 +27,11 @@ #include "memory/iterator.hpp" #include "memory/memRegion.hpp" -#include "memory/specialized_oop_closures.hpp" +#include "memory/specialized_oop_closures.inline.hpp" #include "oops/metadata.hpp" #include "utilities/macros.hpp" #include "utilities/top.hpp" +#include "utilities/templateIdioms.hpp" // oopDesc is the top baseclass for objects classes. 
The {name}Desc classes describe // the format of Java objects so the fields can be accessed from C++. @@ -336,7 +337,30 @@ static BarrierSet* bs() { return _bs; } static void set_bs(BarrierSet* bs) { _bs = bs; } +private: + // Dispatch member functions to oop closures + + template + typename enable_if::value, void>::type + do_metadata_if_applicable(OopClosureType *cl); + + template + typename enable_if::value, void>::type + do_metadata_if_applicable(OopClosureType *cl); + + template + int oop_iterate_dispatch_tag(OopClosureType *blk, DispatchTag tag); + + template + int oop_iterate_internal(OopClosureType *blk); + +public: + template + typename enable_if::value, int>::type + oop_iterate(OopClosureType *blk); + // iterators, returns size of object + #define OOP_ITERATE_DECL(OopClosureType, nv_suffix) \ int oop_iterate(OopClosureType* blk); \ int oop_iterate(OopClosureType* blk, MemRegion mr); // Only in mr. @@ -353,7 +377,10 @@ ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_BACKWARDS_DECL) #endif - int oop_iterate_no_header(OopClosure* bk); + template + typename enable_if::value, int>::type + oop_iterate_no_header(OopClosureType* bk); + int oop_iterate_no_header(OopClosure* bk, MemRegion mr); // identity hash; returns the identity hash key (computes it if necessary) diff --git a/src/share/vm/oops/oop.inline.hpp b/src/share/vm/oops/oop.inline.hpp --- a/src/share/vm/oops/oop.inline.hpp +++ b/src/share/vm/oops/oop.inline.hpp @@ -28,11 +28,15 @@ #include "gc_implementation/shared/ageTable.hpp" #include "gc_implementation/shared/markSweep.inline.hpp" #include "gc_interface/collectedHeap.inline.hpp" +#include "memory/iterator.inline.hpp" #include "memory/barrierSet.inline.hpp" #include "memory/cardTableModRefBS.hpp" #include "memory/genCollectedHeap.hpp" #include "memory/generation.hpp" -#include "memory/specialized_oop_closures.hpp" +#include "memory/specialized_oop_closures.inline.hpp" +#include "oops/instanceMirrorKlass.inline.hpp" +#include "oops/instanceRefKlass.inline.hpp" +#include "oops/instanceClassLoaderKlass.inline.hpp" #include "oops/arrayKlass.hpp" #include "oops/arrayOop.hpp" #include "oops/klass.inline.hpp" @@ -684,6 +688,30 @@ } } +template +inline typename enable_if::value, void>::type +oopDesc::do_metadata_if_applicable(OopClosureType *cl) { + if (OopClosureDispatcher::do_metadata(cl)) { + OopClosureDispatcher::do_klass(cl, klass()); + } +} + +template +inline typename enable_if::value, void>::type +oopDesc::do_metadata_if_applicable(OopClosureType *cl) {} + +template +inline int oopDesc::oop_iterate_dispatch_tag(OopClosureType *blk, DispatchTag tag) { + // Closure is not explicitly specialized; determine which klass with a tag and call an inline dispatch method. 
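+  // The tag identifies the concrete Klass subtype (see DispatchTag in
+  // specialized_oop_closures.hpp, produced via Klass::get_linear_oop_intervals()).
+  // Casting klass() to that subtype lets the templated oop_iterate_and_dispatch()
+  // be inlined, so the closure's do_oop calls resolve through OopClosureDispatcher
+  // instead of going through a virtual oop_oop_iterate call.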
+ switch (tag) { + case _instance_mirror_klass: return static_cast(klass())->oop_iterate_and_dispatch(this, blk); + case _instance_class_loader_klass: return static_cast(klass())->InstanceClassLoaderKlass::oop_iterate_and_dispatch(this, blk); + case _instance_ref_klass: return static_cast(klass())->InstanceRefKlass::oop_iterate_and_dispatch(this, blk); + } + ShouldNotReachHere(); + return -1; +} + inline int oopDesc::adjust_pointers() { debug_only(int check_size = size()); int s = klass()->oop_adjust_pointers(this); @@ -691,24 +719,61 @@ return s; } -#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ - \ -inline int oopDesc::oop_iterate(OopClosureType* blk) { \ - SpecializationStats::record_call(); \ - return klass()->oop_oop_iterate##nv_suffix(this, blk); \ -} \ - \ -inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \ - SpecializationStats::record_call(); \ - return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \ +template +inline int oopDesc::oop_iterate_internal(OopClosureType *blk) { + SpecializationStats::record_call(); + Klass::OopInterval interval; // Allocate space for an interval for arrays just in case it's needed + Klass::OopInterval *current = &interval; + int size; + int count = klass()->get_linear_oop_intervals(this, current, size); + if (count >= 0) { + if (UseCompressedOops) { + for (int i = 0; i < count; i++) { + narrowOop *current_oop = this->obj_field_addr(current->_offset); + for (uint j = 0; j < current->_size; j++) { + OopClosureDispatcher::do_oop(blk, current_oop); + current_oop++; + } + } + current++; + } else { + for (int i = 0; i < count; i++) { + oop *current_oop = this->obj_field_addr(current->_offset); + for (uint j = 0; j < current->_size; j++) { + OopClosureDispatcher::do_oop(blk, current_oop); + current_oop++; + } + } + current++; + } + + // The below call uses SFINAE and does nothing if the closure is not an ExtendedOopClosure + // Otherwise, if it is, it checks if it should send in metadata into the closure too and then does so + do_metadata_if_applicable(blk); + + return size; + } else { + // The Klass is of a slightly more advanced type, falling back to dispatch tag solution. + // This fallback parses the returned tag and identifies the klass implementation and calls it. + return oop_iterate_dispatch_tag(blk, DispatchTag(-count)); + } } +template +inline typename enable_if::value, int>::type +oopDesc::oop_iterate(OopClosureType *blk) { + return oop_iterate_internal(blk); +} -inline int oopDesc::oop_iterate_no_header(OopClosure* blk) { - // The NoHeaderExtendedOopClosure wraps the OopClosure and proxies all - // the do_oop calls, but turns off all other features in ExtendedOopClosure. 
- NoHeaderExtendedOopClosure cl(blk); - return oop_iterate(&cl); +template +inline typename enable_if::value, int>::type +oopDesc::oop_iterate_no_header(OopClosureType *blk) { + if (is_kind_of::value) { + NoHeaderOopClosure cl(blk); + return oop_iterate_internal(&cl); + } else { + return oop_iterate_internal(blk); + } } inline int oopDesc::oop_iterate_no_header(OopClosure* blk, MemRegion mr) { @@ -716,15 +781,27 @@ return oop_iterate(&cl, mr); } +#define OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \ + \ +inline int oopDesc::oop_iterate(OopClosureType* blk) { \ + SpecializationStats::record_call(); \ + return klass()->oop_oop_iterate##nv_suffix(this, blk); \ +} \ + \ +inline int oopDesc::oop_iterate(OopClosureType* blk, MemRegion mr) { \ + SpecializationStats::record_call(); \ + return klass()->oop_oop_iterate##nv_suffix##_m(this, blk, mr); \ +} + ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_DEFN) ALL_OOP_OOP_ITERATE_CLOSURES_2(OOP_ITERATE_DEFN) #if INCLUDE_ALL_GCS -#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ - \ -inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \ - SpecializationStats::record_call(); \ - return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \ +#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \ + \ +inline int oopDesc::oop_iterate_backwards(OopClosureType* blk) { \ + SpecializationStats::record_call(); \ + return klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \ } ALL_OOP_OOP_ITERATE_CLOSURES_1(OOP_ITERATE_BACKWARDS_DEFN) diff --git a/src/share/vm/oops/typeArrayKlass.cpp b/src/share/vm/oops/typeArrayKlass.cpp --- a/src/share/vm/oops/typeArrayKlass.cpp +++ b/src/share/vm/oops/typeArrayKlass.cpp @@ -226,6 +226,13 @@ return t->object_size(); } +int TypeArrayKlass::get_linear_oop_intervals(oop obj, OopInterval* &start, int &size) { + assert(obj->is_typeArray(),"must be a type array"); + typeArrayOop a = typeArrayOop(obj); + size = a->object_size(); + return 0; // there are no oops in here! +} + int TypeArrayKlass::oop_oop_iterate(oop obj, ExtendedOopClosure* blk) { assert(obj->is_typeArray(),"must be a type array"); typeArrayOop t = typeArrayOop(obj); diff --git a/src/share/vm/oops/typeArrayKlass.hpp b/src/share/vm/oops/typeArrayKlass.hpp --- a/src/share/vm/oops/typeArrayKlass.hpp +++ b/src/share/vm/oops/typeArrayKlass.hpp @@ -72,6 +72,8 @@ // Copying void copy_array(arrayOop s, int src_pos, arrayOop d, int dst_pos, int length, TRAPS); + int get_linear_oop_intervals(oop obj, OopInterval* &start, int &size); + // Iteration int oop_oop_iterate(oop obj, ExtendedOopClosure* blk); int oop_oop_iterate_m(oop obj, ExtendedOopClosure* blk, MemRegion mr); diff --git a/src/share/vm/utilities/templateIdioms.hpp b/src/share/vm/utilities/templateIdioms.hpp new file mode 100644 --- /dev/null +++ b/src/share/vm/utilities/templateIdioms.hpp @@ -0,0 +1,137 @@ +/* + * Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved. + * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. + * + * This code is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 only, as + * published by the Free Software Foundation. + * + * This code is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License + * version 2 for more details (a copy is included in the LICENSE file that + * accompanied this code). 
+ * + * You should have received a copy of the GNU General Public License version + * 2 along with this work; if not, write to the Free Software Foundation, + * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. + * + * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA + * or visit www.oracle.com if you need additional information or have any + * questions. + * + */ + +#ifndef SHARE_VM_UTILITIES_TEMPLATE_IDIOMS_HPP +#define SHARE_VM_UTILITIES_TEMPLATE_IDIOMS_HPP + +template +struct IsBaseOfHost +{ + operator Base*() const; + operator Derived*(); +}; + +// Statically check if two types are related in a class hierarchy +template +struct is_base_of +{ + typedef char yes[1]; + typedef char no[2]; + + template + static yes &check(Derived*, T); + static no &check(Base*, int); + + static const bool value = sizeof(check(IsBaseOfHost(), int())) == sizeof(yes); +}; + +// Statically check if two types are equal +template +struct is_same +{ + static const bool value = false; +}; + +template +struct is_same +{ + static const bool value = true; +}; + +template +struct is_kind_of { + static const bool value = is_same::value || is_base_of::value; +}; + +// Enable a template substitution only if a certain condition holds +template +struct enable_if +{}; + +template +struct enable_if +{ + typedef T type; +}; + +// Like enable_if but infuse a dependency to a type not in the parameters to the template. +// This would normally make SFINAE fail, but with the infused dependency, all is fine. +template +struct enable_if_depend: public enable_if<(sizeof(D) == -1 || value), T> +{}; + +// Check for a certain typedef in a class +#define GENERATE_SFINAE_TYPEDEF_CHECK(name) \ +template \ +struct has_typedef_##name { \ + typedef char yes[1]; \ + typedef char no[2]; \ + \ + template \ + static yes& test(typename C::name*); \ + \ + template \ + static no& test(...); \ + \ + static const bool value = sizeof(test(0)) == sizeof(yes); \ +} + +// TODO: Currently generates compiler error when used on forward declarations +// Check for a certain member declaration +#define GENERATE_SFINAE_MEMBER_CHECK(name) \ +template struct has_member_##name { \ + struct Fallback { int name ; }; \ + struct Derived: T, Fallback { }; \ + \ + template struct Check; \ + \ + template static char (&f(Check*))[1];\ + template static char (&f(...))[2]; \ + \ + static bool const value = sizeof(f(0)) == 2; \ +} + +// Checks for certain overload of a member function +#define GENERATE_SFINAE_MEMBER_FUNCTION_CHECK(function) \ +template \ +struct has_member_function_##function { \ + typedef char yes[1]; \ + typedef char no [2]; \ + template struct Check; \ + template static yes &check(Check *); \ + template static no &check(...); \ + static bool const value = sizeof(check(0)) == sizeof(yes); \ +} + +GENERATE_SFINAE_MEMBER_FUNCTION_CHECK(do_oop_nv); +GENERATE_SFINAE_MEMBER_FUNCTION_CHECK(do_metadata_nv); +GENERATE_SFINAE_MEMBER_FUNCTION_CHECK(do_klass_nv); +GENERATE_SFINAE_MEMBER_FUNCTION_CHECK(do_class_loader_data_nv); +GENERATE_SFINAE_MEMBER_FUNCTION_CHECK(do_oop); +GENERATE_SFINAE_MEMBER_FUNCTION_CHECK(do_metadata); +GENERATE_SFINAE_MEMBER_FUNCTION_CHECK(do_klass); +GENERATE_SFINAE_MEMBER_FUNCTION_CHECK(do_class_loader_data); + +#endif // SHARE_VM_UTILITIES_TEMPLATE_IDIOMS_HPP + # HG changeset patch # User eosterlund # Date 1412772591 -7200 # Wed Oct 08 14:49:51 2014 +0200 # Node ID 3283c9b58b45460d380b610e4277299e09e015e4 # Parent 675c881bd17c7a4009f08af223224388dcb344f3 cross platform compiler friendly enum declaration 
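The hunk below only changes how DispatchTag is declared. My reading of the summary line (the patch itself does not elaborate): a trailing comma after the last enumerator is only standard from C++11 on, so strict pre-C++11 compilers warn about or reject it, and the typedef-enum spelling is the most widely accepted form across the toolchains HotSpot builds with. A made-up sketch of the two spellings, using a hypothetical Color enum rather than DispatchTag:

// Old spelling: plain enum with a trailing comma after the last enumerator.
enum Color {
  _red = 1,
  _green,
  _blue,   // trailing comma: rejected by strict C++98/C++03 compilers
};

// New spelling: no trailing comma; the typedef-enum form names the type in
// one declaration and is accepted everywhere.
typedef enum Color {
  _red = 1,
  _green,
  _blue
} Color;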
diff --git a/src/share/vm/memory/specialized_oop_closures.hpp b/src/share/vm/memory/specialized_oop_closures.hpp
--- a/src/share/vm/memory/specialized_oop_closures.hpp
+++ b/src/share/vm/memory/specialized_oop_closures.hpp
@@ -214,12 +214,12 @@
 // For keeping stats on effectiveness.
 #define ENABLE_SPECIALIZATION_STATS 0
 
-enum DispatchTag {
+typedef enum DispatchTag {
   _unknown_klass = 1,
   _instance_mirror_klass,
   _instance_class_loader_klass,
-  _instance_ref_klass,
-};
+  _instance_ref_klass
+} DispatchTag;
 
 /**
  * The OopClosureDispatcher is a proxy class that automatically figures out
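As a closing illustration of the dispatch technique these two changesets introduce: the sketch below is a minimal, standalone C++98-style example of the core idea behind OopClosureDispatcher, using a hand-rolled enable_if plus a member-function detector to call a closure's non-virtual do_oop_nv() when the concrete closure type declares one, and to fall back to the virtual do_oop() otherwise. All names in it (Closure, CountingClosure, PrintingClosure, has_do_oop_nv, dispatch_do_oop) are invented for the example; it is not HotSpot code and deliberately leaves out the explicitly specialized closures and the ExtendedOopClosure metadata handling that the real dispatcher covers.

#include <cstdio>

template <bool B, class T = void> struct enable_if          {};
template <class T>                struct enable_if<true, T> { typedef T type; };

// Detects whether C itself declares "void do_oop_nv(int*)".
template <class C>
struct has_do_oop_nv {
  typedef char yes[1];
  typedef char no[2];
  template <void (C::*)(int*)> struct Check;
  template <class T> static yes& test(Check<&T::do_oop_nv>*);
  template <class T> static no&  test(...);
  static const bool value = sizeof(test<C>(0)) == sizeof(yes);
};

struct Closure {                           // stand-in for OopClosure
  virtual ~Closure() {}
  virtual void do_oop(int* p) = 0;
};

struct CountingClosure : public Closure {  // declares a non-virtual fast path
  int count;
  CountingClosure() : count(0) {}
  void do_oop_nv(int*) { ++count; }
  virtual void do_oop(int* p) { do_oop_nv(p); }
};

struct PrintingClosure : public Closure {  // no _nv member: virtual call only
  virtual void do_oop(int* p) { std::printf("visited %d\n", *p); }
};

// Selected when ClosureType declares do_oop_nv: bound statically, inlinable.
template <class ClosureType>
typename enable_if<has_do_oop_nv<ClosureType>::value, void>::type
dispatch_do_oop(ClosureType* cl, int* p) { cl->do_oop_nv(p); }

// Selected otherwise: ordinary virtual dispatch.
template <class ClosureType>
typename enable_if<!has_do_oop_nv<ClosureType>::value, void>::type
dispatch_do_oop(ClosureType* cl, int* p) { cl->do_oop(p); }

int main() {
  int field = 42;
  CountingClosure counter;
  PrintingClosure printer;
  dispatch_do_oop(&counter, &field);  // resolves to do_oop_nv at compile time
  dispatch_do_oop(&printer, &field);  // falls back to the virtual do_oop
  return counter.count == 1 ? 0 : 1;
}

The same detect-then-select pattern is repeated in the patch for do_metadata, do_klass and do_class_loader_data, with the UNSPECIALIZED_* macros covering the two base closure types that must keep plain virtual dispatch.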