src/share/vm/oops/instanceKlass.inline.hpp

rev 10456 : 8151593: Cleanup definition/usage of INLINE/NOINLINE macros and add xlC support
Contributed-by: matthias.baesken@sap.com
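
8151593 replaces the per-file INLINE macro seen below with a shared ALWAYSINLINE macro that is
defined once per toolchain, and adds xlC to the set of supported compilers. The first listing is
the file before the change, the second the file after. For orientation, here is a minimal sketch
of what such a centralized per-compiler macro could look like; the header names and exact
expansions are illustrative assumptions, not quotes from the changeset:

// Hypothetical sketch: one ALWAYSINLINE definition per compiler-specific
// globalDefinitions header, instead of an ad-hoc #ifdef cascade per file.
// Header names and expansions below are assumed for illustration.

// globalDefinitions_visCPP.hpp (Visual C++)
#define ALWAYSINLINE __forceinline

// globalDefinitions_gcc.hpp (gcc/clang)
#define ALWAYSINLINE inline __attribute__((always_inline))

// globalDefinitions_sparcWorks.hpp (Solaris Studio)
#define ALWAYSINLINE __attribute__((always_inline))

// globalDefinitions_xlc.hpp (IBM xlC, the newly supported toolchain)
#define ALWAYSINLINE inline __attribute__((always_inline))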
   1 /*
   2  * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
  26 #define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
  27 
  28 #include "memory/iterator.hpp"
  29 #include "oops/instanceKlass.hpp"
  30 #include "oops/klass.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "utilities/debug.hpp"
  33 #include "utilities/globalDefinitions.hpp"
  34 #include "utilities/macros.hpp"
  35 
  36 // The iteration over the oops in objects is a hot path in the GC code.
  37 // By force inlining the following functions, we get GC performance similar
  38 // to that of the previous macro-based implementation.
  39 #ifdef TARGET_COMPILER_visCPP
  40 #define INLINE __forceinline
  41 #elif defined(TARGET_COMPILER_sparcWorks)
  42 #define INLINE __attribute__((always_inline))
  43 #else
  44 #define INLINE inline
  45 #endif
  46 
  47 template <bool nv, typename T, class OopClosureType>
  48 INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
  49   T* p         = (T*)obj->obj_field_addr<T>(map->offset());
  50   T* const end = p + map->count();
  51 
  52   for (; p < end; ++p) {
  53     Devirtualizer<nv>::do_oop(closure, p);
  54   }
  55 }
  56 
  57 #if INCLUDE_ALL_GCS
  58 template <bool nv, typename T, class OopClosureType>
  59 INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
  60   T* const start = (T*)obj->obj_field_addr<T>(map->offset());
  61   T*       p     = start + map->count();
  62 
  63   while (start < p) {
  64     --p;
  65     Devirtualizer<nv>::do_oop(closure, p);
  66   }
  67 }
  68 #endif
  69 
  70 template <bool nv, typename T, class OopClosureType>
  71 INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
  72   T* p   = (T*)obj->obj_field_addr<T>(map->offset());
  73   T* end = p + map->count();
  74 
  75   T* const l   = (T*)mr.start();
  76   T* const h   = (T*)mr.end();
  77   assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
  78          mask_bits((intptr_t)h, sizeof(T)-1) == 0,
  79          "bounded region must be properly aligned");
  80 
  81   if (p < l) {
  82     p = l;
  83   }
  84   if (end > h) {
  85     end = h;
  86   }
  87 
  88   for (; p < end; ++p) {
  89     Devirtualizer<nv>::do_oop(closure, p);
  90   }
  91 }
  92 
  93 template <bool nv, typename T, class OopClosureType>
  94 INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
  95   OopMapBlock* map           = start_of_nonstatic_oop_maps();
  96   OopMapBlock* const end_map = map + nonstatic_oop_map_count();
  97 
  98   for (; map < end_map; ++map) {
  99     oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
 100   }
 101 }
 102 
 103 #if INCLUDE_ALL_GCS
 104 template <bool nv, typename T, class OopClosureType>
 105 INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
 106   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
 107   OopMapBlock* map             = start_map + nonstatic_oop_map_count();
 108 
 109   while (start_map < map) {
 110     --map;
 111     oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
 112   }
 113 }
 114 #endif
 115 
 116 template <bool nv, typename T, class OopClosureType>
 117 INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
 118   OopMapBlock* map           = start_of_nonstatic_oop_maps();
 119   OopMapBlock* const end_map = map + nonstatic_oop_map_count();
 120 
 121   for (; map < end_map; ++map) {
 122     oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
 123   }
 124 }
 125 
 126 template <bool nv, class OopClosureType>
 127 INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
 128   if (UseCompressedOops) {
 129     oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
 130   } else {
 131     oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
 132   }
 133 }
 134 
 135 #if INCLUDE_ALL_GCS
 136 template <bool nv, class OopClosureType>
 137 INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
 138   if (UseCompressedOops) {
 139     oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
 140   } else {
 141     oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
 142   }
 143 }
 144 #endif
 145 
 146 template <bool nv, class OopClosureType>
 147 INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
 148   if (UseCompressedOops) {
 149     oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
 150   } else {
 151     oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
 152   }
 153 }
 154 
 155 template <bool nv, class OopClosureType>
 156 INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
 157   if (Devirtualizer<nv>::do_metadata(closure)) {
 158     Devirtualizer<nv>::do_klass(closure, this);
 159   }
 160 
 161   oop_oop_iterate_oop_maps<nv>(obj, closure);
 162 
 163   return size_helper();
 164 }
 165 
 166 #if INCLUDE_ALL_GCS
 167 template <bool nv, class OopClosureType>
 168 INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
 169   assert(!Devirtualizer<nv>::do_metadata(closure),
 170       "Code to handle metadata is not implemented");
 171 
 172   oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);
 173 
 174   return size_helper();
 175 }
 176 #endif
 177 
 178 template <bool nv, class OopClosureType>
 179 INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
 180   if (Devirtualizer<nv>::do_metadata(closure)) {
 181     if (mr.contains(obj)) {
 182       Devirtualizer<nv>::do_klass(closure, this);
 183     }
 184   }
 185 
 186   oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);
 187 
 188   return size_helper();
 189 }
 190 
 191 #undef INLINE
 192 
 193 #define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
 194   OOP_OOP_ITERATE_DEFN(          InstanceKlass, OopClosureType, nv_suffix)  \
 195   OOP_OOP_ITERATE_DEFN_BOUNDED(  InstanceKlass, OopClosureType, nv_suffix)  \
 196   OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceKlass, OopClosureType, nv_suffix)
 197 
 198 #endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
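
That is the file before the change. Throughout it, the nv ("non-virtual") template parameter is
what makes the force inlining pay off: when nv is true, Devirtualizer binds the closure's
do_oop_nv member statically, so the per-oop work can be inlined into the oop-map loop instead of
costing a virtual call per element. A simplified sketch of that dispatch helper, assuming the
shape it has in memory/iterator.inline.hpp of this vintage (exact signatures may differ):

// Simplified sketch; the real Devirtualizer has further members
// (do_klass, do_metadata) used by the iterate functions above.
template <bool nv> class Devirtualizer;

template <> class Devirtualizer<true> {   // statically bound
 public:
  template <class OopClosureType, typename T>
  static void do_oop(OopClosureType* closure, T* p) {
    closure->do_oop_nv(p);                // non-virtual, inlinable
  }
};

template <> class Devirtualizer<false> {  // dynamically bound
 public:
  template <class OopClosureType, typename T>
  static void do_oop(OopClosureType* closure, T* p) {
    closure->do_oop(p);                   // virtual dispatch
  }
};

The updated file follows. The only change is that the locally defined INLINE macro is gone and
ALWAYSINLINE, supplied centrally (presumably through the utilities/globalDefinitions.hpp include
already present above), is used instead.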
   1 /*
   2  * Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
  26 #define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
  27 
  28 #include "memory/iterator.hpp"
  29 #include "oops/instanceKlass.hpp"
  30 #include "oops/klass.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "utilities/debug.hpp"
  33 #include "utilities/globalDefinitions.hpp"
  34 #include "utilities/macros.hpp"
  35 
  36 // The iteration over the oops in objects is a hot path in the GC code.
  37 // By force inlining the following functions, we get GC performance similar
  38 // to that of the previous macro-based implementation.
  39 
  40 template <bool nv, typename T, class OopClosureType>
  41 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
  42   T* p         = (T*)obj->obj_field_addr<T>(map->offset());
  43   T* const end = p + map->count();
  44 
  45   for (; p < end; ++p) {
  46     Devirtualizer<nv>::do_oop(closure, p);
  47   }
  48 }
  49 
  50 #if INCLUDE_ALL_GCS
  51 template <bool nv, typename T, class OopClosureType>
  52 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
  53   T* const start = (T*)obj->obj_field_addr<T>(map->offset());
  54   T*       p     = start + map->count();
  55 
  56   while (start < p) {
  57     --p;
  58     Devirtualizer<nv>::do_oop(closure, p);
  59   }
  60 }
  61 #endif
  62 
  63 template <bool nv, typename T, class OopClosureType>
  64 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
  65   T* p   = (T*)obj->obj_field_addr<T>(map->offset());
  66   T* end = p + map->count();
  67 
  68   T* const l   = (T*)mr.start();
  69   T* const h   = (T*)mr.end();
  70   assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
  71          mask_bits((intptr_t)h, sizeof(T)-1) == 0,
  72          "bounded region must be properly aligned");
  73 
  74   if (p < l) {
  75     p = l;
  76   }
  77   if (end > h) {
  78     end = h;
  79   }
  80 
  81   for (; p < end; ++p) {
  82     Devirtualizer<nv>::do_oop(closure, p);
  83   }
  84 }
  85 
  86 template <bool nv, typename T, class OopClosureType>
  87 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
  88   OopMapBlock* map           = start_of_nonstatic_oop_maps();
  89   OopMapBlock* const end_map = map + nonstatic_oop_map_count();
  90 
  91   for (; map < end_map; ++map) {
  92     oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
  93   }
  94 }
  95 
  96 #if INCLUDE_ALL_GCS
  97 template <bool nv, typename T, class OopClosureType>
  98 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
  99   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
 100   OopMapBlock* map             = start_map + nonstatic_oop_map_count();
 101 
 102   while (start_map < map) {
 103     --map;
 104     oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
 105   }
 106 }
 107 #endif
 108 
 109 template <bool nv, typename T, class OopClosureType>
 110 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
 111   OopMapBlock* map           = start_of_nonstatic_oop_maps();
 112   OopMapBlock* const end_map = map + nonstatic_oop_map_count();
 113 
 114   for (; map < end_map; ++map) {
 115     oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
 116   }
 117 }
 118 
 119 template <bool nv, class OopClosureType>
 120 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
 121   if (UseCompressedOops) {
 122     oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
 123   } else {
 124     oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
 125   }
 126 }
 127 
 128 #if INCLUDE_ALL_GCS
 129 template <bool nv, class OopClosureType>
 130 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
 131   if (UseCompressedOops) {
 132     oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
 133   } else {
 134     oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
 135   }
 136 }
 137 #endif
 138 
 139 template <bool nv, class OopClosureType>
 140 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
 141   if (UseCompressedOops) {
 142     oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
 143   } else {
 144     oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
 145   }
 146 }
 147 
 148 template <bool nv, class OopClosureType>
 149 ALWAYSINLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
 150   if (Devirtualizer<nv>::do_metadata(closure)) {
 151     Devirtualizer<nv>::do_klass(closure, this);
 152   }
 153 
 154   oop_oop_iterate_oop_maps<nv>(obj, closure);
 155 
 156   return size_helper();
 157 }
 158 
 159 #if INCLUDE_ALL_GCS
 160 template <bool nv, class OopClosureType>
 161 ALWAYSINLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
 162   assert(!Devirtualizer<nv>::do_metadata(closure),
 163       "Code to handle metadata is not implemented");
 164 
 165   oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);
 166 
 167   return size_helper();
 168 }
 169 #endif
 170 
 171 template <bool nv, class OopClosureType>
 172 ALWAYSINLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
 173   if (Devirtualizer<nv>::do_metadata(closure)) {
 174     if (mr.contains(obj)) {
 175       Devirtualizer<nv>::do_klass(closure, this);
 176     }
 177   }
 178 
 179   oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);
 180 
 181   return size_helper();
 182 }
 183 
 184 #define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
 185   OOP_OOP_ITERATE_DEFN(          InstanceKlass, OopClosureType, nv_suffix)  \
 186   OOP_OOP_ITERATE_DEFN_BOUNDED(  InstanceKlass, OopClosureType, nv_suffix)  \
 187   OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceKlass, OopClosureType, nv_suffix)
 188 
 189 #endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
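
The ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN macro at the end of the file stamps out the
non-template entry points that GC code actually calls, one per closure type and suffix. A rough
sketch of a single expansion, assuming the OOP_OOP_ITERATE_DEFN shape from oops/oop.inline.hpp;
ExampleClosure is a hypothetical closure type used only for illustration:

// For OOP_OOP_ITERATE_DEFN(InstanceKlass, ExampleClosure, _nv) the
// generated function looks roughly like this: it forwards to the
// ALWAYSINLINE template above with nv bound to true, so
// ExampleClosure::do_oop_nv gets inlined into the oop-map walk.
int InstanceKlass::oop_oop_iterate_nv(oop obj, ExampleClosure* closure) {
  return oop_oop_iterate<true>(obj, closure);
}

Because every such instantiation is emitted as a separate out-of-line function, the ALWAYSINLINE
templates never need standalone symbols of their own, which helps the templated scheme match the
performance of the macro-based implementation it replaced (see the comment at the top of the
file).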