/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP

#include "memory/iterator.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// The iteration over the oops in objects is a hot path in the GC code.
// By force-inlining the following functions, we get GC performance similar
// to that of the previous macro-based implementation.
#ifdef TARGET_COMPILER_visCPP
#define INLINE __forceinline
#else
#define INLINE inline
#endif

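// Applies the closure to every oop field described by a single OopMapBlock.
// T is narrowOop or oop, matching whether the heap uses compressed oops.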
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* p         = (T*)obj->obj_field_addr<T>(map->offset());
  T* const end = p + map->count();

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}

#if INCLUDE_ALL_GCS
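// As above, but visits the map's oop fields in reverse order.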
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* const start = (T*)obj->obj_field_addr<T>(map->offset());
  T*       p     = start + map->count();

  while (start < p) {
    --p;
    Devirtualizer<nv>::do_oop(closure, p);
  }
}
#endif

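// As above, but clips the visited fields to those whose addresses fall
// within the MemRegion mr.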
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
  T* p   = (T*)obj->obj_field_addr<T>(map->offset());
  T* end = p + map->count();

  T* const l   = (T*)mr.start();
  T* const h   = (T*)mr.end();
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,
         "bounded region must be properly aligned");

  if (p < l) {
    p = l;
  }
  if (end > h) {
    end = h;
  }

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}

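// Walks all the OopMapBlocks of this klass in forward order, applying the
// closure to each block's oop fields.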
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
  }
}

#if INCLUDE_ALL_GCS
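// Walks all the OopMapBlocks in reverse order, visiting each block's
// fields in reverse as well.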
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
  OopMapBlock* map             = start_map + nonstatic_oop_map_count();

  while (start_map < map) {
    --map;
    oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
  }
}
#endif

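// Walks all the OopMapBlocks in forward order, visiting only the fields
// that lie within mr.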
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
  }
}

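// The dispatchers below select the field type once per object, instead of
// testing UseCompressedOops for every field: narrowOop when compressed oops
// are in use, oop otherwise.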
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
  }
}

#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
  }
}
#endif

template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
  } else {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
  }
}

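// Top-level iteration: optionally visits the klass as metadata, then all
// nonstatic oop fields. Returns the object size in heap words.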
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    Devirtualizer<nv>::do_klass(closure, this);
  }

  oop_oop_iterate_oop_maps<nv>(obj, closure);

  return size_helper();
}

#if INCLUDE_ALL_GCS
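// As above, but iterates in reverse order. Metadata handling is not
// implemented here, so the closure must not request it (see the assert).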
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  assert(!Devirtualizer<nv>::do_metadata(closure),
      "Code to handle metadata is not implemented");

  oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);

  return size_helper();
}
#endif

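// Bounded variant: the klass is visited only if the object itself lies
// within mr, and only the oop fields within mr are visited.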
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    if (mr.contains(obj)) {
      Devirtualizer<nv>::do_klass(closure, this);
    }
  }

  oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);

  return size_helper();
}

#undef INLINE

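// Generates the non-template oop_oop_iterate entry points of InstanceKlass,
// one per (closure type, nv_suffix) pair: the plain, bounded (_m), and
// backwards definitions. An illustrative sketch of the expected expansion
// site (the exact invocation lives in the .cpp file and may differ):
//
//   ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN)
//   ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN)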
#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN(          InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN_m(        InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_BACKWARDS_DEFN(InstanceKlass, OopClosureType, nv_suffix)

#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP