/*
 * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP

#include "memory/iterator.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

// The iteration over the oops in objects is a hot path in the GC code.
// By force-inlining the following functions, we get GC performance similar
// to that of the previous macro-based implementation.
#ifdef TARGET_COMPILER_visCPP
#define INLINE __forceinline
#elif defined(TARGET_COMPILER_sparcWorks)
#define INLINE __attribute__((always_inline))
#else
#define INLINE inline
#endif

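// Iterate over the oop fields covered by a single OopMapBlock of obj and
// apply the closure to each of them. T is narrowOop or oop, matching the
// in-heap oop representation.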
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* p         = (T*)obj->obj_field_addr<T>(map->offset());
  T* const end = p + map->count();

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}

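// As above, but visit the oop fields of the map block in reverse order.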
#if INCLUDE_ALL_GCS
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* const start = (T*)obj->obj_field_addr<T>(map->offset());
  T*       p     = start + map->count();

  while (start < p) {
    --p;
    Devirtualizer<nv>::do_oop(closure, p);
  }
}
#endif

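// As above, but only visit the oop fields that lie within the memory
// region mr; the region bounds are asserted to be T-aligned.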
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
  T* p   = (T*)obj->obj_field_addr<T>(map->offset());
  T* end = p + map->count();

  T* const l   = (T*)mr.start();
  T* const h   = (T*)mr.end();
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,
         "bounded region must be properly aligned");

  if (p < l) {
    p = l;
  }
  if (end > h) {
    end = h;
  }

  for (; p < end; ++p) {
    Devirtualizer<nv>::do_oop(closure, p);
  }
}

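// Apply the closure to all oop fields of obj by walking all of this
// klass's non-static oop map blocks.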
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
  }
}

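// Reverse variant: walk the oop map blocks, and the fields within each
// block, in reverse order.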
#if INCLUDE_ALL_GCS
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
  OopMapBlock* map             = start_map + nonstatic_oop_map_count();

  while (start_map < map) {
    --map;
    oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
  }
}
#endif

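// Bounded variant: only oop fields within mr are visited.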
template <bool nv, typename T, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
  }
}

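// The following dispatch functions pick the narrowOop or oop specialization
// based on UseCompressedOops, so that the per-field loops above operate on
// the actual in-heap field size.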
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
  }
}

#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
  } else {
    oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
  }
}
#endif

template <bool nv, class OopClosureType>
INLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (UseCompressedOops) {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
  } else {
    oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
  }
}

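// Top-level iteration over an instance: visit the klass itself if the
// closure requests metadata, then all oop fields. Returns the object size
// in heap words.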
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    Devirtualizer<nv>::do_klass(closure, this);
  }

  oop_oop_iterate_oop_maps<nv>(obj, closure);

  return size_helper();
}

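// Reverse variant of the top-level iteration; metadata handling is not
// supported here.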
#if INCLUDE_ALL_GCS
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  assert(!Devirtualizer<nv>::do_metadata(closure),
         "Code to handle metadata is not implemented");

  oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);

  return size_helper();
}
#endif

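// Bounded variant of the top-level iteration: the klass is visited only if
// obj itself lies within mr, and only oop fields within mr are visited.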
template <bool nv, class OopClosureType>
INLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (Devirtualizer<nv>::do_metadata(closure)) {
    if (mr.contains(obj)) {
      Devirtualizer<nv>::do_klass(closure, this);
    }
  }

  oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);

  return size_helper();
}

#undef INLINE

#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN(          InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN_BOUNDED(  InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceKlass, OopClosureType, nv_suffix)

#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP