1 /*
   2  * Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
  26 #define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
  27 
  28 #include "memory/iterator.hpp"
  29 #include "oops/instanceKlass.hpp"
  30 #include "oops/klass.hpp"
  31 #include "oops/oop.inline.hpp"
  32 #include "utilities/debug.hpp"
  33 #include "utilities/globalDefinitions.hpp"
  34 #include "utilities/macros.hpp"
  35 
  36 // The iteration over the oops in objects is a hot path in the GC code.
  37 // By force inlining the following functions, we get similar GC performance
  38 // as the previous macro based implementation.
  39 
  40 template <bool nv, typename T, class OopClosureType>
  41 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
  42   T* p         = (T*)obj->obj_field_addr<T>(map->offset());
  43   T* const end = p + map->count();
  44 
  45   for (; p < end; ++p) {
  46     Devirtualizer<nv>::do_oop(closure, p);
  47   }
  48 }
  49 
  50 #if INCLUDE_ALL_GCS
  51 template <bool nv, typename T, class OopClosureType>
  52 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
  53   T* const start = (T*)obj->obj_field_addr<T>(map->offset());
  54   T*       p     = start + map->count();
  55 
  56   while (start < p) {
  57     --p;
  58     Devirtualizer<nv>::do_oop(closure, p);
  59   }
  60 }
  61 #endif
  62 
  63 template <bool nv, typename T, class OopClosureType>
  64 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
  65   T* p   = (T*)obj->obj_field_addr<T>(map->offset());
  66   T* end = p + map->count();
  67 
  68   T* const l   = (T*)mr.start();
  69   T* const h   = (T*)mr.end();
  70   assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
  71          mask_bits((intptr_t)h, sizeof(T)-1) == 0,
  72          "bounded region must be properly aligned");
  73 
  74   if (p < l) {
  75     p = l;
  76   }
  77   if (end > h) {
  78     end = h;
  79   }
  80 
  81   for (;p < end; ++p) {
  82     Devirtualizer<nv>::do_oop(closure, p);
  83   }
  84 }
  85 
  86 template <bool nv, typename T, class OopClosureType>
  87 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized(oop obj, OopClosureType* closure) {
  88   OopMapBlock* map           = start_of_nonstatic_oop_maps();
  89   OopMapBlock* const end_map = map + nonstatic_oop_map_count();
  90 
  91   for (; map < end_map; ++map) {
  92     oop_oop_iterate_oop_map<nv, T>(map, obj, closure);
  93   }
  94 }
  95 
  96 #if INCLUDE_ALL_GCS
  97 template <bool nv, typename T, class OopClosureType>
  98 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_reverse(oop obj, OopClosureType* closure) {
  99   OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
 100   OopMapBlock* map             = start_map + nonstatic_oop_map_count();
 101 
 102   while (start_map < map) {
 103     --map;
 104     oop_oop_iterate_oop_map_reverse<nv, T>(map, obj, closure);
 105   }
 106 }
 107 #endif
 108 
 109 template <bool nv, typename T, class OopClosureType>
 110 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_specialized_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
 111   OopMapBlock* map           = start_of_nonstatic_oop_maps();
 112   OopMapBlock* const end_map = map + nonstatic_oop_map_count();
 113 
 114   for (;map < end_map; ++map) {
 115     oop_oop_iterate_oop_map_bounded<nv, T>(map, obj, closure, mr);
 116   }
 117 }
 118 
 119 template <bool nv, class OopClosureType>
 120 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
 121   if (UseCompressedOops) {
 122     oop_oop_iterate_oop_maps_specialized<nv, narrowOop>(obj, closure);
 123   } else {
 124     oop_oop_iterate_oop_maps_specialized<nv, oop>(obj, closure);
 125   }
 126 }
 127 
 128 #if INCLUDE_ALL_GCS
 129 template <bool nv, class OopClosureType>
 130 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
 131   if (UseCompressedOops) {
 132     oop_oop_iterate_oop_maps_specialized_reverse<nv, narrowOop>(obj, closure);
 133   } else {
 134     oop_oop_iterate_oop_maps_specialized_reverse<nv, oop>(obj, closure);
 135   }
 136 }
 137 #endif
 138 
 139 template <bool nv, class OopClosureType>
 140 ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
 141   if (UseCompressedOops) {
 142     oop_oop_iterate_oop_maps_specialized_bounded<nv, narrowOop>(obj, closure, mr);
 143   } else {
 144     oop_oop_iterate_oop_maps_specialized_bounded<nv, oop>(obj, closure, mr);
 145   }
 146 }
 147 
 148 template <bool nv, class OopClosureType>
 149 ALWAYSINLINE int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
 150   if (Devirtualizer<nv>::do_metadata(closure)) {
 151     Devirtualizer<nv>::do_klass(closure, this);
 152   }
 153 
 154   oop_oop_iterate_oop_maps<nv>(obj, closure);
 155 
 156   return size_helper();
 157 }
 158 
 159 #if INCLUDE_ALL_GCS
 160 template <bool nv, class OopClosureType>
 161 ALWAYSINLINE int InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
 162   assert(!Devirtualizer<nv>::do_metadata(closure),
 163       "Code to handle metadata is not implemented");
 164 
 165   oop_oop_iterate_oop_maps_reverse<nv>(obj, closure);
 166 
 167   return size_helper();
 168 }
 169 #endif
 170 
 171 template <bool nv, class OopClosureType>
 172 ALWAYSINLINE int InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
 173   if (Devirtualizer<nv>::do_metadata(closure)) {
 174     if (mr.contains(obj)) {
 175       Devirtualizer<nv>::do_klass(closure, this);
 176     }
 177   }
 178 
 179   oop_oop_iterate_oop_maps_bounded<nv>(obj, closure, mr);
 180 
 181   return size_helper();
 182 }
 183 
// Instantiates the forward, bounded, and backwards oop_oop_iterate entry
// points of InstanceKlass for a given closure type / nv-suffix pair.
// Expanded once per closure type by the GC specialization machinery.
#define ALL_INSTANCE_KLASS_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN(          InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN_BOUNDED(  InstanceKlass, OopClosureType, nv_suffix)  \
  OOP_OOP_ITERATE_DEFN_BACKWARDS(InstanceKlass, OopClosureType, nv_suffix)
 188 
 189 #endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP