/*
 * Copyright (c) 1997, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP
#define SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP

#include "oops/instanceKlass.hpp"
#include "oops/oop.inline2.hpp"

//
// Macros that iterate over areas of oops, specialized on the type of oop
// pointer (either narrow or wide, depending on UseCompressedOops).
//
// Parameters are:
//   T         - type of oop to point to (either oop or narrowOop)
//   start_p   - starting pointer for the region to iterate over
//   count     - number of oops or narrowOops to iterate over
//   do_oop    - action to perform on each oop (arbitrary C code, which
//               makes it more efficient to put in a macro rather than
//               making it a template function)
//   assert_fn - assert function, which is a template function because
//               performance doesn't matter when asserts are enabled.
#define InstanceKlass_SPECIALIZED_OOP_ITERATE( \
  T, start_p, count, do_oop,                \
  assert_fn)                                \
{                                           \
  T* p         = (T*)(start_p);             \
  T* const end = p + (count);               \
  while (p < end) {                         \
    (assert_fn)(p);                         \
    do_oop;                                 \
    ++p;                                    \
  }                                         \
}
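
// For illustration only (all identifiers here are hypothetical): an
// invocation such as
//
//   InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop, addr, 3,
//                                         cl->do_oop(p), my_assert_fn)
//
// expands to roughly
//
//   {
//     narrowOop* p         = (narrowOop*)(addr);
//     narrowOop* const end = p + (3);
//     while (p < end) {
//       (my_assert_fn)(p);
//       cl->do_oop(p);
//       ++p;
//     }
//   }
//
// Note that the do_oop argument is expected to reference the iteration
// variable p introduced by the macro body.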

// The following macros call specialized macros, passing either oop or
// narrowOop as the specialization type.  These test the UseCompressedOops
// flag.
#define InstanceKlass_OOP_MAP_ITERATE(obj, do_oop, assert_fn)            \
{                                                                        \
  /* Compute oopmap block range. The common case                         \
     is nonstatic_oop_map_size == 1. */                                  \
  OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
  if (UseCompressedOops) {                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(narrowOop,                   \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  } else {                                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_OOP_ITERATE(oop,                         \
        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  }                                                                      \
}
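
// A minimal usage sketch (the closure pointer cl is hypothetical; the
// do_oop expression must name the pointer variable p that the
// specialized macro declares):
//
//   InstanceKlass_OOP_MAP_ITERATE(obj, cl->do_oop(p),
//                                 assert_is_in_closed_subset)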

// The closure's do_metadata() method dictates whether the given closure
// should be applied to the klass pointer in the object header.

template <bool nv, typename OopClosureType>
int InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  /* header */
  if (Devirtualizer<nv, OopClosureType>::do_metadata(closure)) {
    Devirtualizer<nv, OopClosureType>::do_klass(closure, obj->klass());
  }
  InstanceKlass_OOP_MAP_ITERATE(
    obj,
    (Devirtualizer<nv, OopClosureType>::do_oop(closure, p)),
    assert_is_in_closed_subset)
  return size_helper();
}
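
// Sketch of a direct instantiation (hypothetical closure type MyClosure,
// assumed to provide do_oop(oop*)/do_oop(narrowOop*) plus the metadata
// hooks consulted by Devirtualizer):
//
//   MyClosure cl;
//   int size_in_words = ik->oop_oop_iterate<true, MyClosure>(obj, &cl);
//
// With nv == true the Devirtualizer can bind the closure's do_oop
// non-virtually; with nv == false it falls back to virtual dispatch.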

#define InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE( \
  T, start_p, count, do_oop,                \
  assert_fn)                                \
{                                           \
  T* const start = (T*)(start_p);           \
  T*       p     = start + (count);         \
  while (start < p) {                       \
    --p;                                    \
    (assert_fn)(p);                         \
    do_oop;                                 \
  }                                         \
}

#define InstanceKlass_OOP_MAP_REVERSE_ITERATE(obj, do_oop, assert_fn)    \
{                                                                        \
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();          \
  OopMapBlock* map             = start_map + nonstatic_oop_map_count();  \
  if (UseCompressedOops) {                                               \
    while (start_map < map) {                                            \
      --map;                                                             \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(narrowOop,           \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
        do_oop, assert_fn)                                               \
    }                                                                    \
  } else {                                                               \
    while (start_map < map) {                                            \
      --map;                                                             \
      InstanceKlass_SPECIALIZED_OOP_REVERSE_ITERATE(oop,                 \
        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
        do_oop, assert_fn)                                               \
    }                                                                    \
  }                                                                      \
}

#ifndef SERIALGC
template <bool nv, typename OopClosureType>
int InstanceKlass::oop_oop_iterate_backwards(oop obj,
                                             OopClosureType* closure) {
  /* header */
  if (Devirtualizer<nv, OopClosureType>::do_metadata(closure)) {
    Devirtualizer<nv, OopClosureType>::do_klass(closure, obj->klass());
  }
  /* instance variables */
  InstanceKlass_OOP_MAP_REVERSE_ITERATE(
    obj,
    (Devirtualizer<nv, OopClosureType>::do_oop(closure, p)),
    assert_is_in_closed_subset)
  return size_helper();
}
#endif // !SERIALGC

#define InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
  T, start_p, count, low, high,             \
  do_oop, assert_fn)                        \
{                                           \
  T* const l = (T*)(low);                   \
  T* const h = (T*)(high);                  \
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 && \
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,   \
         "bounded region must be properly aligned"); \
  T* p       = (T*)(start_p);               \
  T* end     = p + (count);                 \
  if (p < l) p = l;                         \
  if (end > h) end = h;                     \
  while (p < end) {                         \
    (assert_fn)(p);                         \
    do_oop;                                 \
    ++p;                                    \
  }                                         \
}
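
// Worked clamping example (values are hypothetical): for a block of 8
// oops starting at p, with low == p + 2 and high == p + 6, the clamped
// range is [p + 2, p + 6) and only p[2] through p[5] are visited. A
// block lying entirely outside [low, high) clamps to an empty range
// (p >= end), so none of its oops are processed.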

#define InstanceKlass_BOUNDED_OOP_MAP_ITERATE(obj, low, high, do_oop,    \
                                              assert_fn)                 \
{                                                                        \
  /* Compute oopmap block range. The common case is                      \
     nonstatic_oop_map_size == 1, so we accept the                       \
     usually non-existent extra overhead of examining                    \
     all the maps. */                                                    \
  OopMapBlock* map           = start_of_nonstatic_oop_maps();            \
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();          \
  if (UseCompressedOops) {                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,           \
        obj->obj_field_addr<narrowOop>(map->offset()), map->count(),     \
        low, high,                                                       \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  } else {                                                               \
    while (map < end_map) {                                              \
      InstanceKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,                 \
        obj->obj_field_addr<oop>(map->offset()), map->count(),           \
        low, high,                                                       \
        do_oop, assert_fn)                                               \
      ++map;                                                             \
    }                                                                    \
  }                                                                      \
}

template <bool nv, typename OopClosureType>
int InstanceKlass::oop_oop_iterate_m(oop obj,
                                     OopClosureType* closure,
                                     MemRegion mr) {
  if (Devirtualizer<nv, OopClosureType>::do_metadata(closure)) {
    if (mr.contains(obj)) {
      Devirtualizer<nv, OopClosureType>::do_klass(closure, obj->klass());
    }
  }
  InstanceKlass_BOUNDED_OOP_MAP_ITERATE(
    obj, mr.start(), mr.end(),
    (Devirtualizer<nv, OopClosureType>::do_oop(closure, p)),
    assert_is_in_closed_subset)
  return size_helper();
}
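
// Bounded usage sketch (hypothetical names): restricting iteration to a
// MemRegion, e.g. one card's worth of the heap during a card scan:
//
//   MemRegion mr(card_start, card_end);   // HeapWord* bounds, assumed
//   MyClosure cl;
//   ik->oop_oop_iterate_m<false, MyClosure>(obj, &cl, mr);
//
// Only oop fields whose addresses fall within mr are passed to the
// closure, and the klass slot is processed only if mr contains obj
// itself.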

#endif // SHARE_VM_OOPS_INSTANCEKLASS_INLINE_HPP