/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_INSTANCEKLASS_INLINE_HPP
#define SHARE_OOPS_INSTANCEKLASS_INLINE_HPP

#include "memory/iterator.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"

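// The accessors below publish and read pointer fields that are set after the
// klass is created, without taking a lock: release_store makes the pointed-to
// data visible before the pointer itself, and the paired load_acquire
// guarantees that a reader observing the new pointer also observes that data.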
inline Klass* InstanceKlass::array_klasses_acquire() const {
  return Atomic::load_acquire(&_array_klasses);
}

inline void InstanceKlass::release_set_array_klasses(Klass* k) {
  Atomic::release_store(&_array_klasses, k);
}

inline jmethodID* InstanceKlass::methods_jmethod_ids_acquire() const {
  return Atomic::load_acquire(&_methods_jmethod_ids);
}

inline void InstanceKlass::release_set_methods_jmethod_ids(jmethodID* jmeths) {
  Atomic::release_store(&_methods_jmethod_ids, jmeths);
}

// The iteration over the oops in objects is a hot path in the GC code.
// By force-inlining the following functions, we get GC performance similar
// to that of the previous macro-based implementation.

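// Each OopMapBlock describes one contiguous run of oop fields in an instance:
// the offset of the first field in the run and the number of fields.
// The helper below visits every field of a single block.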
template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* p         = (T*)obj->obj_field_addr_raw<T>(map->offset());
  T* const end = p + map->count();

  for (; p < end; ++p) {
    Devirtualizer::do_oop(closure, p);
  }
}

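// As above, but the fields of the block are visited in reverse order,
// from the last one down to the first.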
template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_reverse(OopMapBlock* map, oop obj, OopClosureType* closure) {
  T* const start = (T*)obj->obj_field_addr_raw<T>(map->offset());
  T*       p     = start + map->count();

  while (start < p) {
    --p;
    Devirtualizer::do_oop(closure, p);
  }
}

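// Bounded variant: the block's field range is clamped to the MemRegion mr,
// so only fields whose addresses lie inside mr are visited.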
template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_map_bounded(OopMapBlock* map, oop obj, OopClosureType* closure, MemRegion mr) {
  T* p   = (T*)obj->obj_field_addr_raw<T>(map->offset());
  T* end = p + map->count();

  T* const l   = (T*)mr.start();
  T* const h   = (T*)mr.end();
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,
         "bounded region must be properly aligned");

  if (p < l) {
    p = l;
  }
  if (end > h) {
    end = h;
  }

  for (; p < end; ++p) {
    Devirtualizer::do_oop(closure, p);
  }
}

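// The next three functions apply the per-block helpers above to every
// nonstatic oop map block of this klass (forward, reverse and bounded).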
template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps(oop obj, OopClosureType* closure) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map<T>(map, obj, closure);
  }
}

template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_reverse(oop obj, OopClosureType* closure) {
  OopMapBlock* const start_map = start_of_nonstatic_oop_maps();
  OopMapBlock* map             = start_map + nonstatic_oop_map_count();

  while (start_map < map) {
    --map;
    oop_oop_iterate_oop_map_reverse<T>(map, obj, closure);
  }
}

template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_oop_maps_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  OopMapBlock* map           = start_of_nonstatic_oop_maps();
  OopMapBlock* const end_map = map + nonstatic_oop_map_count();

  for (; map < end_map; ++map) {
    oop_oop_iterate_oop_map_bounded<T>(map, obj, closure, mr);
  }
}

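// Top-level iteration over an instance: visit the klass as metadata if the
// closure requests it, then visit all of the instance's oop fields.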
template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate(oop obj, OopClosureType* closure) {
  if (Devirtualizer::do_metadata(closure)) {
    Devirtualizer::do_klass(closure, this);
  }

  oop_oop_iterate_oop_maps<T>(obj, closure);
}

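// Reverse variant: callers must use closures that do not visit metadata,
// as the assert below documents.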
template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  assert(!Devirtualizer::do_metadata(closure),
      "Code to handle metadata is not implemented");

  oop_oop_iterate_oop_maps_reverse<T>(obj, closure);
}

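// Bounded variant: the klass is visited only when the object itself lies
// within mr, and the field iteration is clamped to mr.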
template <typename T, class OopClosureType>
ALWAYSINLINE void InstanceKlass::oop_oop_iterate_bounded(oop obj, OopClosureType* closure, MemRegion mr) {
  if (Devirtualizer::do_metadata(closure)) {
    if (mr.contains(obj)) {
      Devirtualizer::do_klass(closure, this);
    }
  }

  oop_oop_iterate_oop_maps_bounded<T>(obj, closure, mr);
}

#endif // SHARE_OOPS_INSTANCEKLASS_INLINE_HPP