/*
 * Copyright (c) 2011 Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc_implementation/shared/markSweep.inline.hpp"
#include "gc_interface/collectedHeap.inline.hpp"
#include "memory/genOopClosures.inline.hpp"
#include "memory/oopFactory.hpp"
#include "memory/permGen.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceOop.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/handles.inline.hpp"
#ifndef SERIALGC
#include "gc_implementation/g1/g1CollectedHeap.inline.hpp"
#include "gc_implementation/g1/g1OopClosures.inline.hpp"
#include "gc_implementation/g1/g1RemSet.inline.hpp"
#include "gc_implementation/g1/heapRegionSeq.inline.hpp"
#include "gc_implementation/parNew/parOopClosures.inline.hpp"
#include "gc_implementation/parallelScavenge/psPromotionManager.inline.hpp"
#include "gc_implementation/parallelScavenge/psScavenge.inline.hpp"
#include "oops/oop.pcgc.inline.hpp"
#endif

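// Byte offset of the static field block within a java.lang.Class
// (mirror) instance.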
int instanceMirrorKlass::_offset_of_static_fields = 0;

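// Debug-only helpers passed as the assert_fn argument of the iteration
// macros below.  Each checks that a non-null oop read from *p lies within
// the expected part of the heap; in product builds they compile to nothing.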
#ifdef ASSERT
template <class T> void assert_is_in(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in(o), "should be in heap");
  }
}
template <class T> void assert_is_in_closed_subset(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_closed_subset(o), "should be in closed");
  }
}
template <class T> void assert_is_in_reserved(T *p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
    assert(Universe::heap()->is_in_reserved(o), "should be in reserved");
  }
}
template <class T> void assert_nothing(T *p) {}

#else
template <class T> void assert_is_in(T *p) {}
template <class T> void assert_is_in_closed_subset(T *p) {}
template <class T> void assert_is_in_reserved(T *p) {}
template <class T> void assert_nothing(T *p) {}
#endif // ASSERT

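// Iterate over 'count' oop slots starting at start_p, running assert_fn
// and then do_oop on each slot p.  T is either oop or narrowOop.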
#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE( \
  T, start_p, count, do_oop,                         \
  assert_fn)                                         \
{                                                    \
  T* p         = (T*)(start_p);                      \
  T* const end = p + (count);                        \
  while (p < end) {                                  \
    (assert_fn)(p);                                  \
    do_oop;                                          \
    ++p;                                             \
  }                                                  \
}

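// Same as above, but the iteration is clipped to the [low, high) region.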
#define InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE( \
  T, start_p, count, low, high,                              \
  do_oop, assert_fn)                                         \
{                                                            \
  T* const l = (T*)(low);                                    \
  T* const h = (T*)(high);                                   \
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&         \
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,           \
         "bounded region must be properly aligned");         \
  T* p       = (T*)(start_p);                                \
  T* end     = p + (count);                                  \
  if (p < l) p = l;                                          \
  if (end > h) end = h;                                      \
  while (p < end) {                                          \
    (assert_fn)(p);                                          \
    do_oop;                                                  \
    ++p;                                                     \
  }                                                          \
}


// The following macros call the specialized macros above, passing either
// oop or narrowOop as the specialization type, depending on the
// UseCompressedOops flag.
#define InstanceMirrorKlass_OOP_ITERATE(start_p, count,    \
                                  do_oop, assert_fn)       \
{                                                          \
  if (UseCompressedOops) {                                 \
    InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(narrowOop, \
      start_p, count,                                      \
      do_oop, assert_fn)                                   \
  } else {                                                 \
    InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE(oop,       \
      start_p, count,                                      \
      do_oop, assert_fn)                                   \
  }                                                        \
}

#define InstanceMirrorKlass_BOUNDED_OOP_ITERATE(start_p, count, low, high, \
                                          do_oop, assert_fn)               \
{                                                                          \
  if (UseCompressedOops) {                                                 \
    InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(narrowOop,         \
      start_p, count,                                                      \
      low, high,                                                           \
      do_oop, assert_fn)                                                   \
  } else {                                                                 \
    InstanceMirrorKlass_SPECIALIZED_BOUNDED_OOP_ITERATE(oop,               \
      start_p, count,                                                      \
      low, high,                                                           \
      do_oop, assert_fn)                                                   \
  }                                                                        \
}


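// MarkSweep: after following the instance fields of the mirror itself,
// mark and push the static fields it holds for the mirrored class.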
void instanceMirrorKlass::oop_follow_contents(oop obj) {
  instanceKlass::oop_follow_contents(obj);
  InstanceMirrorKlass_OOP_ITERATE(                                                    \
    start_of_static_fields(obj), java_lang_Class::static_oop_field_size(obj),         \
    MarkSweep::mark_and_push(p),                                                      \
    assert_is_in_closed_subset)
}

#ifndef SERIALGC
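// Parallel-compact variant of the above, marking through the given
// ParCompactionManager.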
void instanceMirrorKlass::oop_follow_contents(ParCompactionManager* cm,
                                              oop obj) {
  instanceKlass::oop_follow_contents(cm, obj);
  InstanceMirrorKlass_OOP_ITERATE(                                                    \
    start_of_static_fields(obj), java_lang_Class::static_oop_field_size(obj),         \
    PSParallelCompact::mark_and_push(cm, p),                                          \
    assert_is_in)
}
#endif // SERIALGC

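// MarkSweep: adjust the static field pointers after the instance fields.
// The size is computed before any pointers are changed.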
int instanceMirrorKlass::oop_adjust_pointers(oop obj) {
  int size = oop_size(obj);
  instanceKlass::oop_adjust_pointers(obj);
  InstanceMirrorKlass_OOP_ITERATE(                                                    \
    start_of_static_fields(obj), java_lang_Class::static_oop_field_size(obj),         \
    MarkSweep::adjust_pointer(p),                                                     \
    assert_nothing)
  return size;
}

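// Bodies shared by the oop_oop_iterate definitions below: apply the closure
// to the mirror's static fields (optionally bounded by a MemRegion) and
// return the object size.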
#define InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(T, nv_suffix)                \
  InstanceMirrorKlass_OOP_ITERATE(                                                    \
    start_of_static_fields(obj), java_lang_Class::static_oop_field_size(obj),         \
      (closure)->do_oop##nv_suffix(p),                                                \
    assert_is_in_closed_subset)                                                       \
  return oop_size(obj);                                                               \

#define InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(T, nv_suffix, mr)         \
  InstanceMirrorKlass_BOUNDED_OOP_ITERATE(                                            \
    start_of_static_fields(obj), java_lang_Class::static_oop_field_size(obj),         \
    mr.start(), mr.end(),                                                             \
      (closure)->do_oop##nv_suffix(p),                                                \
    assert_is_in_closed_subset)                                                       \
  return oop_size(obj);                                                               \


// Macro to define instanceMirrorKlass::oop_oop_iterate for virtual/nonvirtual
// versions of all closures.  It invokes the body macros above for each oop
// size.

#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN(OopClosureType, nv_suffix)           \
                                                                                      \
int instanceMirrorKlass::                                                             \
oop_oop_iterate##nv_suffix(oop obj, OopClosureType* closure) {                        \
  /* Get size before changing pointers */                                             \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);      \
                                                                                      \
  instanceKlass::oop_oop_iterate##nv_suffix(obj, closure);                            \
                                                                                      \
  if (UseCompressedOops) {                                                            \
    InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix);           \
  } else {                                                                            \
    InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix);                 \
  }                                                                                   \
}

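// Backwards variant used by the non-serial collectors: the instance fields
// are iterated in reverse order, while the mirror's static fields are still
// scanned forward.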
#ifndef SERIALGC
#define InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
                                                                                      \
int instanceMirrorKlass::                                                             \
oop_oop_iterate_backwards##nv_suffix(oop obj, OopClosureType* closure) {              \
  /* Get size before changing pointers */                                             \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);      \
                                                                                      \
  instanceKlass::oop_oop_iterate_backwards##nv_suffix(obj, closure);                  \
                                                                                      \
  if (UseCompressedOops) {                                                            \
    InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(narrowOop, nv_suffix);           \
  } else {                                                                            \
    InstanceMirrorKlass_SPECIALIZED_OOP_ITERATE_DEFN(oop, nv_suffix);                 \
  }                                                                                   \
}
#endif // !SERIALGC


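// Bounded variant: only oop slots that fall within the given MemRegion are
// passed to the closure.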
#define InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m(OopClosureType, nv_suffix)         \
                                                                                      \
int instanceMirrorKlass::                                                             \
oop_oop_iterate##nv_suffix##_m(oop obj,                                               \
                               OopClosureType* closure,                               \
                               MemRegion mr) {                                        \
  SpecializationStats::record_iterate_call##nv_suffix(SpecializationStats::irk);      \
                                                                                      \
  instanceKlass::oop_oop_iterate##nv_suffix##_m(obj, closure, mr);                    \
  if (UseCompressedOops) {                                                            \
    InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(narrowOop, nv_suffix, mr);    \
  } else {                                                                            \
    InstanceMirrorKlass_BOUNDED_SPECIALIZED_OOP_ITERATE(oop, nv_suffix, mr);          \
  }                                                                                   \
}

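// Instantiate the definitions above for every closure type.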
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN)
#ifndef SERIALGC
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_BACKWARDS_DEFN)
#endif // SERIALGC
ALL_OOP_OOP_ITERATE_CLOSURES_1(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m)
ALL_OOP_OOP_ITERATE_CLOSURES_2(InstanceMirrorKlass_OOP_OOP_ITERATE_DEFN_m)

#ifndef SERIALGC
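// ParallelScavenge: claim or forward any static-field oops that still need
// to be scavenged.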
void instanceMirrorKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  instanceKlass::oop_push_contents(pm, obj);
  InstanceMirrorKlass_OOP_ITERATE(                                            \
    start_of_static_fields(obj), java_lang_Class::static_oop_field_size(obj), \
    if (PSScavenge::should_scavenge(p)) {                                     \
      pm->claim_or_forward_depth(p);                                          \
    },                                                                        \
    assert_nothing )
}

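// Parallel compaction: update the static field pointers to their new
// locations and return the object size.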
int instanceMirrorKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
  instanceKlass::oop_update_pointers(cm, obj);
  InstanceMirrorKlass_OOP_ITERATE(                                            \
    start_of_static_fields(obj), java_lang_Class::static_oop_field_size(obj), \
    PSParallelCompact::adjust_pointer(p),                                     \
    assert_nothing)
  return oop_size(obj);
}
#endif // SERIALGC

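// Size, in words, of a mirror for klass k: the base java.lang.Class size
// plus room for k's static fields.  Mirrors of primitive types and arrays
// carry no static fields.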
int instanceMirrorKlass::instance_size(KlassHandle k) {
  if (k() != NULL && k->oop_is_instance()) {
    return align_object_size(size_helper() + instanceKlass::cast(k())->static_field_size());
  }
  return size_helper();
}

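// Allocate a mirror for the class k.  With JavaObjectsInPerm the mirror is
// placed in the permanent generation; otherwise it lives in the ordinary
// heap, which requires scavengeable code roots (ScavengeRootsInCode).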
instanceOop instanceMirrorKlass::allocate_instance(KlassHandle k, TRAPS) {
  // Query before forming handle.
  int size = instance_size(k);
  KlassHandle h_k(THREAD, as_klassOop());
  instanceOop i;

  if (JavaObjectsInPerm) {
    i = (instanceOop) CollectedHeap::permanent_obj_allocate(h_k, size, CHECK_NULL);
  } else {
    assert(ScavengeRootsInCode > 0, "must be");
    i = (instanceOop) CollectedHeap::obj_allocate(h_k, size, CHECK_NULL);
  }

  return i;
}

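// The mirror's size is stored in the java.lang.Class instance itself, since
// it varies with the mirrored class's static field count.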
int instanceMirrorKlass::oop_size(oop obj) const {
  return java_lang_Class::oop_size(obj);
}

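// Number of static oop fields in the klass mirrored by obj; zero for
// primitive-type and array mirrors.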
int instanceMirrorKlass::compute_static_oop_field_size(oop obj) {
  klassOop k = java_lang_Class::as_klassOop(obj);
  if (k != NULL && k->klass_part()->oop_is_instance()) {
    return instanceKlass::cast(k)->static_oop_field_size();
  }
  return 0;
}