        Devirtualizer<nv>::do_cld(closure, klass->class_loader_data());
      } else {
        Devirtualizer<nv>::do_klass(closure, klass);
      }
    } else {
      // We would like to assert here (as below) that if klass is NULL, then
      // this is a mirror for a primitive type that we do not need to follow,
      // as primitive-type mirrors are always strong roots.
      // However, we might come across a klass that has just changed during CMS
      // concurrent marking if allocation occurred in the old generation.
      // This is benign here, as we keep alive all CLDs that were loaded during
      // the CMS concurrent phase of class loading, i.e. they will be iterated
      // over and kept alive during remark.
      // assert(java_lang_Class::is_primitive(obj), "Sanity check");
    }
  }

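  // The mirror holds the static fields of the class it represents, so
  // visit those as well.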
  oop_oop_iterate_statics<nv>(obj, closure);
}

#if INCLUDE_ALL_GCS
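// Reverse iteration: visit the instance fields in reverse order, then
// visit the static fields.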
template <bool nv, class OopClosureType>
void InstanceMirrorKlass::oop_oop_iterate_reverse(oop obj, OopClosureType* closure) {
  InstanceKlass::oop_oop_iterate_reverse<nv>(obj, closure);

  InstanceMirrorKlass::oop_oop_iterate_statics<nv>(obj, closure);
}
#endif

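// Iterate over the static fields of obj (a java.lang.Class mirror) that
// fall within the memory region mr. T is the in-heap oop type, i.e. oop
// or narrowOop depending on whether compressed oops are in use.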
template <bool nv, typename T, class OopClosureType>
void InstanceMirrorKlass::oop_oop_iterate_statics_specialized_bounded(oop obj,
                                                                      OopClosureType* closure,
                                                                      MemRegion mr) {
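  // [p, end) spans the static oop fields embedded in the mirror.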
  T* p   = (T*)start_of_static_fields(obj);
  T* end = p + java_lang_Class::static_oop_field_count(obj);

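  // The bounds of mr, used to restrict the iteration to fields that lie
  // inside the region; both ends must be aligned to the size of T.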
  T* const l = (T*)mr.start();
  T* const h = (T*)mr.end();
  assert(mask_bits((intptr_t)l, sizeof(T)-1) == 0 &&
         mask_bits((intptr_t)h, sizeof(T)-1) == 0,
         "bounded region must be properly aligned");