15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
26 #define SHARE_VM_OOPS_OOP_INLINE_HPP
27
28 #include "gc/shared/ageTable.hpp"
29 #include "gc/shared/collectedHeap.inline.hpp"
30 #include "gc/shared/genCollectedHeap.hpp"
31 #include "gc/shared/generation.hpp"
32 #include "oops/access.inline.hpp"
33 #include "oops/arrayKlass.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "oops/markOop.inline.hpp"
37 #include "oops/oop.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/orderAccess.inline.hpp"
40 #include "runtime/os.hpp"
41 #include "utilities/align.hpp"
42 #include "utilities/macros.hpp"
43
44 // Implementation of all inlined member functions defined in oop.hpp
45 // We need a separate file to avoid circular references
46
// Store the mark word with release semantics: all stores issued before this
// one (e.g. a displaced header being set up) are visible to any thread that
// subsequently observes the new mark.
void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store(&_mark, m);
}

// Atomically install new_mark iff the current mark word equals old_mark.
// Returns the mark word found in place (equal to old_mark on success),
// per the Atomic::cmpxchg contract.
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark);
}
54
119 OrderAccess::release_store(klass_addr(), k);
120 }
121 }
122
123 #undef CHECK_SET_KLASS
124
// Read the 32-bit "klass gap" slot in the object header.
// NOTE(review): only meaningful when UseCompressedClassPointers is on; the
// setter below guards on that flag but this getter does not — confirm callers
// only use it in the compressed layout.
int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

// Write the klass-gap slot. A no-op when compressed class pointers are off,
// since the slot only exists in that header layout.
void oopDesc::set_klass_gap(int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}
134
// Overwrite the klass field with a list pointer (an oop), repurposing the
// header as a link field.
void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    // The oop is narrowed with the heap-oop encoding and stored in the
    // compressed-klass slot; the two encodings are deliberately conflated here.
    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k); // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

// Inverse of set_klass_to_list_ptr(): recover the list pointer previously
// stashed in the klass field.
oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}
154
// Subtype check: true iff this object's klass is k or a subtype of k.
bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

// Size of this object, derived from its klass' layout information.
int oopDesc::size() {
  return size_given_klass(klass());
}
162
163 int oopDesc::size_given_klass(Klass* klass) {
164 int lh = klass->layout_helper();
165 int s;
166
167 // lh is now a value computed at class initialization that may hint
168 // at the size. For instances, this is positive and equal to the
222 s = klass->oop_size(this);
223 }
224 }
225
226 assert(s > 0, "Oop size must be greater than zero, not %d", s);
227 assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
228 return s;
229 }
230
// Klass-kind predicates; each delegates to the object's Klass.
bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

// Raw interior address at byte `offset` from the object start; no Access
// resolution or barrier is applied.
void* oopDesc::field_addr_raw(int offset) const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
// Same, but the oop is first resolved through the Access API.
// NOTE(review): presumably yields the object's current location under a
// moving GC — confirm against the Access<>::resolve contract.
void* oopDesc::field_addr(int offset) const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }

// Typed view over field_addr_raw.
template <class T>
T* oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }
241
242 // Functions for getting and setting oops within instance objects.
243 // If the oops are compressed, the type passed to these overloaded functions
244 // is narrowOop. All functions are overloaded so they can be called by
245 // template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).
247
248 // Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
249 // offset from the heap base. Saving the check for null can save instructions
250 // in inner GC loops so these are separated.
251
252 inline bool check_obj_alignment(oop obj) {
253 return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
254 }
255
256 oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
257 assert(!is_null(v), "narrow oop value can never be zero");
258 address base = Universe::narrow_oop_base();
259 int shift = Universe::narrow_oop_shift();
260 oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
261 assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
262 return result;
263 }
264
265 oop oopDesc::decode_heap_oop(narrowOop v) {
266 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
267 }
268
// Compress a non-null heap oop into its 32-bit form:
// (v - narrow_oop_base) >> narrow_oop_shift.
narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int shift = Universe::narrow_oop_shift();
  // Byte distance from the encoding base (scale 1).
  uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  // The shifted delta must fit in 32 bits, and decoding must round-trip to v.
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}
282
283 narrowOop oopDesc::encode_heap_oop(oop v) {
284 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
285 }
286
// Raw loads/stores of (possibly compressed) oops; overloaded on the slot
// type so template code compiles the same for both heap layouts.
narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
oop oopDesc::load_heap_oop(oop* p) { return *p; }

void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
void oopDesc::store_heap_oop(oop* p, oop v) { *p = v; }

// Load and decode an oop out of the Java heap into a wide oop.
oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(load_heap_oop(p));
}

// Load and decode an oop out of the heap accepting null
oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(load_heap_oop(p));
}

// Wide-slot overloads: no decoding is needed, just dereference.
oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }

// Wide-slot overloads: no encoding is needed, just assign.
void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Encode and store a heap oop.
void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}

// Encode and store a heap oop allowing for null.
void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
  *p = encode_heap_oop(v);
}
318
// Oop field accessors routed through HeapAccess, so the active GC's
// load/store barriers are applied; decorators let callers add semantics.
template <DecoratorSet decorators>
inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }

inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }

// Primitive field accessors, also via HeapAccess.
inline jbyte oopDesc::byte_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::byte_field_put(int offset, jbyte value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jchar oopDesc::char_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::char_field_put(int offset, jchar value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jboolean oopDesc::bool_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
// Booleans are normalized to 0/1 on store via `& 1`.
inline void oopDesc::bool_field_put(int offset, jboolean value) { HeapAccess<>::store_at(as_oop(), offset, jboolean(value & 1)); }

inline jshort oopDesc::short_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::short_field_put(int offset, jshort value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jint oopDesc::int_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::int_field_put(int offset, jint value) { HeapAccess<>::store_at(as_oop(), offset, value); }
507 return oop_iterate_size(&cl, mr);
508 }
509
// oop_iterate_backwards is only defined when the GCs that use it are built
// in; otherwise the macro expands to nothing.
#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
 \
inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \
}
#else
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif // INCLUDE_ALL_GCS

// Bundle the three per-closure definitions so each closure type gets its
// forward, sizing, and (conditional) backwards iterators in one expansion.
#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix) \
  OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix) \
  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

// Instantiate the iterate members for every known closure type.
ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
527
528 intptr_t oopDesc::identity_hash() {
529 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
530 // Note: The mark must be read into local variable to avoid concurrent updates.
531 markOop mrk = mark();
532 if (mrk->is_unlocked() && !mrk->has_no_hash()) {
533 return mrk->hash();
534 } else if (mrk->is_marked()) {
535 return mrk->hash();
536 } else {
537 return slow_identity_hash();
538 }
539 }
540
// Whether this object's mark word is displaced (stored elsewhere);
// delegates to the mark word helper.
bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}
544
545 markOop oopDesc::displaced_mark() const {
546 return mark()->displaced_mark_helper();
|
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #ifndef SHARE_VM_OOPS_OOP_INLINE_HPP
26 #define SHARE_VM_OOPS_OOP_INLINE_HPP
27
28 #include "gc/shared/ageTable.hpp"
29 #include "gc/shared/collectedHeap.inline.hpp"
30 #include "gc/shared/genCollectedHeap.hpp"
31 #include "gc/shared/generation.hpp"
32 #include "oops/access.inline.hpp"
33 #include "oops/arrayKlass.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/compressedOops.inline.hpp"
36 #include "oops/klass.inline.hpp"
37 #include "oops/markOop.inline.hpp"
38 #include "oops/oop.hpp"
39 #include "runtime/atomic.hpp"
40 #include "runtime/orderAccess.inline.hpp"
41 #include "runtime/os.hpp"
42 #include "utilities/align.hpp"
43 #include "utilities/macros.hpp"
44
45 // Implementation of all inlined member functions defined in oop.hpp
46 // We need a separate file to avoid circular references
47
// Store the mark word with release semantics: all stores issued before this
// one (e.g. a displaced header being set up) are visible to any thread that
// subsequently observes the new mark.
void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store(&_mark, m);
}

// Atomically install new_mark iff the current mark word equals old_mark.
// Returns the mark word found in place (equal to old_mark on success),
// per the Atomic::cmpxchg contract.
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark);
}
55
120 OrderAccess::release_store(klass_addr(), k);
121 }
122 }
123
124 #undef CHECK_SET_KLASS
125
// Read the 32-bit "klass gap" slot in the object header.
// NOTE(review): only meaningful when UseCompressedClassPointers is on; the
// setter below guards on that flag but this getter does not — confirm callers
// only use it in the compressed layout.
int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}

// Write the klass-gap slot. A no-op when compressed class pointers are off,
// since the slot only exists in that header layout.
void oopDesc::set_klass_gap(int v) {
  if (UseCompressedClassPointers) {
    *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
  }
}

// Overwrite the klass field with a list pointer (an oop), repurposing the
// header as a link field.
void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    // The oop is narrowed with the heap-oop encoding and stored in the
    // compressed-klass slot; the two encodings are deliberately conflated here.
    _metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k); // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}

// Inverse of set_klass_to_list_ptr(): recover the list pointer previously
// stashed in the klass field.
oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}

// Subtype check: true iff this object's klass is k or a subtype of k.
bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}

// Size of this object, derived from its klass' layout information.
int oopDesc::size() {
  return size_given_klass(klass());
}
163
164 int oopDesc::size_given_klass(Klass* klass) {
165 int lh = klass->layout_helper();
166 int s;
167
168 // lh is now a value computed at class initialization that may hint
169 // at the size. For instances, this is positive and equal to the
223 s = klass->oop_size(this);
224 }
225 }
226
227 assert(s > 0, "Oop size must be greater than zero, not %d", s);
228 assert(is_object_aligned(s), "Oop size is not properly aligned: %d", s);
229 return s;
230 }
231
// Klass-kind predicates; each delegates to the object's Klass.
bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

// Raw interior address at byte `offset` from the object start; no Access
// resolution or barrier is applied.
void* oopDesc::field_addr_raw(int offset) const { return reinterpret_cast<void*>(cast_from_oop<intptr_t>(as_oop()) + offset); }
// Same, but the oop is first resolved through the Access API.
// NOTE(review): presumably yields the object's current location under a
// moving GC — confirm against the Access<>::resolve contract.
void* oopDesc::field_addr(int offset) const { return Access<>::resolve(as_oop())->field_addr_raw(offset); }

// Typed view over field_addr_raw.
template <class T>
T* oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

// Oop field accessors routed through HeapAccess, so the active GC's
// load/store barriers are applied; decorators let callers add semantics.
template <DecoratorSet decorators>
inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }

inline void oopDesc::obj_field_put(int offset, oop value) { HeapAccess<>::oop_store_at(as_oop(), offset, value); }

// Primitive field accessors, also via HeapAccess.
inline jbyte oopDesc::byte_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::byte_field_put(int offset, jbyte value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jchar oopDesc::char_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::char_field_put(int offset, jchar value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jboolean oopDesc::bool_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
// Booleans are normalized to 0/1 on store via `& 1`.
inline void oopDesc::bool_field_put(int offset, jboolean value) { HeapAccess<>::store_at(as_oop(), offset, jboolean(value & 1)); }

inline jshort oopDesc::short_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::short_field_put(int offset, jshort value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jint oopDesc::int_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::int_field_put(int offset, jint value) { HeapAccess<>::store_at(as_oop(), offset, value); }
431 return oop_iterate_size(&cl, mr);
432 }
433
// oop_iterate_backwards is only defined when the GCs that use it are built
// in; otherwise the macro expands to nothing.
#if INCLUDE_ALL_GCS
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix) \
 \
inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
  klass()->oop_oop_iterate_backwards##nv_suffix(this, blk); \
}
#else
#define OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
#endif // INCLUDE_ALL_GCS

// Bundle the three per-closure definitions so each closure type gets its
// forward, sizing, and (conditional) backwards iterators in one expansion.
#define ALL_OOPDESC_OOP_ITERATE(OopClosureType, nv_suffix) \
  OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
  OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix) \
  OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)

// Instantiate the iterate members for every known closure type.
ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
451
452 bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
453 return obj == NULL || obj->klass()->is_subtype_of(klass);
454 }
455
456 intptr_t oopDesc::identity_hash() {
457 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
458 // Note: The mark must be read into local variable to avoid concurrent updates.
459 markOop mrk = mark();
460 if (mrk->is_unlocked() && !mrk->has_no_hash()) {
461 return mrk->hash();
462 } else if (mrk->is_marked()) {
463 return mrk->hash();
464 } else {
465 return slow_identity_hash();
466 }
467 }
468
// Whether this object's mark word is displaced (stored elsewhere);
// delegates to the mark word helper.
bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}
472
473 markOop oopDesc::displaced_mark() const {
474 return mark()->displaced_mark_helper();
|