83 } else {
84 oopDesc::encode_store_heap_oop((oop*)addr, value);
85 }
86 }
87
88 // Implementation of all inlined member functions defined in oop.hpp
89 // We need a separate file to avoid circular references
90
// Publish a new mark word with release semantics so that stores done
// before this call become visible to readers that observe the new mark.
void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}
94
// Atomically install new_mark if the header still equals old_mark.
// Returns the header value observed by the CAS (== old_mark on success).
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}
98
// Install the prototype mark word appropriate for this object.
void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}
102
103 inline Klass* oopDesc::klass() const {
104 if (UseCompressedClassPointers) {
105 return Klass::decode_klass_not_null(_metadata._compressed_klass);
106 } else {
107 return _metadata._klass;
108 }
109 }
110
111 Klass* oopDesc::klass_or_null() const volatile {
112 // can be NULL in CMS
113 if (UseCompressedClassPointers) {
114 return Klass::decode_klass(_metadata._compressed_klass);
115 } else {
116 return _metadata._klass;
117 }
118 }
119
// Address of the uncompressed klass field.
Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS and will not work with
  // UseCompressedClassPointers (the assert below checks class pointers,
  // not UseCompressedOops).
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}
126
// Address of the compressed klass field; only valid when
// UseCompressedClassPointers is enabled (asserted).
narrowKlass* oopDesc::compressed_klass_addr() {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  return &_metadata._compressed_klass;
}
131
132 inline void oopDesc::set_klass(Klass* k) {
133 // since klasses are promoted no store check is needed
134 assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
135 assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
136 if (UseCompressedClassPointers) {
137 *compressed_klass_addr() = Klass::encode_klass_not_null(k);
138 } else {
139 *klass_addr() = k;
140 }
141 }
142
// Read the 32-bit gap slot that follows a compressed klass pointer.
int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}
146
147 inline void oopDesc::set_klass_gap(int v) {
148 if (UseCompressedClassPointers) {
149 *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
150 }
151 }
152
// Reuse the klass slot as a list link for GC overflow handling.
void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    // Note: stores a narrow *oop* bit pattern into the narrow-klass slot;
    // decoded again by list_ptr_from_klass().
    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}
162
// Inverse of set_klass_to_list_ptr(): recover the list link stored in
// the klass slot.
oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}
172
// True if this object's klass is k or a subtype of k.
bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}
176
// Object size computed from this object's own klass.
inline int oopDesc::size() {
  return size_given_klass(klass());
}
180
// Compute the size of this object using the supplied klass's layout helper.
// NOTE(review): original lines 198-246 appear to be missing from this
// excerpt; the fast-path code that assigns `s` is not visible here.
int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size. For instances, this is positive and equal to the
  // size. For arrays, this is negative and provides log2 of the
  // array element size. For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead. So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      // || is_typeArray() // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseConcMarkSweepGC) ||
               (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}
265
// Klass-kind predicates, each delegating to the object's klass.
bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
inline bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
270
// Address of the field located `offset` bytes from the start of this object.
void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

// Typed views over field_base() for each primitive Java field type.
jbyte* oopDesc::byte_field_addr(int offset) const { return (jbyte*) field_base(offset); }
jchar* oopDesc::char_field_addr(int offset) const { return (jchar*) field_base(offset); }
jboolean* oopDesc::bool_field_addr(int offset) const { return (jboolean*) field_base(offset); }
jint* oopDesc::int_field_addr(int offset) const { return (jint*) field_base(offset); }
jshort* oopDesc::short_field_addr(int offset) const { return (jshort*) field_base(offset); }
jlong* oopDesc::long_field_addr(int offset) const { return (jlong*) field_base(offset); }
jfloat* oopDesc::float_field_addr(int offset) const { return (jfloat*) field_base(offset); }
jdouble* oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }

// Generic (oop or narrowOop) and address-typed field accessors.
template <class T> T* oopDesc::obj_field_addr(int offset) const { return (T*) field_base(offset); }
address* oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
285
286
287 // Functions for getting and setting oops within instance objects.
288 // If the oops are compressed, the type passed to these overloaded functions
289 // is narrowOop. All functions are overloaded so they can be called by
290 // template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).
292
293 // Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
294 // offset from the heap base. Saving the check for null can save instructions
295 // in inner GC loops so these are separated.
296
297 inline bool check_obj_alignment(oop obj) {
298 return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
299 }
300
301 inline oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
302 assert(!is_null(v), "narrow oop value can never be zero");
303 address base = Universe::narrow_oop_base();
304 int shift = Universe::narrow_oop_shift();
305 oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
306 assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
307 return result;
308 }
309
310 inline oop oopDesc::decode_heap_oop(narrowOop v) {
311 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
312 }
313
// Encode a non-NULL oop as a 32-bit offset from the narrow-oop heap base:
// (address - narrow_oop_base) >> narrow_oop_shift.
narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int shift = Universe::narrow_oop_shift();
  uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  // The shifted delta must fit in the low 32 bits, and decoding must
  // round-trip back to the original oop.
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}
327
328 inline narrowOop oopDesc::encode_heap_oop(oop v) {
329 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
330 }
331
// Load and decode an oop out of the Java heap into a wide oop.
oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Encode and store a heap oop; v must be non-NULL.
void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
346
347 // Encode and store a heap oop allowing for null.
348 void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
499
// Acquire/release accessors for fields that need ordered access.
jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); }
void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
505
// Lock-state queries, all answered by the mark word.
bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

// True if the mark word carries the biased-locking bit pattern.
bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}
517
// used only for asserts
// Sanity-check that `this` plausibly refers to a valid heap object:
// aligned, inside the reserved heap, klass pointer NOT inside the heap,
// and (unless ignore_mark_word) a plausible header.
inline bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  // A valid klass pointer must not point into the Java heap.
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}
538
539
540 // used only for asserts
541 inline bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
542 return this == NULL ? true : is_oop(ignore_mark_word);
543 }
544
#ifndef PRODUCT
// used only for asserts
// Weaker check than is_oop(): in the reserved heap and currently unlocked.
bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT
552
// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

// Delegates the scavengability query to the active heap implementation.
bool oopDesc::is_scavengable() const {
  return Universe::heap()->is_scavengable(this);
}
561
603 if (curMark == oldMark) {
604 return NULL;
605 }
606 // If the CAS was unsuccessful then curMark->is_marked()
607 // should return true as another thread has CAS'd in another
608 // forwarding pointer.
609 oldMark = curMark;
610 }
611 return forwardee();
612 }
613 #endif
614
// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  // decode_pointer() strips the low mark-word bits from the stored address.
  return (oop) mark()->decode_pointer();
}
621
622 // The following method needs to be MT safe.
623 inline uint oopDesc::age() const {
624 assert(!is_forwarded(), "Attempt to read age from forwarded mark");
625 if (has_displaced_mark()) {
626 return displaced_mark()->age();
627 } else {
628 return mark()->age();
629 }
630 }
631
632 void oopDesc::incr_age() {
633 assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
634 if (has_displaced_mark()) {
635 set_displaced_mark(displaced_mark()->incr_age());
636 } else {
637 set_mark(mark()->incr_age());
638 }
639 }
640
641 int oopDesc::ms_adjust_pointers() {
642 debug_only(int check_size = size());
643 int s = klass()->oop_ms_adjust_pointers(this);
|
83 } else {
84 oopDesc::encode_store_heap_oop((oop*)addr, value);
85 }
86 }
87
88 // Implementation of all inlined member functions defined in oop.hpp
89 // We need a separate file to avoid circular references
90
// Publish a new mark word with release semantics so that stores done
// before this call become visible to readers that observe the new mark.
void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}
94
// Atomically install new_mark if the header still equals old_mark.
// Returns the header value observed by the CAS (== old_mark on success).
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}
98
// Install the prototype mark word appropriate for this object.
void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}
102
103 Klass* oopDesc::klass() const {
104 if (UseCompressedClassPointers) {
105 return Klass::decode_klass_not_null(_metadata._compressed_klass);
106 } else {
107 return _metadata._klass;
108 }
109 }
110
111 Klass* oopDesc::klass_or_null() const volatile {
112 // can be NULL in CMS
113 if (UseCompressedClassPointers) {
114 return Klass::decode_klass(_metadata._compressed_klass);
115 } else {
116 return _metadata._klass;
117 }
118 }
119
// Address of the uncompressed klass field.
Klass** oopDesc::klass_addr() {
  // Only used internally and with CMS and will not work with
  // UseCompressedClassPointers (the assert below checks class pointers,
  // not UseCompressedOops).
  assert(!UseCompressedClassPointers, "only supported with uncompressed klass pointers");
  return (Klass**) &_metadata._klass;
}
126
// Address of the compressed klass field; only valid when
// UseCompressedClassPointers is enabled (asserted).
narrowKlass* oopDesc::compressed_klass_addr() {
  assert(UseCompressedClassPointers, "only called by compressed klass pointers");
  return &_metadata._compressed_klass;
}
131
132 void oopDesc::set_klass(Klass* k) {
133 // since klasses are promoted no store check is needed
134 assert(Universe::is_bootstrapping() || k != NULL, "must be a real Klass*");
135 assert(Universe::is_bootstrapping() || k->is_klass(), "not a Klass*");
136 if (UseCompressedClassPointers) {
137 *compressed_klass_addr() = Klass::encode_klass_not_null(k);
138 } else {
139 *klass_addr() = k;
140 }
141 }
142
// Read the 32-bit gap slot that follows a compressed klass pointer.
int oopDesc::klass_gap() const {
  return *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes());
}
146
147 void oopDesc::set_klass_gap(int v) {
148 if (UseCompressedClassPointers) {
149 *(int*)(((intptr_t)this) + klass_gap_offset_in_bytes()) = v;
150 }
151 }
152
// Reuse the klass slot as a list link for GC overflow handling.
void oopDesc::set_klass_to_list_ptr(oop k) {
  // This is only to be used during GC, for from-space objects, so no
  // barrier is needed.
  if (UseCompressedClassPointers) {
    // Note: stores a narrow *oop* bit pattern into the narrow-klass slot;
    // decoded again by list_ptr_from_klass().
    _metadata._compressed_klass = (narrowKlass)encode_heap_oop(k);  // may be null (parnew overflow handling)
  } else {
    _metadata._klass = (Klass*)(address)k;
  }
}
162
// Inverse of set_klass_to_list_ptr(): recover the list link stored in
// the klass slot.
oop oopDesc::list_ptr_from_klass() {
  // This is only to be used during GC, for from-space objects.
  if (UseCompressedClassPointers) {
    return decode_heap_oop((narrowOop)_metadata._compressed_klass);
  } else {
    // Special case for GC
    return (oop)(address)_metadata._klass;
  }
}
172
// True if this object's klass is k or a subtype of k.
bool oopDesc::is_a(Klass* k) const {
  return klass()->is_subtype_of(k);
}
176
// Object size computed from this object's own klass.
int oopDesc::size() {
  return size_given_klass(klass());
}
180
// Compute the size of this object using the supplied klass's layout helper.
// NOTE(review): original lines 198-246 appear to be missing from this
// excerpt; the fast-path code that assigns `s` is not visible here.
int oopDesc::size_given_klass(Klass* klass) {
  int lh = klass->layout_helper();
  int s;

  // lh is now a value computed at class initialization that may hint
  // at the size. For instances, this is positive and equal to the
  // size. For arrays, this is negative and provides log2 of the
  // array element size. For other oops, it is zero and thus requires
  // a virtual call.
  //
  // We go to all this trouble because the size computation is at the
  // heart of phase 2 of mark-compaction, and called for every object,
  // alive or dead. So the speed here is equal in importance to the
  // speed of allocation.

  if (lh > Klass::_lh_neutral_value) {
    if (!Klass::layout_helper_needs_slow_path(lh)) {
      // || is_typeArray() // covers second scenario above
      // If and when UseParallelGC uses the same obj array oop stealing/chunking
      // technique, we will need to suitably modify the assertion.
      assert((s == klass->oop_size(this)) ||
             (Universe::heap()->is_gc_active() &&
              ((is_typeArray() && UseConcMarkSweepGC) ||
               (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
             "wrong array object size");
    } else {
      // Must be zero, so bite the bullet and take the virtual call.
      s = klass->oop_size(this);
    }
  }

  assert(s % MinObjAlignment == 0, "alignment check");
  assert(s > 0, "Bad size calculated");
  return s;
}
265
// Klass-kind predicates, each delegating to the object's klass.
bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
270
// Address of the field located `offset` bytes from the start of this object.
void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }

// Typed views over field_base() for each primitive Java field type.
jbyte* oopDesc::byte_field_addr(int offset) const { return (jbyte*) field_base(offset); }
jchar* oopDesc::char_field_addr(int offset) const { return (jchar*) field_base(offset); }
jboolean* oopDesc::bool_field_addr(int offset) const { return (jboolean*) field_base(offset); }
jint* oopDesc::int_field_addr(int offset) const { return (jint*) field_base(offset); }
jshort* oopDesc::short_field_addr(int offset) const { return (jshort*) field_base(offset); }
jlong* oopDesc::long_field_addr(int offset) const { return (jlong*) field_base(offset); }
jfloat* oopDesc::float_field_addr(int offset) const { return (jfloat*) field_base(offset); }
jdouble* oopDesc::double_field_addr(int offset) const { return (jdouble*) field_base(offset); }
Metadata** oopDesc::metadata_field_addr(int offset) const { return (Metadata**)field_base(offset); }

// Generic (oop or narrowOop) and address-typed field accessors.
template <class T> T* oopDesc::obj_field_addr(int offset) const { return (T*) field_base(offset); }
address* oopDesc::address_field_addr(int offset) const { return (address*) field_base(offset); }
285
286
287 // Functions for getting and setting oops within instance objects.
288 // If the oops are compressed, the type passed to these overloaded functions
289 // is narrowOop. All functions are overloaded so they can be called by
290 // template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).
292
293 // Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
294 // offset from the heap base. Saving the check for null can save instructions
295 // in inner GC loops so these are separated.
296
297 inline bool check_obj_alignment(oop obj) {
298 return cast_from_oop<intptr_t>(obj) % MinObjAlignmentInBytes == 0;
299 }
300
301 oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
302 assert(!is_null(v), "narrow oop value can never be zero");
303 address base = Universe::narrow_oop_base();
304 int shift = Universe::narrow_oop_shift();
305 oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
306 assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
307 return result;
308 }
309
310 oop oopDesc::decode_heap_oop(narrowOop v) {
311 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
312 }
313
// Encode a non-NULL oop as a 32-bit offset from the narrow-oop heap base:
// (address - narrow_oop_base) >> narrow_oop_shift.
narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int shift = Universe::narrow_oop_shift();
  uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  // The shifted delta must fit in the low 32 bits, and decoding must
  // round-trip back to the original oop.
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}
327
328 narrowOop oopDesc::encode_heap_oop(oop v) {
329 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
330 }
331
// Load and decode an oop out of the Java heap into a wide oop.
oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
  return decode_heap_oop_not_null(*p);
}

// Load and decode an oop out of the heap accepting null
oop oopDesc::load_decode_heap_oop(narrowOop* p) {
  return decode_heap_oop(*p);
}

// Encode and store a heap oop; v must be non-NULL.
void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
  *p = encode_heap_oop_not_null(v);
}
346
347 // Encode and store a heap oop allowing for null.
348 void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
499
// Acquire/release accessors for fields that need ordered access.
jdouble oopDesc::double_field_acquire(int offset) const { return OrderAccess::load_acquire(double_field_addr(offset)); }
void oopDesc::release_double_field_put(int offset, jdouble contents) { OrderAccess::release_store(double_field_addr(offset), contents); }

address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
505
// Lock-state queries, all answered by the mark word.
bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

// True if the mark word carries the biased-locking bit pattern.
bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}
517
// used only for asserts
// Sanity-check that `this` plausibly refers to a valid heap object:
// aligned, inside the reserved heap, klass pointer NOT inside the heap,
// and (unless ignore_mark_word) a plausible header.
bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  // A valid klass pointer must not point into the Java heap.
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  return !SafepointSynchronize::is_at_safepoint();
}
538
539
540 // used only for asserts
541 bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
542 return this == NULL ? true : is_oop(ignore_mark_word);
543 }
544
#ifndef PRODUCT
// used only for asserts
// Weaker check than is_oop(): in the reserved heap and currently unlocked.
bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT
552
// Used only for markSweep, scavenging
bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}

// Delegates the scavengability query to the active heap implementation.
bool oopDesc::is_scavengable() const {
  return Universe::heap()->is_scavengable(this);
}
561
603 if (curMark == oldMark) {
604 return NULL;
605 }
606 // If the CAS was unsuccessful then curMark->is_marked()
607 // should return true as another thread has CAS'd in another
608 // forwarding pointer.
609 oldMark = curMark;
610 }
611 return forwardee();
612 }
613 #endif
614
// Note that the forwardee is not the same thing as the displaced_mark.
// The forwardee is used when copying during scavenge and mark-sweep.
// It does need to clear the low two locking- and GC-related bits.
oop oopDesc::forwardee() const {
  // decode_pointer() strips the low mark-word bits from the stored address.
  return (oop) mark()->decode_pointer();
}
621
622 // The following method needs to be MT safe.
623 uint oopDesc::age() const {
624 assert(!is_forwarded(), "Attempt to read age from forwarded mark");
625 if (has_displaced_mark()) {
626 return displaced_mark()->age();
627 } else {
628 return mark()->age();
629 }
630 }
631
632 void oopDesc::incr_age() {
633 assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
634 if (has_displaced_mark()) {
635 set_displaced_mark(displaced_mark()->incr_age());
636 } else {
637 set_mark(mark()->incr_age());
638 }
639 }
640
641 int oopDesc::ms_adjust_pointers() {
642 debug_only(int check_size = size());
643 int s = klass()->oop_ms_adjust_pointers(this);
|