257 // of round_to to size_t to guarantee unsigned division == right shift.
258 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
259 HeapWordSize);
260
261 // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
262 // of an "old copy" of an object array in the young gen so it indicates
263 // the grey portion of an already copied array. This will cause the first
264 // disjunct below to fail if the two comparands are computed across such
265 // a concurrent change.
266 // ParNew also runs with promotion labs (which look like int
267 // filler arrays) which are subject to changing their declared size
268 // when finally retiring a PLAB; this also can cause the first disjunct
269 // to fail for another worker thread that is concurrently walking the block
270 // offset table. Both these invariant failures are benign for their
271 // current uses; we relax the assertion checking to cover these two cases below:
272 // is_objArray() && is_forwarded() // covers first scenario above
273 // || is_typeArray() // covers second scenario above
274 // If and when UseParallelGC uses the same obj array oop stealing/chunking
275 // technique, we will need to suitably modify the assertion.
276 assert((s == klass->oop_size(this)) ||
277 (Universe::heap()->is_gc_active() &&
278 ((is_typeArray() && UseConcMarkSweepGC) ||
279 (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
280 "wrong array object size");
281 } else {
282 // Must be zero, so bite the bullet and take the virtual call.
283 s = klass->oop_size(this);
284 }
285 }
286
287 assert(s % MinObjAlignment == 0, "Oop size is not properly aligned: %d", s);
288 assert(s > 0, "Oop size must be greater than zero, not %d", s);
289 return s;
290 }
291
// Klass-kind queries: each simply delegates to this object's Klass.
bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

// Raw address of the field at byte `offset` from the start of this object.
void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
323 inline bool check_obj_alignment(oop obj) {
324 return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
325 }
326
// Decode a compressed (narrow) oop known to be non-null into a full-width
// oop: result = narrow_oop_base + ((uintptr_t)v << narrow_oop_shift).
oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int shift = Universe::narrow_oop_shift();
  // Widen v to uintptr_t before shifting so the arithmetic is done at
  // pointer width rather than 32 bits.
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
  return result;
}
335
336 oop oopDesc::decode_heap_oop(narrowOop v) {
337 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
338 }
339
// Compress a non-null oop into a narrow oop:
// result = (v - narrow_oop_base) >> narrow_oop_shift.
narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int shift = Universe::narrow_oop_shift();
  // Byte distance from the compressed-oop base (scale factor 1).
  uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  // The shifted delta must fit in the low 32 bits, i.e. in a narrowOop.
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  // Round-trip check; result narrows implicitly to narrowOop here.
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}
353
354 narrowOop oopDesc::encode_heap_oop(oop v) {
355 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
356 }
357
358 // Load and decode an oop out of the Java heap into a wide oop.
359 oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
360 return decode_heap_oop_not_null(*p);
361 }
362
363 // Load and decode an oop out of the heap accepting null
528
// Acquire/release accessors for address-typed fields: the load-acquire pairs
// with the release-store so a reader observing the new value also observes
// everything published before it.
address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
531
// Lock-state queries: all three delegate to this object's mark word.
bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

// True if the mark word carries the biased-locking bias pattern.
bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}
543
// used only for asserts
// Sanity check: does `this` plausibly point at a Java object?  Checks
// alignment, heap containment, klass-field plausibility, and (optionally)
// the mark word.
bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!Universe::heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  // A Klass* should never point into the Java heap; if the klass field
  // does, this cannot be a valid oop.
  if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  // A NULL mark is tolerated only when not at a safepoint (concurrent
  // mutation of the header may be in progress).
  return !SafepointSynchronize::is_at_safepoint();
}
564
565
// used only for asserts
// Like is_oop(), but a NULL oop is also accepted.
bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  // NOTE(review): comparing `this` against NULL is technically undefined
  // behavior in standard C++; kept as-is since assert-site callers may
  // invoke this through a NULL oop — confirm against project conventions.
  return this == NULL ? true : is_oop(ignore_mark_word);
}
570
#ifndef PRODUCT
// used only for asserts
// True iff this object lies in the reserved heap and its mark word shows
// the unlocked pattern.
bool oopDesc::is_unlocked_oop() const {
  if (!Universe::heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT
578
579 // Used only for markSweep, scavenging
580 bool oopDesc::is_gc_marked() const {
581 return mark()->is_marked();
582 }
583
// Delegates the scavengability decision for this object to the heap.
bool oopDesc::is_scavengable() const {
  return Universe::heap()->is_scavengable(this);
}
587
// Used by scavengers
// An object is forwarded when its mark word carries the marked pattern
// installed by forward_to()/cas_forward_to().
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}
594
595 // Used by scavengers
596 void oopDesc::forward_to(oop p) {
597 assert(check_obj_alignment(p),
598 "forwarding to something not aligned");
599 assert(Universe::heap()->is_in_reserved(p),
600 "forwarding to something not in heap");
601 markOop m = markOopDesc::encode_pointer_as_mark(p);
602 assert(m->decode_pointer() == p, "encoding must be reversable");
603 set_mark(m);
604 }
605
// Used by parallel scavengers
// Atomically install a forwarding pointer to `p`, but only if the mark word
// still equals `compare`.  Returns true iff this thread's CAS won.
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversable");
  return cas_set_mark(m, compare) == compare;
}
616
617 #if INCLUDE_ALL_GCS
618 oop oopDesc::forward_to_atomic(oop p) {
619 markOop oldMark = mark();
620 markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
621 markOop curMark;
622
623 assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
624 assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
625
626 while (!oldMark->is_marked()) {
627 curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
628 assert(is_forwarded(), "object should have been forwarded");
629 if (curMark == oldMark) {
630 return NULL;
|
257 // of round_to to size_t to guarantee unsigned division == right shift.
258 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
259 HeapWordSize);
260
261 // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
262 // of an "old copy" of an object array in the young gen so it indicates
263 // the grey portion of an already copied array. This will cause the first
264 // disjunct below to fail if the two comparands are computed across such
265 // a concurrent change.
266 // ParNew also runs with promotion labs (which look like int
267 // filler arrays) which are subject to changing their declared size
268 // when finally retiring a PLAB; this also can cause the first disjunct
269 // to fail for another worker thread that is concurrently walking the block
270 // offset table. Both these invariant failures are benign for their
271 // current uses; we relax the assertion checking to cover these two cases below:
272 // is_objArray() && is_forwarded() // covers first scenario above
273 // || is_typeArray() // covers second scenario above
274 // If and when UseParallelGC uses the same obj array oop stealing/chunking
275 // technique, we will need to suitably modify the assertion.
276 assert((s == klass->oop_size(this)) ||
277 (GC::gc()->heap()->is_gc_active() &&
278 ((is_typeArray() && UseConcMarkSweepGC) ||
279 (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
280 "wrong array object size");
281 } else {
282 // Must be zero, so bite the bullet and take the virtual call.
283 s = klass->oop_size(this);
284 }
285 }
286
287 assert(s % MinObjAlignment == 0, "Oop size is not properly aligned: %d", s);
288 assert(s > 0, "Oop size must be greater than zero, not %d", s);
289 return s;
290 }
291
// Klass-kind queries: each simply delegates to this object's Klass.
bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
bool oopDesc::is_array() const { return klass()->is_array_klass(); }
bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }

// Raw address of the field at byte `offset` from the start of this object.
void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
323 inline bool check_obj_alignment(oop obj) {
324 return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
325 }
326
// Decode a compressed (narrow) oop known to be non-null into a full-width
// oop: result = narrow_oop_base + ((uintptr_t)v << narrow_oop_shift).
oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
  assert(!is_null(v), "narrow oop value can never be zero");
  address base = Universe::narrow_oop_base();
  int shift = Universe::narrow_oop_shift();
  // Widen v to uintptr_t before shifting so the arithmetic is done at
  // pointer width rather than 32 bits.
  oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
  assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
  return result;
}
335
336 oop oopDesc::decode_heap_oop(narrowOop v) {
337 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
338 }
339
// Compress a non-null oop into a narrow oop:
// result = (v - narrow_oop_base) >> narrow_oop_shift.
narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
  assert(!is_null(v), "oop value can never be zero");
  assert(check_obj_alignment(v), "Address not aligned");
  assert(GC::gc()->heap()->is_in_reserved(v), "Address not in heap");
  address base = Universe::narrow_oop_base();
  int shift = Universe::narrow_oop_shift();
  // Byte distance from the compressed-oop base (scale factor 1).
  uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
  assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
  uint64_t result = pd >> shift;
  // The shifted delta must fit in the low 32 bits, i.e. in a narrowOop.
  assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
  // Round-trip check; result narrows implicitly to narrowOop here.
  assert(decode_heap_oop(result) == v, "reversibility");
  return (narrowOop)result;
}
353
354 narrowOop oopDesc::encode_heap_oop(oop v) {
355 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
356 }
357
358 // Load and decode an oop out of the Java heap into a wide oop.
359 oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
360 return decode_heap_oop_not_null(*p);
361 }
362
363 // Load and decode an oop out of the heap accepting null
528
529 address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
530 void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
531
// Lock-state queries: all three delegate to this object's mark word.
bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

// True if the mark word carries the biased-locking bias pattern.
bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}
543
// used only for asserts
// Sanity check: does `this` plausibly point at a Java object?  Checks
// alignment, heap containment, klass-field plausibility, and (optionally)
// the mark word.
bool oopDesc::is_oop(bool ignore_mark_word) const {
  oop obj = (oop) this;
  if (!check_obj_alignment(obj)) return false;
  if (!GC::gc()->heap()->is_in_reserved(obj)) return false;
  // obj is aligned and accessible in heap
  // A Klass* should never point into the Java heap; if the klass field
  // does, this cannot be a valid oop.
  if (GC::gc()->heap()->is_in_reserved(obj->klass_or_null())) return false;

  // Header verification: the mark is typically non-NULL. If we're
  // at a safepoint, it must not be null.
  // Outside of a safepoint, the header could be changing (for example,
  // another thread could be inflating a lock on this object).
  if (ignore_mark_word) {
    return true;
  }
  if (mark() != NULL) {
    return true;
  }
  // A NULL mark is tolerated only when not at a safepoint (concurrent
  // mutation of the header may be in progress).
  return !SafepointSynchronize::is_at_safepoint();
}
564
565
// used only for asserts
// Like is_oop(), but a NULL oop is also accepted.
bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
  // NOTE(review): comparing `this` against NULL is technically undefined
  // behavior in standard C++; kept as-is since assert-site callers may
  // invoke this through a NULL oop — confirm against project conventions.
  return this == NULL ? true : is_oop(ignore_mark_word);
}
570
#ifndef PRODUCT
// used only for asserts
// True iff this object lies in the reserved heap and its mark word shows
// the unlocked pattern.
bool oopDesc::is_unlocked_oop() const {
  if (!GC::gc()->heap()->is_in_reserved(this)) return false;
  return mark()->is_unlocked();
}
#endif // PRODUCT
578
579 // Used only for markSweep, scavenging
580 bool oopDesc::is_gc_marked() const {
581 return mark()->is_marked();
582 }
583
// Delegates the scavengability decision for this object to the heap.
bool oopDesc::is_scavengable() const {
  return GC::gc()->heap()->is_scavengable(this);
}
587
// Used by scavengers
// An object is forwarded when its mark word carries the marked pattern
// installed by forward_to()/cas_forward_to().
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}
594
595 // Used by scavengers
596 void oopDesc::forward_to(oop p) {
597 assert(check_obj_alignment(p),
598 "forwarding to something not aligned");
599 assert(GC::gc()->heap()->is_in_reserved(p),
600 "forwarding to something not in heap");
601 markOop m = markOopDesc::encode_pointer_as_mark(p);
602 assert(m->decode_pointer() == p, "encoding must be reversable");
603 set_mark(m);
604 }
605
// Used by parallel scavengers
// Atomically install a forwarding pointer to `p`, but only if the mark word
// still equals `compare`.  Returns true iff this thread's CAS won.
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(GC::gc()->heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversable");
  return cas_set_mark(m, compare) == compare;
}
616
617 #if INCLUDE_ALL_GCS
618 oop oopDesc::forward_to_atomic(oop p) {
619 markOop oldMark = mark();
620 markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
621 markOop curMark;
622
623 assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
624 assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
625
626 while (!oldMark->is_marked()) {
627 curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
628 assert(is_forwarded(), "object should have been forwarded");
629 if (curMark == oldMark) {
630 return NULL;
|