231 // of round_to to size_t to guarantee unsigned division == right shift.
232 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
233 HeapWordSize);
234
235 // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
236 // of an "old copy" of an object array in the young gen so it indicates
237 // the grey portion of an already copied array. This will cause the first
238 // disjunct below to fail if the two comparands are computed across such
239 // a concurrent change.
240 // ParNew also runs with promotion labs (which look like int
241 // filler arrays) which are subject to changing their declared size
242 // when finally retiring a PLAB; this also can cause the first disjunct
243 // to fail for another worker thread that is concurrently walking the block
244 // offset table. Both these invariant failures are benign for their
245 // current uses; we relax the assertion checking to cover these two cases below:
246 // is_objArray() && is_forwarded() // covers first scenario above
247 // || is_typeArray() // covers second scenario above
248 // If and when UseParallelGC uses the same obj array oop stealing/chunking
249 // technique, we will need to suitably modify the assertion.
250 assert((s == klass->oop_size(this)) ||
251 (Universe::heap()->is_gc_active() &&
252 ((is_typeArray() && UseConcMarkSweepGC) ||
253 (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
254 "wrong array object size");
255 } else {
256 // Must be zero, so bite the bullet and take the virtual call.
257 s = klass->oop_size(this);
258 }
259 }
260
261 assert(s % MinObjAlignment == 0, "Oop size is not properly aligned: %d", s);
262 assert(s > 0, "Oop size must be greater than zero, not %d", s);
263 return s;
264 }
265
// Klass-based type tests: each dispatches through the object's Klass.
266 bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
267 bool oopDesc::is_array() const { return klass()->is_array_klass(); }
268 bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
269 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
270
// Raw address of the field at byte 'offset' from the start of this object.
271 void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
// True when 'obj' is aligned to the minimum object alignment
// (all bits covered by MinObjAlignmentInBytesMask are clear).
297 inline bool check_obj_alignment(oop obj) {
298 return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
299 }
300
// Decode a compressed (narrow) oop known to be non-null:
// result = narrow_oop_base + ((uintptr_t)v << narrow_oop_shift).
301 oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
302 assert(!is_null(v), "narrow oop value can never be zero");
303 address base = Universe::narrow_oop_base();
304 int shift = Universe::narrow_oop_shift();
305 oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
306 assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
307 return result;
308 }
309
// Null-tolerant variant: a zero narrow oop decodes to NULL.
310 oop oopDesc::decode_heap_oop(narrowOop v) {
311 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
312 }
313
// Encode a non-null heap oop into its 32-bit compressed form:
// (v - narrow_oop_base) >> narrow_oop_shift.
314 narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
315 assert(!is_null(v), "oop value can never be zero");
316 assert(check_obj_alignment(v), "Address not aligned");
317 assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
318 address base = Universe::narrow_oop_base();
319 int shift = Universe::narrow_oop_shift();
320 uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
321 assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
322 uint64_t result = pd >> shift;
323 assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
// NOTE(review): 'result' narrows implicitly from uint64_t to narrowOop in this
// call; safe because the overflow assert above guarantees the high 32 bits are zero.
324 assert(decode_heap_oop(result) == v, "reversibility");
325 return (narrowOop)result;
326 }
327
// Null-tolerant variant: NULL encodes to narrow oop 0.
328 narrowOop oopDesc::encode_heap_oop(oop v) {
329 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
330 }
331
332 // Load and decode an oop out of the Java heap into a wide oop.
333 oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
334 return decode_heap_oop_not_null(*p);
335 }
336
337 // Load and decode an oop out of the heap accepting null
502
// Ordered accessors for an address-typed field: the reader's load-acquire
// pairs with the writer's store-release so field contents published before
// the release are visible after the acquire.
503 address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
504 void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
505
// Lock-state queries; all delegate to this object's mark word.
506 bool oopDesc::is_locked() const {
507 return mark()->is_locked();
508 }
509
510 bool oopDesc::is_unlocked() const {
511 return mark()->is_unlocked();
512 }
513
514 bool oopDesc::has_bias_pattern() const {
515 return mark()->has_bias_pattern();
516 }
517
518 // used only for asserts
// Sanity check that 'this' plausibly points at a valid heap object: it must be
// aligned, inside the reserved heap, and its klass pointer must NOT lie inside
// the heap (presumably because Klass metadata lives outside the Java heap —
// a heap-resident "klass" indicates a stale or corrupt header).
519 bool oopDesc::is_oop(bool ignore_mark_word) const {
520 oop obj = (oop) this;
521 if (!check_obj_alignment(obj)) return false;
522 if (!Universe::heap()->is_in_reserved(obj)) return false;
523 // obj is aligned and accessible in heap
524 if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;
525
526 // Header verification: the mark is typically non-NULL. If we're
527 // at a safepoint, it must not be null.
528 // Outside of a safepoint, the header could be changing (for example,
529 // another thread could be inflating a lock on this object).
530 if (ignore_mark_word) {
531 return true;
532 }
533 if (mark() != NULL) {
534 return true;
535 }
// A NULL mark is tolerated only when not at a safepoint (concurrent mutation).
536 return !SafepointSynchronize::is_at_safepoint();
537 }
538
539
540 // used only for asserts
// NOTE(review): comparing 'this' against NULL inside a member function is
// technically undefined behavior in standard C++; this is a long-standing
// HotSpot idiom that relies on compiler leniency, kept as-is here.
541 bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
542 return this == NULL ? true : is_oop(ignore_mark_word);
543 }
544
545 #ifndef PRODUCT
546 // used only for asserts
// Debug-only: object lies in the reserved heap and its mark word reads unlocked.
547 bool oopDesc::is_unlocked_oop() const {
548 if (!Universe::heap()->is_in_reserved(this)) return false;
549 return mark()->is_unlocked();
550 }
551 #endif // PRODUCT
552
553 // Used only for markSweep, scavenging
554 bool oopDesc::is_gc_marked() const {
555 return mark()->is_marked();
556 }
557
// Delegates the scavengability decision to the active heap implementation.
558 bool oopDesc::is_scavengable() const {
559 return Universe::heap()->is_scavengable(this);
560 }
561
562 // Used by scavengers
// "Forwarded" reuses the marked mark-word encoding: during a scavenge a
// marked mark word carries the forwarding pointer.
563 bool oopDesc::is_forwarded() const {
564 // The extra heap check is needed since the obj might be locked, in which case the
565 // mark would point to a stack location and have the sentinel bit cleared
566 return mark()->is_marked();
567 }
568
569 // Used by scavengers
// Install a forwarding pointer to the new copy 'p' in this object's mark word
// via a plain (non-atomic) store.
570 void oopDesc::forward_to(oop p) {
571 assert(check_obj_alignment(p),
572 "forwarding to something not aligned");
573 assert(Universe::heap()->is_in_reserved(p),
574 "forwarding to something not in heap");
575 markOop m = markOopDesc::encode_pointer_as_mark(p);
576 assert(m->decode_pointer() == p, "encoding must be reversable");
577 set_mark(m);
578 }
579
580 // Used by parallel scavengers
// CAS variant of forward_to: succeeds only if the mark word still equals
// 'compare', so racing GC workers install exactly one forwarding pointer.
581 bool oopDesc::cas_forward_to(oop p, markOop compare) {
582 assert(check_obj_alignment(p),
583 "forwarding to something not aligned");
584 assert(Universe::heap()->is_in_reserved(p),
585 "forwarding to something not in heap");
586 markOop m = markOopDesc::encode_pointer_as_mark(p);
587 assert(m->decode_pointer() == p, "encoding must be reversable");
588 return cas_set_mark(m, compare) == compare;
589 }
590
591 #if INCLUDE_ALL_GCS
592 oop oopDesc::forward_to_atomic(oop p) {
593 markOop oldMark = mark();
594 markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
595 markOop curMark;
596
597 assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
598 assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
599
600 while (!oldMark->is_marked()) {
601 curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
602 assert(is_forwarded(), "object should have been forwarded");
603 if (curMark == oldMark) {
604 return NULL;
|
231 // of round_to to size_t to guarantee unsigned division == right shift.
232 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
233 HeapWordSize);
234
235 // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
236 // of an "old copy" of an object array in the young gen so it indicates
237 // the grey portion of an already copied array. This will cause the first
238 // disjunct below to fail if the two comparands are computed across such
239 // a concurrent change.
240 // ParNew also runs with promotion labs (which look like int
241 // filler arrays) which are subject to changing their declared size
242 // when finally retiring a PLAB; this also can cause the first disjunct
243 // to fail for another worker thread that is concurrently walking the block
244 // offset table. Both these invariant failures are benign for their
245 // current uses; we relax the assertion checking to cover these two cases below:
246 // is_objArray() && is_forwarded() // covers first scenario above
247 // || is_typeArray() // covers second scenario above
248 // If and when UseParallelGC uses the same obj array oop stealing/chunking
249 // technique, we will need to suitably modify the assertion.
250 assert((s == klass->oop_size(this)) ||
251 (GC::gc()->heap()->is_gc_active() &&
252 ((is_typeArray() && UseConcMarkSweepGC) ||
253 (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
254 "wrong array object size");
255 } else {
256 // Must be zero, so bite the bullet and take the virtual call.
257 s = klass->oop_size(this);
258 }
259 }
260
261 assert(s % MinObjAlignment == 0, "Oop size is not properly aligned: %d", s);
262 assert(s > 0, "Oop size must be greater than zero, not %d", s);
263 return s;
264 }
265
// Klass-based type tests: each dispatches through the object's Klass.
266 bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
267 bool oopDesc::is_array() const { return klass()->is_array_klass(); }
268 bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
269 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
270
// Raw address of the field at byte 'offset' from the start of this object.
271 void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
// True when 'obj' is aligned to the minimum object alignment
// (all bits covered by MinObjAlignmentInBytesMask are clear).
297 inline bool check_obj_alignment(oop obj) {
298 return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
299 }
300
// Decode a compressed (narrow) oop known to be non-null:
// result = narrow_oop_base + ((uintptr_t)v << narrow_oop_shift).
301 oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
302 assert(!is_null(v), "narrow oop value can never be zero");
303 address base = Universe::narrow_oop_base();
304 int shift = Universe::narrow_oop_shift();
305 oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
306 assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
307 return result;
308 }
309
// Null-tolerant variant: a zero narrow oop decodes to NULL.
310 oop oopDesc::decode_heap_oop(narrowOop v) {
311 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
312 }
313
// Encode a non-null heap oop into its 32-bit compressed form:
// (v - narrow_oop_base) >> narrow_oop_shift. The heap is reached through
// the GC::gc() accessor here.
314 narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
315 assert(!is_null(v), "oop value can never be zero");
316 assert(check_obj_alignment(v), "Address not aligned");
317 assert(GC::gc()->heap()->is_in_reserved(v), "Address not in heap");
318 address base = Universe::narrow_oop_base();
319 int shift = Universe::narrow_oop_shift();
320 uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
321 assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
322 uint64_t result = pd >> shift;
323 assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
// NOTE(review): 'result' narrows implicitly from uint64_t to narrowOop in this
// call; safe because the overflow assert above guarantees the high 32 bits are zero.
324 assert(decode_heap_oop(result) == v, "reversibility");
325 return (narrowOop)result;
326 }
327
// Null-tolerant variant: NULL encodes to narrow oop 0.
328 narrowOop oopDesc::encode_heap_oop(oop v) {
329 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
330 }
331
332 // Load and decode an oop out of the Java heap into a wide oop.
333 oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
334 return decode_heap_oop_not_null(*p);
335 }
336
337 // Load and decode an oop out of the heap accepting null
502
// Ordered accessors for an address-typed field: the reader's load-acquire
// pairs with the writer's store-release so field contents published before
// the release are visible after the acquire.
503 address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
504 void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
505
// Lock-state queries; all delegate to this object's mark word.
506 bool oopDesc::is_locked() const {
507 return mark()->is_locked();
508 }
509
510 bool oopDesc::is_unlocked() const {
511 return mark()->is_unlocked();
512 }
513
514 bool oopDesc::has_bias_pattern() const {
515 return mark()->has_bias_pattern();
516 }
517
518 // used only for asserts
// Sanity check that 'this' plausibly points at a valid heap object: it must be
// aligned, inside the reserved heap (obtained via GC::gc()->heap()), and its
// klass pointer must NOT lie inside the heap (presumably because Klass
// metadata lives outside the Java heap — a heap-resident "klass" indicates a
// stale or corrupt header).
519 bool oopDesc::is_oop(bool ignore_mark_word) const {
520 oop obj = (oop) this;
521 if (!check_obj_alignment(obj)) return false;
522 if (!GC::gc()->heap()->is_in_reserved(obj)) return false;
523 // obj is aligned and accessible in heap
524 if (GC::gc()->heap()->is_in_reserved(obj->klass_or_null())) return false;
525
526 // Header verification: the mark is typically non-NULL. If we're
527 // at a safepoint, it must not be null.
528 // Outside of a safepoint, the header could be changing (for example,
529 // another thread could be inflating a lock on this object).
530 if (ignore_mark_word) {
531 return true;
532 }
533 if (mark() != NULL) {
534 return true;
535 }
// A NULL mark is tolerated only when not at a safepoint (concurrent mutation).
536 return !SafepointSynchronize::is_at_safepoint();
537 }
538
539
540 // used only for asserts
// NOTE(review): comparing 'this' against NULL inside a member function is
// technically undefined behavior in standard C++; this is a long-standing
// HotSpot idiom that relies on compiler leniency, kept as-is here.
541 bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
542 return this == NULL ? true : is_oop(ignore_mark_word);
543 }
544
545 #ifndef PRODUCT
546 // used only for asserts
// Debug-only: object lies in the reserved heap and its mark word reads unlocked.
547 bool oopDesc::is_unlocked_oop() const {
548 if (!GC::gc()->heap()->is_in_reserved(this)) return false;
549 return mark()->is_unlocked();
550 }
551 #endif // PRODUCT
552
553 // Used only for markSweep, scavenging
554 bool oopDesc::is_gc_marked() const {
555 return mark()->is_marked();
556 }
557
// Delegates the scavengability decision to the active heap implementation,
// reached via the GC::gc() accessor.
558 bool oopDesc::is_scavengable() const {
559 return GC::gc()->heap()->is_scavengable(this);
560 }
561
562 // Used by scavengers
// "Forwarded" reuses the marked mark-word encoding: during a scavenge a
// marked mark word carries the forwarding pointer.
563 bool oopDesc::is_forwarded() const {
564 // The extra heap check is needed since the obj might be locked, in which case the
565 // mark would point to a stack location and have the sentinel bit cleared
566 return mark()->is_marked();
567 }
568
569 // Used by scavengers
// Install a forwarding pointer to the new copy 'p' in this object's mark word
// via a plain (non-atomic) store.
570 void oopDesc::forward_to(oop p) {
571 assert(check_obj_alignment(p),
572 "forwarding to something not aligned");
573 assert(GC::gc()->heap()->is_in_reserved(p),
574 "forwarding to something not in heap");
575 markOop m = markOopDesc::encode_pointer_as_mark(p);
576 assert(m->decode_pointer() == p, "encoding must be reversable");
577 set_mark(m);
578 }
579
580 // Used by parallel scavengers
// CAS variant of forward_to: succeeds only if the mark word still equals
// 'compare', so racing GC workers install exactly one forwarding pointer.
581 bool oopDesc::cas_forward_to(oop p, markOop compare) {
582 assert(check_obj_alignment(p),
583 "forwarding to something not aligned");
584 assert(GC::gc()->heap()->is_in_reserved(p),
585 "forwarding to something not in heap");
586 markOop m = markOopDesc::encode_pointer_as_mark(p);
587 assert(m->decode_pointer() == p, "encoding must be reversable");
588 return cas_set_mark(m, compare) == compare;
589 }
590
591 #if INCLUDE_ALL_GCS
592 oop oopDesc::forward_to_atomic(oop p) {
593 markOop oldMark = mark();
594 markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
595 markOop curMark;
596
597 assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
598 assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
599
600 while (!oldMark->is_marked()) {
601 curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
602 assert(is_forwarded(), "object should have been forwarded");
603 if (curMark == oldMark) {
604 return NULL;
|