34 #include "oops/arrayKlass.hpp"
35 #include "oops/arrayOop.hpp"
36 #include "oops/klass.inline.hpp"
37 #include "oops/markOop.inline.hpp"
38 #include "oops/oop.hpp"
39 #include "runtime/atomic.hpp"
40 #include "runtime/orderAccess.inline.hpp"
41 #include "runtime/os.hpp"
42 #include "utilities/macros.hpp"
43
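// Post-write barrier: tells the collector that the reference field at p now
// points to v; for generational collectors this typically dirties the card
// covering p.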
44 inline void update_barrier_set(void* p, oop v, bool release = false) {
45 assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
46 oopDesc::bs()->write_ref_field(p, v, release);
47 }
48
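// Pre-write barrier: lets collectors that need the overwritten value (for
// example G1's SATB marking) record it before the field is updated.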
49 template <class T> inline void update_barrier_set_pre(T* p, oop v) {
50 oopDesc::bs()->write_ref_field_pre(p, v);
51 }
52
53 template <class T> void oop_store(T* p, oop v) {
54 if (always_do_update_barrier) {
55 oop_store((volatile T*)p, v);
56 } else {
57 update_barrier_set_pre(p, v);
58 oopDesc::encode_store_heap_oop(p, v);
59 // always_do_update_barrier == false =>
60 // Either we are at a safepoint (in GC) or CMS is not used. In both
61     // cases it's unnecessary to mark the card as dirty with release semantics.
62 update_barrier_set((void*)p, v, false /* release */); // cast away type
63 }
64 }
65
66 template <class T> void oop_store(volatile T* p, oop v) {
67 update_barrier_set_pre((T*)p, v); // cast away volatile
68 // Used by release_obj_field_put, so use release_store_ptr.
69 oopDesc::release_encode_store_heap_oop(p, v);
70 // When using CMS we must mark the card corresponding to p as dirty
71   // with release semantics to prevent CMS from seeing the dirty card but
72 // not the new value v at p due to reordering of the two
73 // stores. Note that CMS has a concurrent precleaning phase, where
74 // it reads the card table while the Java threads are running.
75 update_barrier_set((void*)p, v, true /* release */); // cast away type
76 }
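// Illustrative ordering sketch (not part of the original source): the value
// store and the card-dirtying store must retire in this order, or the CMS
// precleaner could observe the dirty card without the new value:
//
//   release_store(p, encode(v));         // 1) publish the new value
//   release_store(card_for(p), dirty);   // 2) then mark its card dirty
//
// card_for() is a hypothetical helper naming the card table entry covering p.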
77
78 // Should replace "*addr = oop" assignments where the type of addr depends on UseCompressedOops
79 // (so the caller does not have to remember which encoding function to call).
80 inline void oop_store_raw(HeapWord* addr, oop value) {
81 if (UseCompressedOops) {
82 oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
83 } else {
84 oopDesc::encode_store_heap_oop((oop*)addr, value);
85 }
86 }
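// Illustrative use (sketch, not in the original source): oop_store_raw only
// encodes and stores; it applies no GC barriers, so callers must issue any
// required barriers themselves:
//
//   HeapWord* slot = ...;        // destination slot, width chosen at runtime
//   oop_store_raw(slot, value);  // picks narrowOop* vs. oop* encoding for us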
87
88 // Implementation of all inlined member functions defined in oop.hpp
89 // We need a separate file to avoid circular references
90
91 void oopDesc::release_set_mark(markOop m) {
92 OrderAccess::release_store_ptr(&_mark, m);
93 }
94
95 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
257 // of round_to to size_t to guarantee unsigned division == right shift.
258 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
259 HeapWordSize);
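// Worked example (illustrative): with MinObjAlignmentInBytes == 8 and
// HeapWordSize == 8 (a 64-bit VM), size_in_bytes == 17 rounds up to 24
// bytes, so s == 3 heap words.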
260
261 // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
262 // of an "old copy" of an object array in the young gen so it indicates
263 // the grey portion of an already copied array. This will cause the first
264 // disjunct below to fail if the two comparands are computed across such
265 // a concurrent change.
266     // ParNew also runs with promotion LABs (which look like int
267     // filler arrays) that are subject to changing their declared size
268     // when a PLAB is finally retired; this also can cause the first disjunct
269 // to fail for another worker thread that is concurrently walking the block
270 // offset table. Both these invariant failures are benign for their
271 // current uses; we relax the assertion checking to cover these two cases below:
272 // is_objArray() && is_forwarded() // covers first scenario above
273 // || is_typeArray() // covers second scenario above
274 // If and when UseParallelGC uses the same obj array oop stealing/chunking
275 // technique, we will need to suitably modify the assertion.
276 assert((s == klass->oop_size(this)) ||
277 (Universe::heap()->is_gc_active() &&
278 ((is_typeArray() && UseConcMarkSweepGC) ||
279 (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
280 "wrong array object size");
281 } else {
282 // Must be zero, so bite the bullet and take the virtual call.
283 s = klass->oop_size(this);
284 }
285 }
286
287 assert(s % MinObjAlignment == 0, "Oop size is not properly aligned: %d", s);
288 assert(s > 0, "Oop size must be greater than zero, not %d", s);
289 return s;
290 }
291
292 bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
293 bool oopDesc::is_array() const { return klass()->is_array_klass(); }
294 bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
295 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
296
297 void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
323 inline bool check_obj_alignment(oop obj) {
324 return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
325 }
326
327 oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
328 assert(!is_null(v), "narrow oop value can never be zero");
329 address base = Universe::narrow_oop_base();
330 int shift = Universe::narrow_oop_shift();
331 oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
332 assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
333 return result;
334 }
335
336 oop oopDesc::decode_heap_oop(narrowOop v) {
337 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
338 }
339
340 narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
341 assert(!is_null(v), "oop value can never be zero");
342 assert(check_obj_alignment(v), "Address not aligned");
343 assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
344 address base = Universe::narrow_oop_base();
345 int shift = Universe::narrow_oop_shift();
346 uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
347 assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
348 uint64_t result = pd >> shift;
349 assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
350 assert(decode_heap_oop(result) == v, "reversibility");
351 return (narrowOop)result;
352 }
353
354 narrowOop oopDesc::encode_heap_oop(oop v) {
355 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
356 }
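// Worked example (illustrative, assuming zero-based compressed oops:
// narrow_oop_base == NULL, narrow_oop_shift == 3): an oop at address
// 0x0000000700000008 encodes as 0x700000008 >> 3 == 0xE0000001, and decoding
// reverses it: ((uintptr_t)0xE0000001 << 3) == 0x700000008.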
357
358 // Load and decode an oop out of the Java heap into a wide oop.
359 oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
360 return decode_heap_oop_not_null(*p);
361 }
362
363 // Load and decode an oop out of the heap accepting null
528
529 address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
530 void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
531
532 bool oopDesc::is_locked() const {
533 return mark()->is_locked();
534 }
535
536 bool oopDesc::is_unlocked() const {
537 return mark()->is_unlocked();
538 }
539
540 bool oopDesc::has_bias_pattern() const {
541 return mark()->has_bias_pattern();
542 }
543
544 // used only for asserts
545 bool oopDesc::is_oop(bool ignore_mark_word) const {
546 oop obj = (oop) this;
547 if (!check_obj_alignment(obj)) return false;
548 if (!Universe::heap()->is_in_reserved(obj)) return false;
549 // obj is aligned and accessible in heap
550 if (Universe::heap()->is_in_reserved(obj->klass_or_null())) return false;
551
552 // Header verification: the mark is typically non-NULL. If we're
553 // at a safepoint, it must not be null.
554 // Outside of a safepoint, the header could be changing (for example,
555 // another thread could be inflating a lock on this object).
556 if (ignore_mark_word) {
557 return true;
558 }
559 if (mark() != NULL) {
560 return true;
561 }
562 return !SafepointSynchronize::is_at_safepoint();
563 }
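// In short: with ignore_mark_word the header is not inspected at all; a
// non-NULL mark is always accepted; a NULL mark is accepted only outside a
// safepoint, where another thread may be rewriting the header concurrently.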
564
565
566 // used only for asserts
567 bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
568 return this == NULL ? true : is_oop(ignore_mark_word);
569 }
570
571 #ifndef PRODUCT
572 // used only for asserts
573 bool oopDesc::is_unlocked_oop() const {
574 if (!Universe::heap()->is_in_reserved(this)) return false;
575 return mark()->is_unlocked();
576 }
577 #endif // PRODUCT
578
579 // Used only for markSweep, scavenging
580 bool oopDesc::is_gc_marked() const {
581 return mark()->is_marked();
582 }
583
584 bool oopDesc::is_scavengable() const {
585 return Universe::heap()->is_scavengable(this);
586 }
587
588 // Used by scavengers
589 bool oopDesc::is_forwarded() const {
590 // The extra heap check is needed since the obj might be locked, in which case the
591 // mark would point to a stack location and have the sentinel bit cleared
592 return mark()->is_marked();
593 }
594
595 // Used by scavengers
596 void oopDesc::forward_to(oop p) {
597 assert(check_obj_alignment(p),
598 "forwarding to something not aligned");
599 assert(Universe::heap()->is_in_reserved(p),
600 "forwarding to something not in heap");
601 markOop m = markOopDesc::encode_pointer_as_mark(p);
602   assert(m->decode_pointer() == p, "encoding must be reversible");
603 set_mark(m);
604 }
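// Illustrative use during a copying phase (sketch, not in the original
// source): once an object has been evacuated, the forwarding pointer is
// installed in the old copy's mark word:
//
//   oop new_obj = ...;              // freshly copied object
//   old_obj->forward_to(new_obj);
//   assert(old_obj->is_forwarded(), "mark now encodes the forwardee");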
605
606 // Used by parallel scavengers
607 bool oopDesc::cas_forward_to(oop p, markOop compare) {
608 assert(check_obj_alignment(p),
609 "forwarding to something not aligned");
610 assert(Universe::heap()->is_in_reserved(p),
611 "forwarding to something not in heap");
612 markOop m = markOopDesc::encode_pointer_as_mark(p);
613   assert(m->decode_pointer() == p, "encoding must be reversible");
614 return cas_set_mark(m, compare) == compare;
615 }
616
617 #if INCLUDE_ALL_GCS
618 oop oopDesc::forward_to_atomic(oop p) {
619 markOop oldMark = mark();
620 markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
621 markOop curMark;
622
623   assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversible");
624 assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
625
626 while (!oldMark->is_marked()) {
627 curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
628 assert(is_forwarded(), "object should have been forwarded");
629 if (curMark == oldMark) {
630 return NULL;
34 #include "oops/arrayKlass.hpp"
35 #include "oops/arrayOop.hpp"
36 #include "oops/klass.inline.hpp"
37 #include "oops/markOop.inline.hpp"
38 #include "oops/oop.hpp"
39 #include "runtime/atomic.hpp"
40 #include "runtime/orderAccess.inline.hpp"
41 #include "runtime/os.hpp"
42 #include "utilities/macros.hpp"
43
44 inline void update_barrier_set(void* p, oop v, bool release = false) {
45 assert(oopDesc::bs() != NULL, "Uninitialized bs in oop!");
46 oopDesc::bs()->write_ref_field(p, v, release);
47 }
48
49 template <class T> inline void update_barrier_set_pre(T* p, oop v) {
50 oopDesc::bs()->write_ref_field_pre(p, v);
51 }
52
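// Store v at p through the active BarrierSet, which applies its own pre- and
// post-write barriers around the store.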
53 template <class T> void oop_store(T* p, oop v) {
54 oopDesc::bs()->oop_store(p, v);
55 }
56
57 template <class T> void oop_store(volatile T* p, oop v) {
58 oopDesc::bs()->oop_store(p, v);
59 }
60
61 // Should replace "*addr = oop" assignments where the type of addr depends on UseCompressedOops
62 // (so the caller does not have to remember which encoding function to call).
63 inline void oop_store_raw(HeapWord* addr, oop value) {
64 if (UseCompressedOops) {
65 oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
66 } else {
67 oopDesc::encode_store_heap_oop((oop*)addr, value);
68 }
69 }
70
71 // Implementation of all inlined member functions defined in oop.hpp
72 // We need a separate file to avoid circular references
73
74 void oopDesc::release_set_mark(markOop m) {
75 OrderAccess::release_store_ptr(&_mark, m);
76 }
77
78 markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
240 // of round_to to size_t to guarantee unsigned division == right shift.
241 s = (int)((size_t)round_to(size_in_bytes, MinObjAlignmentInBytes) /
242 HeapWordSize);
243
244 // ParNew (used by CMS), UseParallelGC and UseG1GC can change the length field
245 // of an "old copy" of an object array in the young gen so it indicates
246 // the grey portion of an already copied array. This will cause the first
247 // disjunct below to fail if the two comparands are computed across such
248 // a concurrent change.
249     // ParNew also runs with promotion LABs (which look like int
250     // filler arrays) that are subject to changing their declared size
251     // when a PLAB is finally retired; this also can cause the first disjunct
252 // to fail for another worker thread that is concurrently walking the block
253 // offset table. Both these invariant failures are benign for their
254 // current uses; we relax the assertion checking to cover these two cases below:
255 // is_objArray() && is_forwarded() // covers first scenario above
256 // || is_typeArray() // covers second scenario above
257 // If and when UseParallelGC uses the same obj array oop stealing/chunking
258 // technique, we will need to suitably modify the assertion.
259 assert((s == klass->oop_size(this)) ||
260 (GC::gc()->heap()->is_gc_active() &&
261 ((is_typeArray() && UseConcMarkSweepGC) ||
262 (is_objArray() && is_forwarded() && (UseConcMarkSweepGC || UseParallelGC || UseG1GC)))),
263 "wrong array object size");
264 } else {
265 // Must be zero, so bite the bullet and take the virtual call.
266 s = klass->oop_size(this);
267 }
268 }
269
270 assert(s % MinObjAlignment == 0, "Oop size is not properly aligned: %d", s);
271 assert(s > 0, "Oop size must be greater than zero, not %d", s);
272 return s;
273 }
274
275 bool oopDesc::is_instance() const { return klass()->is_instance_klass(); }
276 bool oopDesc::is_array() const { return klass()->is_array_klass(); }
277 bool oopDesc::is_objArray() const { return klass()->is_objArray_klass(); }
278 bool oopDesc::is_typeArray() const { return klass()->is_typeArray_klass(); }
279
280 void* oopDesc::field_base(int offset) const { return (void*)&((char*)this)[offset]; }
306 inline bool check_obj_alignment(oop obj) {
307 return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
308 }
309
310 oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
311 assert(!is_null(v), "narrow oop value can never be zero");
312 address base = Universe::narrow_oop_base();
313 int shift = Universe::narrow_oop_shift();
314 oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
315 assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
316 return result;
317 }
318
319 oop oopDesc::decode_heap_oop(narrowOop v) {
320 return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
321 }
322
323 narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
324 assert(!is_null(v), "oop value can never be zero");
325 assert(check_obj_alignment(v), "Address not aligned");
326 assert(GC::gc()->heap()->is_in_reserved(v), "Address not in heap");
327 address base = Universe::narrow_oop_base();
328 int shift = Universe::narrow_oop_shift();
329 uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
330 assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
331 uint64_t result = pd >> shift;
332 assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
333 assert(decode_heap_oop(result) == v, "reversibility");
334 return (narrowOop)result;
335 }
336
337 narrowOop oopDesc::encode_heap_oop(oop v) {
338 return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
339 }
340
341 // Load and decode an oop out of the Java heap into a wide oop.
342 oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
343 return decode_heap_oop_not_null(*p);
344 }
345
346 // Load and decode an oop out of the heap accepting null
511
512 address oopDesc::address_field_acquire(int offset) const { return (address) OrderAccess::load_ptr_acquire(address_field_addr(offset)); }
513 void oopDesc::release_address_field_put(int offset, address contents) { OrderAccess::release_store_ptr(address_field_addr(offset), contents); }
514
515 bool oopDesc::is_locked() const {
516 return mark()->is_locked();
517 }
518
519 bool oopDesc::is_unlocked() const {
520 return mark()->is_unlocked();
521 }
522
523 bool oopDesc::has_bias_pattern() const {
524 return mark()->has_bias_pattern();
525 }
526
527 // used only for asserts
528 bool oopDesc::is_oop(bool ignore_mark_word) const {
529 oop obj = (oop) this;
530 if (!check_obj_alignment(obj)) return false;
531 if (!GC::gc()->heap()->is_in_reserved(obj)) return false;
532 // obj is aligned and accessible in heap
533 if (GC::gc()->heap()->is_in_reserved(obj->klass_or_null())) return false;
534
535 // Header verification: the mark is typically non-NULL. If we're
536 // at a safepoint, it must not be null.
537 // Outside of a safepoint, the header could be changing (for example,
538 // another thread could be inflating a lock on this object).
539 if (ignore_mark_word) {
540 return true;
541 }
542 if (mark() != NULL) {
543 return true;
544 }
545 return !SafepointSynchronize::is_at_safepoint();
546 }
547
548
549 // used only for asserts
550 bool oopDesc::is_oop_or_null(bool ignore_mark_word) const {
551 return this == NULL ? true : is_oop(ignore_mark_word);
552 }
553
554 #ifndef PRODUCT
555 // used only for asserts
556 bool oopDesc::is_unlocked_oop() const {
557 if (!GC::gc()->heap()->is_in_reserved(this)) return false;
558 return mark()->is_unlocked();
559 }
560 #endif // PRODUCT
561
562 // Used only for markSweep, scavenging
563 bool oopDesc::is_gc_marked() const {
564 return mark()->is_marked();
565 }
566
567 bool oopDesc::is_scavengable() const {
568 return GC::gc()->heap()->is_scavengable(this);
569 }
570
571 // Used by scavengers
572 bool oopDesc::is_forwarded() const {
573 // The extra heap check is needed since the obj might be locked, in which case the
574 // mark would point to a stack location and have the sentinel bit cleared
575 return mark()->is_marked();
576 }
577
578 // Used by scavengers
579 void oopDesc::forward_to(oop p) {
580 assert(check_obj_alignment(p),
581 "forwarding to something not aligned");
582 assert(GC::gc()->heap()->is_in_reserved(p),
583 "forwarding to something not in heap");
584 markOop m = markOopDesc::encode_pointer_as_mark(p);
585   assert(m->decode_pointer() == p, "encoding must be reversible");
586 set_mark(m);
587 }
588
589 // Used by parallel scavengers
590 bool oopDesc::cas_forward_to(oop p, markOop compare) {
591 assert(check_obj_alignment(p),
592 "forwarding to something not aligned");
593 assert(GC::gc()->heap()->is_in_reserved(p),
594 "forwarding to something not in heap");
595 markOop m = markOopDesc::encode_pointer_as_mark(p);
596   assert(m->decode_pointer() == p, "encoding must be reversible");
597 return cas_set_mark(m, compare) == compare;
598 }
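// Illustrative use by competing GC workers (sketch, not in the original
// source): each worker attempts the CAS, but only one installs the
// forwarding pointer and thereby claims the object:
//
//   markOop old_mark = obj->mark();
//   if (obj->cas_forward_to(new_obj, old_mark)) {
//     // this thread won the race and is responsible for the copy
//   }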
599
600 #if INCLUDE_ALL_GCS
601 oop oopDesc::forward_to_atomic(oop p) {
602 markOop oldMark = mark();
603 markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
604 markOop curMark;
605
606   assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversible");
607 assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
608
609 while (!oldMark->is_marked()) {
610 curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
611 assert(is_forwarded(), "object should have been forwarded");
612 if (curMark == oldMark) {
613 return NULL;