77 }
78
79 // Should replace *addr = oop assignments where addr type depends on UseCompressedOops
80 // (without having to remember the function name this calls).
81 inline void oop_store_raw(HeapWord* addr, oop value) {
82 if (UseCompressedOops) {
83 oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
84 } else {
85 oopDesc::encode_store_heap_oop((oop*)addr, value);
86 }
87 }
88
89 // Implementation of all inlined member functions defined in oop.hpp
90 // We need a separate file to avoid circular references
91
// Store a new mark word with release semantics: all prior stores to this
// object are made visible before the new mark can be observed by another
// thread reading the header.
void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}
95
// Atomically install new_mark iff the header still equals old_mark.
// Returns the mark word the CAS observed: equal to old_mark on success,
// otherwise whatever value another thread installed first.
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return (markOop) Atomic::cmpxchg_ptr(new_mark, &_mark, old_mark);
}
99
// Reset the header to the prototype mark word that
// markOopDesc::prototype_for_object selects for this object.
void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}
103
104 Klass* oopDesc::klass() const {
105 if (UseCompressedClassPointers) {
106 return Klass::decode_klass_not_null(_metadata._compressed_klass);
107 } else {
108 return _metadata._klass;
109 }
110 }
111
112 Klass* oopDesc::klass_or_null() const volatile {
113 if (UseCompressedClassPointers) {
114 return Klass::decode_klass(_metadata._compressed_klass);
115 } else {
116 return _metadata._klass;
117 }
391 narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
392 // decode old from T to oop
393 return decode_heap_oop(old);
394 } else {
395 return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
396 }
397 }
398
// Atomically replace the oop at dest with exchange_value iff the slot
// currently holds compare_value. Returns the oop found in dest at the
// time of the compare-and-swap (== compare_value on success).
// If prebarrier is set, update_barrier_set_pre is applied to the
// destination slot before the CAS is attempted.
oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                         volatile HeapWord *dest,
                                         oop compare_value,
                                         bool prebarrier) {
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = (narrowOop) Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    // Full-width slot: CAS the oop pointer directly.
    return (oop)Atomic::cmpxchg_ptr(exchange_value, (oop*)dest, compare_value);
  }
}
421
422 // In order to put or get a field out of an instance, must first check
423 // if the field has been compressed and uncompress it.
424 oop oopDesc::obj_field(int offset) const {
425 return UseCompressedOops ?
426 load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
427 load_decode_heap_oop(obj_field_addr<oop>(offset));
428 }
429
430 void oopDesc::obj_field_put(int offset, oop value) {
431 UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
432 oop_store(obj_field_addr<oop>(offset), value);
433 }
434
435 void oopDesc::obj_field_put_raw(int offset, oop value) {
436 UseCompressedOops ?
437 encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
438 encode_store_heap_oop(obj_field_addr<oop>(offset), value);
// Try to install a forwarding pointer to p in this object's mark word,
// but only if the mark still equals compare. Returns true iff this
// thread's CAS succeeded (it won the race to forward the object).
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  // Encode the destination address into mark-word form.
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversable");
  return cas_set_mark(m, compare) == compare;
}
608
609 #if INCLUDE_ALL_GCS
// Retry loop around the forwarding CAS: returns NULL if this thread
// installed the forwarding pointer to p, or the winning forwardee if
// another thread forwarded the object first.
oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  while (!oldMark->is_marked()) {
    curMark = (markOop)Atomic::cmpxchg_ptr(forwardPtrMark, &_mark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      // Our CAS installed the forwarding pointer.
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  // Some other thread won the race; hand back its forwardee.
  return forwardee();
}
631 #endif
632
633 // Note that the forwardee is not the same thing as the displaced_mark.
634 // The forwardee is used when copying during scavenge and mark-sweep.
635 // It does need to clear the low two locking- and GC-related bits.
// Decode the forwarding pointer from the mark word. Callers elsewhere in
// this file (forward_to_atomic) only invoke this after the object is
// known to be forwarded.
oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}
639
|
77 }
78
79 // Should replace *addr = oop assignments where addr type depends on UseCompressedOops
80 // (without having to remember the function name this calls).
81 inline void oop_store_raw(HeapWord* addr, oop value) {
82 if (UseCompressedOops) {
83 oopDesc::encode_store_heap_oop((narrowOop*)addr, value);
84 } else {
85 oopDesc::encode_store_heap_oop((oop*)addr, value);
86 }
87 }
88
89 // Implementation of all inlined member functions defined in oop.hpp
90 // We need a separate file to avoid circular references
91
// Store a new mark word with release semantics: all prior stores to this
// object are made visible before the new mark can be observed by another
// thread reading the header.
void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store_ptr(&_mark, m);
}
95
// Atomically install new_mark iff the header still equals old_mark.
// Returns the mark word the CAS observed: equal to old_mark on success,
// otherwise whatever value another thread installed first.
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark);
}
99
// Reset the header to the prototype mark word that
// markOopDesc::prototype_for_object selects for this object.
void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}
103
104 Klass* oopDesc::klass() const {
105 if (UseCompressedClassPointers) {
106 return Klass::decode_klass_not_null(_metadata._compressed_klass);
107 } else {
108 return _metadata._klass;
109 }
110 }
111
112 Klass* oopDesc::klass_or_null() const volatile {
113 if (UseCompressedClassPointers) {
114 return Klass::decode_klass(_metadata._compressed_klass);
115 } else {
116 return _metadata._klass;
117 }
391 narrowOop old = (narrowOop)Atomic::xchg(val, (narrowOop*)dest);
392 // decode old from T to oop
393 return decode_heap_oop(old);
394 } else {
395 return (oop)Atomic::xchg_ptr(exchange_value, (oop*)dest);
396 }
397 }
398
// Atomically replace the oop at dest with exchange_value iff the slot
// currently holds compare_value. Returns the oop found in dest at the
// time of the compare-and-swap (== compare_value on success).
// If prebarrier is set, update_barrier_set_pre is applied to the
// destination slot before the CAS is attempted.
oop oopDesc::atomic_compare_exchange_oop(oop exchange_value,
                                         volatile HeapWord *dest,
                                         oop compare_value,
                                         bool prebarrier) {
  if (UseCompressedOops) {
    if (prebarrier) {
      update_barrier_set_pre((narrowOop*)dest, exchange_value);
    }
    // encode exchange and compare value from oop to T
    narrowOop val = encode_heap_oop(exchange_value);
    narrowOop cmp = encode_heap_oop(compare_value);

    narrowOop old = Atomic::cmpxchg(val, (narrowOop*)dest, cmp);
    // decode old from T to oop
    return decode_heap_oop(old);
  } else {
    if (prebarrier) {
      update_barrier_set_pre((oop*)dest, exchange_value);
    }
    // Full-width slot: CAS the oop pointer directly.
    return Atomic::cmpxchg(exchange_value, (oop*)dest, compare_value);
  }
}
421
422 // In order to put or get a field out of an instance, must first check
423 // if the field has been compressed and uncompress it.
424 oop oopDesc::obj_field(int offset) const {
425 return UseCompressedOops ?
426 load_decode_heap_oop(obj_field_addr<narrowOop>(offset)) :
427 load_decode_heap_oop(obj_field_addr<oop>(offset));
428 }
429
430 void oopDesc::obj_field_put(int offset, oop value) {
431 UseCompressedOops ? oop_store(obj_field_addr<narrowOop>(offset), value) :
432 oop_store(obj_field_addr<oop>(offset), value);
433 }
434
435 void oopDesc::obj_field_put_raw(int offset, oop value) {
436 UseCompressedOops ?
437 encode_store_heap_oop(obj_field_addr<narrowOop>(offset), value) :
438 encode_store_heap_oop(obj_field_addr<oop>(offset), value);
// Try to install a forwarding pointer to p in this object's mark word,
// but only if the mark still equals compare. Returns true iff this
// thread's CAS succeeded (it won the race to forward the object).
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  // Encode the destination address into mark-word form.
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversable");
  return cas_set_mark(m, compare) == compare;
}
608
609 #if INCLUDE_ALL_GCS
// Retry loop around the forwarding CAS: returns NULL if this thread
// installed the forwarding pointer to p, or the winning forwardee if
// another thread forwarded the object first.
oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  while (!oldMark->is_marked()) {
    curMark = Atomic::cmpxchg(forwardPtrMark, &_mark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      // Our CAS installed the forwarding pointer.
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  // Some other thread won the race; hand back its forwardee.
  return forwardee();
}
631 #endif
632
633 // Note that the forwardee is not the same thing as the displaced_mark.
634 // The forwardee is used when copying during scavenge and mark-sweep.
635 // It does need to clear the low two locking- and GC-related bits.
// Decode the forwarding pointer from the mark word. Callers elsewhere in
// this file (forward_to_atomic) only invoke this after the object is
// known to be forwarded.
oop oopDesc::forwardee() const {
  return (oop) mark()->decode_pointer();
}
639
|