27
28 #include "gc/shared/ageTable.hpp"
29 #include "gc/shared/collectedHeap.inline.hpp"
30 #include "gc/shared/genCollectedHeap.hpp"
31 #include "gc/shared/generation.hpp"
32 #include "oops/access.inline.hpp"
33 #include "oops/arrayKlass.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "oops/markOop.inline.hpp"
37 #include "oops/oop.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/orderAccess.inline.hpp"
40 #include "runtime/os.hpp"
41 #include "utilities/align.hpp"
42 #include "utilities/macros.hpp"
43
44 // Implementation of all inlined member functions defined in oop.hpp
45 // We need a separate file to avoid circular references
46
47 inline markOop oopDesc::mark() const {
48 return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
49 }
50
51 inline markOop* oopDesc::mark_addr() const {
52 return (markOop*) &_mark;
53 }
54
// Store m into this object's mark word with volatile (MO_VOLATILE)
// semantics via the Access API, so GC barriers can intercept the store.
inline void oopDesc::set_mark(volatile markOop m) {
  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}
58
// Store m into the mark word with release ordering: all prior stores by
// this thread become visible before the new mark value does.
void oopDesc::release_set_mark(markOop m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}
62
// Atomically replace the mark word with new_mark iff it still equals
// old_mark. Returns the mark value observed (== old_mark on success).
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
}
66
67 void oopDesc::init_mark() {
68 set_mark(markOopDesc::prototype_for_object(this));
69 }
70
71 Klass* oopDesc::klass() const {
72 if (UseCompressedClassPointers) {
73 return Klass::decode_klass_not_null(_metadata._compressed_klass);
74 } else {
75 return _metadata._klass;
76 }
77 }
78
79 Klass* oopDesc::klass_or_null() const volatile {
80 if (UseCompressedClassPointers) {
81 return Klass::decode_klass(_metadata._compressed_klass);
82 } else {
83 return _metadata._klass;
84 }
85 }
86
87 Klass* oopDesc::klass_or_null_acquire() const volatile {
88 if (UseCompressedClassPointers) {
89 // Workaround for non-const load_acquire parameter.
90 const volatile narrowKlass* addr = &_metadata._compressed_klass;
// Primitive field accessors: offset is in bytes from the oop base, and
// every load/store is routed through the Access API so GC barriers apply.
inline void oopDesc::long_field_put(int offset, jlong value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jfloat oopDesc::float_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::float_field_put(int offset, jfloat value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jdouble oopDesc::double_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
360
361 bool oopDesc::is_locked() const {
362 return mark()->is_locked();
363 }
364
365 bool oopDesc::is_unlocked() const {
366 return mark()->is_unlocked();
367 }
368
369 bool oopDesc::has_bias_pattern() const {
370 return mark()->has_bias_pattern();
371 }
372
373 // Used only for markSweep, scavenging
374 bool oopDesc::is_gc_marked() const {
375 return mark()->is_marked();
376 }
377
// Used by scavengers
// True if the mark word is in the "marked" state, which during scavenge
// means it holds a forwarding pointer (see forward_to / forwardee).
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  // NOTE(review): no heap check is visible in this body — the comment
  // above looks stale; confirm against history/callers.
  return mark()->is_marked();
}
384
// Used by scavengers
// Install a forwarding pointer to p in this object's mark word.
// Preconditions (asserted): p is object-aligned, lies within the
// reserved heap, and neither object is an archive object.
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  assert(!is_archive_object(oop(this)) &&
         !is_archive_object(p),
         "forwarding archive object");
  // Encode p as a mark value with the "marked" pattern set, so
  // is_forwarded()/forwardee() can recognize and decode it.
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversable");
  set_mark(m);
}
398
// Used by parallel scavengers
// Atomically install a forwarding pointer to p, but only if the mark
// word still equals 'compare'. Returns true iff this thread's CAS won.
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversable");
  return cas_set_mark(m, compare) == compare;
}
409
#if INCLUDE_ALL_GCS
// Atomically install a forwarding pointer to p. Returns NULL if this
// thread's CAS succeeded (we forwarded the object), otherwise the
// forwardee installed by the winning thread.
oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  // Retry until either our forwarding pointer is installed or we observe
  // a mark that is already forwarded (another thread won the race).
  while (!oldMark->is_marked()) {
    curMark = Atomic::cmpxchg(forwardPtrMark, &_mark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  return forwardee();
}
#endif
433
434 // Note that the forwardee is not the same thing as the displaced_mark.
435 // The forwardee is used when copying during scavenge and mark-sweep.
436 // It does need to clear the low two locking- and GC-related bits.
437 oop oopDesc::forwardee() const {
438 return (oop) mark()->decode_pointer();
439 }
440
441 // The following method needs to be MT safe.
442 uint oopDesc::age() const {
443 assert(!is_forwarded(), "Attempt to read age from forwarded mark");
444 if (has_displaced_mark()) {
445 return displaced_mark()->age();
446 } else {
447 return mark()->age();
448 }
449 }
450
// Bump the GC age bits in the mark word by one.
void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  // A locked object keeps its real mark in the displaced location, so
  // the age must be updated there.
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}
459
460 #if INCLUDE_ALL_GCS
// Parallel compact: follow all references in this object, dispatching
// through the klass-specific implementation.
void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
  klass()->oop_pc_follow_contents(this, cm);
}
464
// Parallel compact: update the pointers contained in this object, unless
// it is a primitive-type array (which holds no oops beyond the header).
void oopDesc::pc_update_contents(ParCompactionManager* cm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this, cm);
  }
  // Else skip it. The TypeArrayKlass in the header never needs scavenging.
}
473
474 void oopDesc::ps_push_contents(PSPromotionManager* pm) {
475 Klass* k = klass();
476 if (!k->is_typeArray_klass()) {
533 OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
534 OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix) \
535 OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
536
537 ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
538 ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
539
540 intptr_t oopDesc::identity_hash() {
541 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
542 // Note: The mark must be read into local variable to avoid concurrent updates.
543 markOop mrk = mark();
544 if (mrk->is_unlocked() && !mrk->has_no_hash()) {
545 return mrk->hash();
546 } else if (mrk->is_marked()) {
547 return mrk->hash();
548 } else {
549 return slow_identity_hash();
550 }
551 }
552
553 bool oopDesc::has_displaced_mark() const {
554 return mark()->has_displaced_mark_helper();
555 }
556
557 markOop oopDesc::displaced_mark() const {
558 return mark()->displaced_mark_helper();
559 }
560
// Write m into the displaced mark location referenced by the mark word.
void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}
564
565 #endif // SHARE_VM_OOPS_OOP_INLINE_HPP
|
27
28 #include "gc/shared/ageTable.hpp"
29 #include "gc/shared/collectedHeap.inline.hpp"
30 #include "gc/shared/genCollectedHeap.hpp"
31 #include "gc/shared/generation.hpp"
32 #include "oops/access.inline.hpp"
33 #include "oops/arrayKlass.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "oops/markOop.inline.hpp"
37 #include "oops/oop.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/orderAccess.inline.hpp"
40 #include "runtime/os.hpp"
41 #include "utilities/align.hpp"
42 #include "utilities/macros.hpp"
43
44 // Implementation of all inlined member functions defined in oop.hpp
45 // We need a separate file to avoid circular references
46
// Load the mark word with volatile (MO_VOLATILE) semantics through the
// Access API, so GC barriers can intercept the load.
markOop oopDesc::mark() const {
  return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
}
50
// Raw load of the mark word, bypassing the Access API — used by the GC
// code below (is_gc_marked, forwarding) which must see the stored value.
markOop oopDesc::mark_raw() const {
  return _mark;
}
54
55 markOop* oopDesc::mark_addr_raw() const {
56 return (markOop*) &_mark;
57 }
58
// Store m into the mark word with volatile (MO_VOLATILE) semantics via
// the Access API, so GC barriers can intercept the store.
void oopDesc::set_mark(volatile markOop m) {
  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}
62
// Raw store of the mark word, bypassing the Access API — counterpart of
// mark_raw(), used by GC forwarding code below.
void oopDesc::set_mark_raw(volatile markOop m) {
  _mark = m;
}
66
// Store m into the mark word with release ordering: all prior stores by
// this thread become visible before the new mark value does.
void oopDesc::release_set_mark(markOop m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}
70
// Atomically replace the mark word with new_mark iff it still equals
// old_mark. Returns the mark value observed (== old_mark on success).
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
}
74
// Raw CAS on the mark word, bypassing the Access API. Returns the mark
// value observed (== old_mark on success).
markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark);
}
78
79 void oopDesc::init_mark() {
80 set_mark(markOopDesc::prototype_for_object(this));
81 }
82
83 void oopDesc::init_mark_raw() {
84 set_mark_raw(markOopDesc::prototype_for_object(this));
85 }
86
87 Klass* oopDesc::klass() const {
88 if (UseCompressedClassPointers) {
89 return Klass::decode_klass_not_null(_metadata._compressed_klass);
90 } else {
91 return _metadata._klass;
92 }
93 }
94
95 Klass* oopDesc::klass_or_null() const volatile {
96 if (UseCompressedClassPointers) {
97 return Klass::decode_klass(_metadata._compressed_klass);
98 } else {
99 return _metadata._klass;
100 }
101 }
102
103 Klass* oopDesc::klass_or_null_acquire() const volatile {
104 if (UseCompressedClassPointers) {
105 // Workaround for non-const load_acquire parameter.
106 const volatile narrowKlass* addr = &_metadata._compressed_klass;
// Primitive field accessors: offset is in bytes from the oop base, and
// every load/store is routed through the Access API so GC barriers apply.
inline void oopDesc::long_field_put(int offset, jlong value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jfloat oopDesc::float_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::float_field_put(int offset, jfloat value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jdouble oopDesc::double_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
376
377 bool oopDesc::is_locked() const {
378 return mark()->is_locked();
379 }
380
381 bool oopDesc::is_unlocked() const {
382 return mark()->is_unlocked();
383 }
384
385 bool oopDesc::has_bias_pattern() const {
386 return mark()->has_bias_pattern();
387 }
388
389 bool oopDesc::has_bias_pattern_raw() const {
390 return mark_raw()->has_bias_pattern();
391 }
392
// Used only for markSweep, scavenging
// Reads the raw mark: during GC the mark may hold a forwarding pointer,
// so the unbarriered value is consulted directly.
bool oopDesc::is_gc_marked() const {
  return mark_raw()->is_marked();
}
397
// Used by scavengers
// True if the raw mark is in the "marked" state, which during scavenge
// means it holds a forwarding pointer (see forward_to / forwardee).
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  // NOTE(review): no heap check is visible in this body — the comment
  // above looks stale; confirm against history/callers.
  return mark_raw()->is_marked();
}
404
// Used by scavengers
// Install a forwarding pointer to p in this object's mark word (raw
// store — forwarding operates outside the Access barriers).
// Preconditions (asserted): p is object-aligned, lies within the
// reserved heap, and neither object is an archive object.
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  assert(!is_archive_object(oop(this)) &&
         !is_archive_object(p),
         "forwarding archive object");
  // Encode p as a mark value with the "marked" pattern set, so
  // is_forwarded()/forwardee() can recognize and decode it.
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversable");
  set_mark_raw(m);
}
418
// Used by parallel scavengers
// Atomically install a forwarding pointer to p (raw CAS), but only if
// the mark word still equals 'compare'. Returns true iff our CAS won.
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  assert(m->decode_pointer() == p, "encoding must be reversable");
  return cas_set_mark_raw(m, compare) == compare;
}
429
#if INCLUDE_ALL_GCS
// Atomically install a forwarding pointer to p. Returns NULL if this
// thread's CAS succeeded (we forwarded the object), otherwise the
// forwardee installed by the winning thread.
oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark_raw();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  // Retry until either our forwarding pointer is installed or we observe
  // a mark that is already forwarded (another thread won the race).
  while (!oldMark->is_marked()) {
    curMark = cas_set_mark_raw(forwardPtrMark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  return forwardee();
}
#endif
453
454 // Note that the forwardee is not the same thing as the displaced_mark.
455 // The forwardee is used when copying during scavenge and mark-sweep.
456 // It does need to clear the low two locking- and GC-related bits.
457 oop oopDesc::forwardee() const {
458 return (oop) mark_raw()->decode_pointer();
459 }
460
461 // The following method needs to be MT safe.
462 uint oopDesc::age() const {
463 assert(!is_forwarded(), "Attempt to read age from forwarded mark");
464 if (has_displaced_mark_raw()) {
465 return displaced_mark_raw()->age();
466 } else {
467 return mark_raw()->age();
468 }
469 }
470
// Bump the GC age bits in the (raw) mark word by one.
void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  // A locked object keeps its real mark in the displaced location, so
  // the age must be updated there.
  if (has_displaced_mark_raw()) {
    set_displaced_mark_raw(displaced_mark_raw()->incr_age());
  } else {
    set_mark_raw(mark_raw()->incr_age());
  }
}
479
480 #if INCLUDE_ALL_GCS
// Parallel compact: follow all references in this object, dispatching
// through the klass-specific implementation.
void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
  klass()->oop_pc_follow_contents(this, cm);
}
484
// Parallel compact: update the pointers contained in this object, unless
// it is a primitive-type array (which holds no oops beyond the header).
void oopDesc::pc_update_contents(ParCompactionManager* cm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this, cm);
  }
  // Else skip it. The TypeArrayKlass in the header never needs scavenging.
}
493
494 void oopDesc::ps_push_contents(PSPromotionManager* pm) {
495 Klass* k = klass();
496 if (!k->is_typeArray_klass()) {
553 OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
554 OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix) \
555 OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
556
557 ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
558 ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
559
560 intptr_t oopDesc::identity_hash() {
561 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
562 // Note: The mark must be read into local variable to avoid concurrent updates.
563 markOop mrk = mark();
564 if (mrk->is_unlocked() && !mrk->has_no_hash()) {
565 return mrk->hash();
566 } else if (mrk->is_marked()) {
567 return mrk->hash();
568 } else {
569 return slow_identity_hash();
570 }
571 }
572
573 bool oopDesc::has_displaced_mark_raw() const {
574 return mark_raw()->has_displaced_mark_helper();
575 }
576
577 markOop oopDesc::displaced_mark_raw() const {
578 return mark_raw()->displaced_mark_helper();
579 }
580
// Write m into the displaced mark location referenced by the raw mark.
void oopDesc::set_displaced_mark_raw(markOop m) {
  mark_raw()->set_displaced_mark_helper(m);
}
584
585 #endif // SHARE_VM_OOPS_OOP_INLINE_HPP
|