27
28 #include "gc/shared/ageTable.hpp"
29 #include "gc/shared/collectedHeap.inline.hpp"
30 #include "gc/shared/genCollectedHeap.hpp"
31 #include "gc/shared/generation.hpp"
32 #include "oops/access.inline.hpp"
33 #include "oops/arrayKlass.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "oops/markOop.inline.hpp"
37 #include "oops/oop.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/orderAccess.inline.hpp"
40 #include "runtime/os.hpp"
41 #include "utilities/align.hpp"
42 #include "utilities/macros.hpp"
43
44 // Implementation of all inlined member functions defined in oop.hpp
45 // We need a separate file to avoid circular references
46
// Store m into the mark word with release semantics: all writes to this
// object made before this store become visible to any thread that
// subsequently observes the new mark.
void oopDesc::release_set_mark(markOop m) {
  OrderAccess::release_store(&_mark, m);
}
50
// Atomically install new_mark in the mark word iff the current value is
// old_mark. Returns the value observed by the CAS: equal to old_mark on
// success, the competing value on failure.
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark);
}
54
55 void oopDesc::init_mark() {
56 set_mark(markOopDesc::prototype_for_object(this));
57 }
58
59 Klass* oopDesc::klass() const {
60 if (UseCompressedClassPointers) {
61 return Klass::decode_klass_not_null(_metadata._compressed_klass);
62 } else {
63 return _metadata._klass;
64 }
65 }
66
67 Klass* oopDesc::klass_or_null() const volatile {
68 if (UseCompressedClassPointers) {
69 return Klass::decode_klass(_metadata._compressed_klass);
70 } else {
71 return _metadata._klass;
72 }
73 }
74
75 Klass* oopDesc::klass_or_null_acquire() const volatile {
76 if (UseCompressedClassPointers) {
77 // Workaround for non-const load_acquire parameter.
78 const volatile narrowKlass* addr = &_metadata._compressed_klass;
// Primitive field accessors. All loads/stores go through the HeapAccess
// API so that any GC-required access decoration is applied; offset is in
// bytes from the start of the object.
inline void oopDesc::long_field_put(int offset, jlong value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jfloat oopDesc::float_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::float_field_put(int offset, jfloat value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jdouble oopDesc::double_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
348
// Lock-state queries, all answered from the mark word.

// True if the mark word indicates this object is locked.
bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

// True if the mark word indicates this object is unlocked.
bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

// True if the mark word carries the biased-locking bias pattern.
bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}
360
// Used only for markSweep, scavenging
// True if the GC mark bit is set in the mark word.
bool oopDesc::is_gc_marked() const {
  return mark()->is_marked();
}
365
// Used by scavengers
// True if the mark word holds a forwarding pointer (the marked bit doubles
// as the forwarding sentinel; see forward_to()/forwardee()).
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark()->is_marked();
}
372
// Used by scavengers
// Overwrite the mark word with an encoded forwarding pointer to p.
// Non-atomic; callers are assumed to have exclusive access during scavenge.
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  // Archive objects are never forwarded.
  assert(!is_archive_object(oop(this)) &&
         !is_archive_object(p),
         "forwarding archive object");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  // Sanity: decoding must round-trip back to p.
  assert(m->decode_pointer() == p, "encoding must be reversable");
  set_mark(m);
}
386
// Used by parallel scavengers
// Atomically install a forwarding pointer to p, expecting the current mark
// to be 'compare'. Returns true iff this thread's CAS won.
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  // Sanity: decoding must round-trip back to p.
  assert(m->decode_pointer() == p, "encoding must be reversable");
  return cas_set_mark(m, compare) == compare;
}
397
398 #if INCLUDE_ALL_GCS
399 oop oopDesc::forward_to_atomic(oop p) {
400 markOop oldMark = mark();
401 markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
402 markOop curMark;
403
404 assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
405 assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");
406
407 while (!oldMark->is_marked()) {
408 curMark = Atomic::cmpxchg(forwardPtrMark, &_mark, oldMark);
409 assert(is_forwarded(), "object should have been forwarded");
410 if (curMark == oldMark) {
411 return NULL;
412 }
413 // If the CAS was unsuccessful then curMark->is_marked()
414 // should return true as another thread has CAS'd in another
415 // forwarding pointer.
416 oldMark = curMark;
417 }
418 return forwardee();
419 }
420 #endif
421
422 // Note that the forwardee is not the same thing as the displaced_mark.
423 // The forwardee is used when copying during scavenge and mark-sweep.
424 // It does need to clear the low two locking- and GC-related bits.
425 oop oopDesc::forwardee() const {
426 return (oop) mark()->decode_pointer();
427 }
428
// The following method needs to be MT safe.
// Return the GC age bits, reading from the displaced mark when the real
// mark has been displaced (e.g. by locking).
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark()) {
    return displaced_mark()->age();
  } else {
    return mark()->age();
  }
}
438
// Bump the GC age bits by one, writing through the displaced mark when the
// real mark has been displaced.
void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark()) {
    set_displaced_mark(displaced_mark()->incr_age());
  } else {
    set_mark(mark()->incr_age());
  }
}
447
448 #if INCLUDE_ALL_GCS
// Parallel-compact: dispatch marking of this object's contents to its Klass.
void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
  klass()->oop_pc_follow_contents(this, cm);
}

// Parallel-compact: update interior pointers, skipping type arrays which
// contain no oops beyond the header.
void oopDesc::pc_update_contents(ParCompactionManager* cm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this, cm);
  }
  // Else skip it. The TypeArrayKlass in the header never needs scavenging.
}
461
462 void oopDesc::ps_push_contents(PSPromotionManager* pm) {
463 Klass* k = klass();
464 if (!k->is_typeArray_klass()) {
521 OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
522 OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix) \
523 OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
524
525 ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
526 ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
527
528 intptr_t oopDesc::identity_hash() {
529 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
530 // Note: The mark must be read into local variable to avoid concurrent updates.
531 markOop mrk = mark();
532 if (mrk->is_unlocked() && !mrk->has_no_hash()) {
533 return mrk->hash();
534 } else if (mrk->is_marked()) {
535 return mrk->hash();
536 } else {
537 return slow_identity_hash();
538 }
539 }
540
// Displaced-mark helpers: when an object is locked its real mark may live
// elsewhere (a "displaced" location); these delegate to the markOop helpers.

// True if the mark word indicates the real mark is stored elsewhere.
bool oopDesc::has_displaced_mark() const {
  return mark()->has_displaced_mark_helper();
}

// Fetch the displaced mark; only meaningful if has_displaced_mark().
markOop oopDesc::displaced_mark() const {
  return mark()->displaced_mark_helper();
}

// Store m into the displaced mark location.
void oopDesc::set_displaced_mark(markOop m) {
  mark()->set_displaced_mark_helper(m);
}
552
553 #endif // SHARE_VM_OOPS_OOP_INLINE_HPP
|
27
28 #include "gc/shared/ageTable.hpp"
29 #include "gc/shared/collectedHeap.inline.hpp"
30 #include "gc/shared/genCollectedHeap.hpp"
31 #include "gc/shared/generation.hpp"
32 #include "oops/access.inline.hpp"
33 #include "oops/arrayKlass.hpp"
34 #include "oops/arrayOop.hpp"
35 #include "oops/klass.inline.hpp"
36 #include "oops/markOop.inline.hpp"
37 #include "oops/oop.hpp"
38 #include "runtime/atomic.hpp"
39 #include "runtime/orderAccess.inline.hpp"
40 #include "runtime/os.hpp"
41 #include "utilities/align.hpp"
42 #include "utilities/macros.hpp"
43
44 // Implementation of all inlined member functions defined in oop.hpp
45 // We need a separate file to avoid circular references
46
// Load the mark word through the access API with MO_VOLATILE decoration,
// so GC barriers (if any) and volatile semantics are applied.
markOop oopDesc::mark() const {
  return HeapAccess<MO_VOLATILE>::load_at(as_oop(), mark_offset_in_bytes());
}
50
// Raw (barrier-free) load of the mark word; a plain field read with no
// access decoration. For GC-internal use where barriers must be bypassed.
markOop oopDesc::mark_raw() const {
  return _mark;
}

// Raw address of the mark word; the cast drops const on the enclosing object.
markOop* oopDesc::mark_addr_raw() const {
  return (markOop*) &_mark;
}
58
// Store the mark word through the access API with MO_VOLATILE decoration.
void oopDesc::set_mark(volatile markOop m) {
  HeapAccess<MO_VOLATILE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}

// Raw (barrier-free) store of the mark word; a plain field write.
void oopDesc::set_mark_raw(volatile markOop m) {
  _mark = m;
}
66
// Store m into the mark word with MO_RELEASE decoration: prior writes to
// this object become visible before the new mark is observed.
void oopDesc::release_set_mark(markOop m) {
  HeapAccess<MO_RELEASE>::store_at(as_oop(), mark_offset_in_bytes(), m);
}
70
// Atomically install new_mark iff the current mark equals old_mark, going
// through the access API so any required barriers are applied. Returns the
// observed value: old_mark on success, the competing value on failure.
markOop oopDesc::cas_set_mark(markOop new_mark, markOop old_mark) {
  return HeapAccess<>::atomic_cmpxchg_at(new_mark, as_oop(), mark_offset_in_bytes(), old_mark);
}

// Raw CAS on the mark word, bypassing the access API; same return contract.
markOop oopDesc::cas_set_mark_raw(markOop new_mark, markOop old_mark) {
  return Atomic::cmpxchg(new_mark, &_mark, old_mark);
}
78
// Reset the mark word to its prototype value via the decorated store.
void oopDesc::init_mark() {
  set_mark(markOopDesc::prototype_for_object(this));
}

// Reset the mark word to its prototype value via the raw (barrier-free) store.
void oopDesc::init_mark_raw() {
  set_mark_raw(markOopDesc::prototype_for_object(this));
}
86
87 Klass* oopDesc::klass() const {
88 if (UseCompressedClassPointers) {
89 return Klass::decode_klass_not_null(_metadata._compressed_klass);
90 } else {
91 return _metadata._klass;
92 }
93 }
94
95 Klass* oopDesc::klass_or_null() const volatile {
96 if (UseCompressedClassPointers) {
97 return Klass::decode_klass(_metadata._compressed_klass);
98 } else {
99 return _metadata._klass;
100 }
101 }
102
103 Klass* oopDesc::klass_or_null_acquire() const volatile {
104 if (UseCompressedClassPointers) {
105 // Workaround for non-const load_acquire parameter.
106 const volatile narrowKlass* addr = &_metadata._compressed_klass;
// Primitive field accessors. All loads/stores go through the HeapAccess
// API so that any GC-required access decoration is applied; offset is in
// bytes from the start of the object.
inline void oopDesc::long_field_put(int offset, jlong value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jfloat oopDesc::float_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::float_field_put(int offset, jfloat value) { HeapAccess<>::store_at(as_oop(), offset, value); }

inline jdouble oopDesc::double_field(int offset) const { return HeapAccess<>::load_at(as_oop(), offset); }
inline void oopDesc::double_field_put(int offset, jdouble value) { HeapAccess<>::store_at(as_oop(), offset, value); }
376
// Lock-state queries, all answered from the mark word.

// True if the mark word indicates this object is locked.
bool oopDesc::is_locked() const {
  return mark()->is_locked();
}

// True if the mark word indicates this object is unlocked.
bool oopDesc::is_unlocked() const {
  return mark()->is_unlocked();
}

// True if the mark word carries the biased-locking bias pattern.
bool oopDesc::has_bias_pattern() const {
  return mark()->has_bias_pattern();
}

// Same query on the raw (barrier-free) mark; for GC-internal use.
bool oopDesc::has_bias_pattern_raw() const {
  return mark_raw()->has_bias_pattern();
}
392
// Used only for markSweep, scavenging
// True if the GC mark bit is set; reads the raw mark since this runs
// inside the GC where barriers must be bypassed.
bool oopDesc::is_gc_marked() const {
  return mark_raw()->is_marked();
}
397
// Used by scavengers
// True if the raw mark holds a forwarding pointer (the marked bit doubles
// as the forwarding sentinel; see forward_to()/forwardee()).
bool oopDesc::is_forwarded() const {
  // The extra heap check is needed since the obj might be locked, in which case the
  // mark would point to a stack location and have the sentinel bit cleared
  return mark_raw()->is_marked();
}
404
// Used by scavengers
// Overwrite the raw mark word with an encoded forwarding pointer to p.
// Non-atomic; callers are assumed to have exclusive access during scavenge.
void oopDesc::forward_to(oop p) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  // Archive objects are never forwarded.
  assert(!is_archive_object(oop(this)) &&
         !is_archive_object(p),
         "forwarding archive object");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  // Sanity: decoding must round-trip back to p.
  assert(m->decode_pointer() == p, "encoding must be reversable");
  set_mark_raw(m);
}
418
// Used by parallel scavengers
// Atomically install a forwarding pointer to p via the raw CAS, expecting
// the current mark to be 'compare'. Returns true iff this thread's CAS won.
bool oopDesc::cas_forward_to(oop p, markOop compare) {
  assert(check_obj_alignment(p),
         "forwarding to something not aligned");
  assert(Universe::heap()->is_in_reserved(p),
         "forwarding to something not in heap");
  markOop m = markOopDesc::encode_pointer_as_mark(p);
  // Sanity: decoding must round-trip back to p.
  assert(m->decode_pointer() == p, "encoding must be reversable");
  return cas_set_mark_raw(m, compare) == compare;
}
429
430 #if INCLUDE_ALL_GCS
// Atomically install a forwarding pointer to p in the raw mark word,
// retrying while the mark is still unmarked. Returns NULL if this thread
// installed the pointer, otherwise the forwardee installed by the winner.
oop oopDesc::forward_to_atomic(oop p) {
  markOop oldMark = mark_raw();
  markOop forwardPtrMark = markOopDesc::encode_pointer_as_mark(p);
  markOop curMark;

  assert(forwardPtrMark->decode_pointer() == p, "encoding must be reversable");
  assert(sizeof(markOop) == sizeof(intptr_t), "CAS below requires this.");

  while (!oldMark->is_marked()) {
    curMark = cas_set_mark_raw(forwardPtrMark, oldMark);
    assert(is_forwarded(), "object should have been forwarded");
    if (curMark == oldMark) {
      // This thread won the race; no prior forwardee exists.
      return NULL;
    }
    // If the CAS was unsuccessful then curMark->is_marked()
    // should return true as another thread has CAS'd in another
    // forwarding pointer.
    oldMark = curMark;
  }
  return forwardee();
}
452 #endif
453
454 // Note that the forwardee is not the same thing as the displaced_mark.
455 // The forwardee is used when copying during scavenge and mark-sweep.
456 // It does need to clear the low two locking- and GC-related bits.
457 oop oopDesc::forwardee() const {
458 return (oop) mark_raw()->decode_pointer();
459 }
460
// The following method needs to be MT safe.
// Return the GC age bits from the raw mark, reading from the displaced
// mark when the real mark has been displaced (e.g. by locking).
uint oopDesc::age() const {
  assert(!is_forwarded(), "Attempt to read age from forwarded mark");
  if (has_displaced_mark_raw()) {
    return displaced_mark_raw()->age();
  } else {
    return mark_raw()->age();
  }
}
470
// Bump the GC age bits by one via the raw accessors, writing through the
// displaced mark when the real mark has been displaced.
void oopDesc::incr_age() {
  assert(!is_forwarded(), "Attempt to increment age of forwarded mark");
  if (has_displaced_mark_raw()) {
    set_displaced_mark_raw(displaced_mark_raw()->incr_age());
  } else {
    set_mark_raw(mark_raw()->incr_age());
  }
}
479
480 #if INCLUDE_ALL_GCS
// Parallel-compact: dispatch marking of this object's contents to its Klass.
void oopDesc::pc_follow_contents(ParCompactionManager* cm) {
  klass()->oop_pc_follow_contents(this, cm);
}

// Parallel-compact: update interior pointers, skipping type arrays which
// contain no oops beyond the header.
void oopDesc::pc_update_contents(ParCompactionManager* cm) {
  Klass* k = klass();
  if (!k->is_typeArray_klass()) {
    // It might contain oops beyond the header, so take the virtual call.
    k->oop_pc_update_pointers(this, cm);
  }
  // Else skip it. The TypeArrayKlass in the header never needs scavenging.
}
493
494 void oopDesc::ps_push_contents(PSPromotionManager* pm) {
495 Klass* k = klass();
496 if (!k->is_typeArray_klass()) {
553 OOP_ITERATE_DEFN(OopClosureType, nv_suffix) \
554 OOP_ITERATE_SIZE_DEFN(OopClosureType, nv_suffix) \
555 OOP_ITERATE_BACKWARDS_DEFN(OopClosureType, nv_suffix)
556
557 ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
558 ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)
559
560 intptr_t oopDesc::identity_hash() {
561 // Fast case; if the object is unlocked and the hash value is set, no locking is needed
562 // Note: The mark must be read into local variable to avoid concurrent updates.
563 markOop mrk = mark();
564 if (mrk->is_unlocked() && !mrk->has_no_hash()) {
565 return mrk->hash();
566 } else if (mrk->is_marked()) {
567 return mrk->hash();
568 } else {
569 return slow_identity_hash();
570 }
571 }
572
// Displaced-mark helpers on the raw mark: when an object is locked its real
// mark may live elsewhere; these delegate to the markOop helpers.

// True if the raw mark indicates the real mark is stored elsewhere.
bool oopDesc::has_displaced_mark_raw() const {
  return mark_raw()->has_displaced_mark_helper();
}

// Fetch the displaced mark; only meaningful if has_displaced_mark_raw().
markOop oopDesc::displaced_mark_raw() const {
  return mark_raw()->displaced_mark_helper();
}

// Store m into the displaced mark location.
void oopDesc::set_displaced_mark_raw(markOop m) {
  mark_raw()->set_displaced_mark_helper(m);
}
584
585 #endif // SHARE_VM_OOPS_OOP_INLINE_HPP
|