// NOTE(review): excerpt of InterpreterMacroAssembler::remove_activation
// (AArch64 template interpreter). The function's head and tail are outside
// this view; the leading number on each line is the original file's line
// number left over from extraction, not code.
506 ldr(r2, Address(r1, Method::access_flags_offset()));
// If the method is not declared synchronized there is no monitor to unlock.
507 tbz(r2, exact_log2(JVM_ACC_SYNCHRONIZED), unlocked);
508
509 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
510 // is set.
// r3 presumably holds that thread-local flag, loaded before this excerpt —
// TODO confirm against the full function.
511 cbnz(r3, no_unlock);
512
513 // unlock monitor
514 push(state); // save result
515
516 // BasicObjectLock will be first in list, since this is a
517 // synchronized method. However, need to check that the object has
518 // not been unlocked by an explicit monitorexit bytecode.
519 const Address monitor(rfp, frame::interpreter_frame_initial_sp_offset *
520 wordSize - (int) sizeof(BasicObjectLock));
521 // We use c_rarg1 so that if we go slow path it will be the correct
522 // register for unlock_object to pass to VM directly
523 lea(c_rarg1, monitor); // address of first monitor
524
525 ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
// Non-null obj slot => monitor still locked; go release it.
526 cbnz(r0, unlock);
527
// obj slot is null: monitor was already released (explicit monitorexit).
528 pop(state);
529 if (throw_monitor_exception) {
530 // Entry already unlocked, need to throw exception
531 call_VM(noreg, CAST_FROM_FN_PTR(address,
532 InterpreterRuntime::throw_illegal_monitor_state_exception));
533 should_not_reach_here();
534 } else {
535 // Monitor already unlocked during a stack unroll. If requested,
536 // install an illegal_monitor_state_exception. Continue with
537 // stack unrolling.
538 if (install_monitor_exception) {
539 call_VM(noreg, CAST_FROM_FN_PTR(address,
540 InterpreterRuntime::new_illegal_monitor_state_exception));
541 }
542 b(unlocked);
543 }
544
545 bind(unlock);
// NOTE(review): original lines 546-582 are elided from this excerpt; the
// code below (583+) is a later handler inside the same function.
583 // Stack unrolling. Unlock object and install illegal_monitor_exception.
584 // Unlock does not block, so don't have to worry about the frame.
585 // We don't have to preserve c_rarg1 since we are going to throw an exception.
586
587 push(state);
588 unlock_object(c_rarg1);
589 pop(state);
590
591 if (install_monitor_exception) {
592 call_VM(noreg, CAST_FROM_FN_PTR(address,
593 InterpreterRuntime::
594 new_illegal_monitor_state_exception));
595 }
596
597 b(restart);
598 }
599
// Scan remaining monitor entries; c_rarg1 walks toward the bottom marker
// held in r19. Any still-used entry (non-null obj) is an error.
600 bind(loop);
601 // check if current entry is used
602 ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
603 cbnz(rscratch1, exception);
604
605 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
606 bind(entry);
607 cmp(c_rarg1, r19); // check if bottom reached
608 br(Assembler::NE, loop); // if not at bottom then check this entry
609 }
610
611 bind(no_unlock);
612
613 // jvmti support
614 if (notify_jvmdi) {
615 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
616 } else {
617 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
618 }
619
620 // remove activation
621 // get sender esp
622 ldr(esp,
// NOTE(review): excerpt cut mid-statement here; the rest of the load and
// the remainder of the function are outside this view.
// NOTE(review): excerpt of InterpreterMacroAssembler::lock_object (AArch64).
// It begins inside the UseHeavyMonitors branch (the "if" header is above
// this view): heavy monitors delegate straight to the runtime.
646 call_VM(noreg,
647 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
648 lock_reg);
649 } else {
// Fast path: stack-lock by saving a displaced mark word in the BasicLock,
// then CAS-ing the BasicLock address into the object header.
650 Label done;
651
652 const Register swap_reg = r0;
653 const Register tmp = c_rarg2;
654 const Register obj_reg = c_rarg3; // Will contain the oop
655
656 const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
657 const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
658 const int mark_offset = lock_offset +
659 BasicLock::displaced_header_offset_in_bytes();
660
661 Label slow_case;
662
663 // Load object pointer into obj_reg %c_rarg3
664 ldr(obj_reg, Address(lock_reg, obj_offset));
665
// Shenandoah GC verification hook on the oop — semantics defined elsewhere;
// presumably validates the object address, confirm in shenandoah support code.
666 shenandoah_store_check(obj_reg);
667
668 if (UseBiasedLocking) {
669 biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case);
670 }
671
672 // Load (object->mark() | 1) into swap_reg
673 ldr(rscratch1, Address(obj_reg, 0));
674 orr(swap_reg, rscratch1, 1);
675
676 // Save (object->mark() | 1) into BasicLock's displaced header
677 str(swap_reg, Address(lock_reg, mark_offset));
678
679 assert(lock_offset == 0,
680 "displached header must be first word in BasicObjectLock");
// NOTE(review): "displached" typo is in the original assert message; left
// as-is since this edit only adds comments.
681
682 Label fail;
683 if (PrintBiasedLockingStatistics) {
684 Label fast;
// CAS the object's mark: expected unlocked mark (swap_reg), new value the
// BasicLock address (lock_reg) — arg order per cmpxchgptr, confirm.
685 cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
686 bind(fast);
// NOTE(review): excerpt ends here; statistics update and slow_case handling
// continue outside this view.
// NOTE(review): excerpt of InterpreterMacroAssembler::unlock_object
// (AArch64); lock_reg points at the BasicObjectLock to release.
747 if (UseHeavyMonitors) {
748 call_VM(noreg,
749 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
750 lock_reg);
751 } else {
752 Label done;
753
754 const Register swap_reg = r0;
755 const Register header_reg = c_rarg2; // Will contain the old oopMark
756 const Register obj_reg = c_rarg3; // Will contain the oop
757
758 save_bcp(); // Save in case of exception
759
760 // Convert from BasicObjectLock structure to object and BasicLock
761 // structure Store the BasicLock address into %r0
762 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
763
764 // Load oop into obj_reg(%c_rarg3)
765 ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
766
// Shenandoah GC verification hook on the oop — semantics defined elsewhere;
// confirm in shenandoah support code.
767 shenandoah_store_check(obj_reg);
768
769 // Free entry
770 str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
771
772 if (UseBiasedLocking) {
773 biased_locking_exit(obj_reg, header_reg, done);
774 }
775
776 // Load the old header from BasicLock structure
777 ldr(header_reg, Address(swap_reg,
778 BasicLock::displaced_header_offset_in_bytes()));
779
780 // Test for recursion
// A null displaced header marks a recursive stack-lock: nothing to restore.
781 cbz(header_reg, done);
782
783 // Atomic swap back the old header
// CAS the object header back from the BasicLock address to the saved mark;
// on failure fall through to the runtime slow path below.
784 cmpxchgptr(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
785
786 // Call the runtime routine for slow case.
787 str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
// NOTE(review): excerpt ends here; the runtime call and bind(done) epilogue
// are outside this view.
|
// NOTE(review): second pane of the diff dump — same remove_activation
// excerpt as original lines 506-622 earlier in this file, but with
// shenandoah_store_addr_check invariant checks added after the oop loads
// (lines 526 and 604 here). Leading numbers are extraction artifacts.
506 ldr(r2, Address(r1, Method::access_flags_offset()));
// If the method is not declared synchronized there is no monitor to unlock.
507 tbz(r2, exact_log2(JVM_ACC_SYNCHRONIZED), unlocked);
508
509 // Don't unlock anything if the _do_not_unlock_if_synchronized flag
510 // is set.
// r3 presumably holds that thread-local flag, loaded before this excerpt —
// TODO confirm against the full function.
511 cbnz(r3, no_unlock);
512
513 // unlock monitor
514 push(state); // save result
515
516 // BasicObjectLock will be first in list, since this is a
517 // synchronized method. However, need to check that the object has
518 // not been unlocked by an explicit monitorexit bytecode.
519 const Address monitor(rfp, frame::interpreter_frame_initial_sp_offset *
520 wordSize - (int) sizeof(BasicObjectLock));
521 // We use c_rarg1 so that if we go slow path it will be the correct
522 // register for unlock_object to pass to VM directly
523 lea(c_rarg1, monitor); // address of first monitor
524
525 ldr(r0, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
// Shenandoah oop-address invariant check — semantics defined elsewhere;
// confirm in shenandoah support code.
526 shenandoah_store_addr_check(r0); // Invariant
// Non-null obj slot => monitor still locked; go release it.
527 cbnz(r0, unlock);
528
// obj slot is null: monitor was already released (explicit monitorexit).
529 pop(state);
530 if (throw_monitor_exception) {
531 // Entry already unlocked, need to throw exception
532 call_VM(noreg, CAST_FROM_FN_PTR(address,
533 InterpreterRuntime::throw_illegal_monitor_state_exception));
534 should_not_reach_here();
535 } else {
536 // Monitor already unlocked during a stack unroll. If requested,
537 // install an illegal_monitor_state_exception. Continue with
538 // stack unrolling.
539 if (install_monitor_exception) {
540 call_VM(noreg, CAST_FROM_FN_PTR(address,
541 InterpreterRuntime::new_illegal_monitor_state_exception));
542 }
543 b(unlocked);
544 }
545
546 bind(unlock);
// NOTE(review): original lines 547-583 are elided from this excerpt; the
// code below (584+) is a later handler inside the same function.
584 // Stack unrolling. Unlock object and install illegal_monitor_exception.
585 // Unlock does not block, so don't have to worry about the frame.
586 // We don't have to preserve c_rarg1 since we are going to throw an exception.
587
588 push(state);
589 unlock_object(c_rarg1);
590 pop(state);
591
592 if (install_monitor_exception) {
593 call_VM(noreg, CAST_FROM_FN_PTR(address,
594 InterpreterRuntime::
595 new_illegal_monitor_state_exception));
596 }
597
598 b(restart);
599 }
600
// Scan remaining monitor entries; c_rarg1 walks toward the bottom marker
// held in r19. Any still-used entry (non-null obj) is an error.
601 bind(loop);
602 // check if current entry is used
603 ldr(rscratch1, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
// Shenandoah oop-address invariant check on the scanned entry.
604 shenandoah_store_addr_check(rscratch1); // Invariant
605 cbnz(rscratch1, exception);
606
607 add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
608 bind(entry);
609 cmp(c_rarg1, r19); // check if bottom reached
610 br(Assembler::NE, loop); // if not at bottom then check this entry
611 }
612
613 bind(no_unlock);
614
615 // jvmti support
616 if (notify_jvmdi) {
617 notify_method_exit(state, NotifyJVMTI); // preserve TOSCA
618 } else {
619 notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
620 }
621
622 // remove activation
623 // get sender esp
624 ldr(esp,
// NOTE(review): excerpt cut mid-statement here; the rest of the load and
// the remainder of the function are outside this view.
// NOTE(review): second pane of the diff dump — same lock_object excerpt as
// original lines 646-686 earlier in this file, but with the oop check
// renamed to shenandoah_store_addr_check (line 668 here). It begins inside
// the UseHeavyMonitors branch: heavy monitors delegate to the runtime.
648 call_VM(noreg,
649 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
650 lock_reg);
651 } else {
// Fast path: stack-lock by saving a displaced mark word in the BasicLock,
// then CAS-ing the BasicLock address into the object header.
652 Label done;
653
654 const Register swap_reg = r0;
655 const Register tmp = c_rarg2;
656 const Register obj_reg = c_rarg3; // Will contain the oop
657
658 const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
659 const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
660 const int mark_offset = lock_offset +
661 BasicLock::displaced_header_offset_in_bytes();
662
663 Label slow_case;
664
665 // Load object pointer into obj_reg %c_rarg3
666 ldr(obj_reg, Address(lock_reg, obj_offset));
667
// Shenandoah oop-address invariant check — semantics defined elsewhere;
// confirm in shenandoah support code.
668 shenandoah_store_addr_check(obj_reg);
669
670 if (UseBiasedLocking) {
671 biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case);
672 }
673
674 // Load (object->mark() | 1) into swap_reg
675 ldr(rscratch1, Address(obj_reg, 0));
676 orr(swap_reg, rscratch1, 1);
677
678 // Save (object->mark() | 1) into BasicLock's displaced header
679 str(swap_reg, Address(lock_reg, mark_offset));
680
681 assert(lock_offset == 0,
682 "displached header must be first word in BasicObjectLock");
// NOTE(review): "displached" typo is in the original assert message; left
// as-is since this edit only adds comments.
683
684 Label fail;
685 if (PrintBiasedLockingStatistics) {
686 Label fast;
// CAS the object's mark: expected unlocked mark (swap_reg), new value the
// BasicLock address (lock_reg) — arg order per cmpxchgptr, confirm.
687 cmpxchgptr(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
688 bind(fast);
// NOTE(review): excerpt ends here; statistics update and slow_case handling
// continue outside this view.
// NOTE(review): second pane of the diff dump — same unlock_object excerpt
// as original lines 747-787 earlier in this file, but with the oop check
// renamed to shenandoah_store_addr_check (line 769 here). lock_reg points
// at the BasicObjectLock to release.
749 if (UseHeavyMonitors) {
750 call_VM(noreg,
751 CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
752 lock_reg);
753 } else {
754 Label done;
755
756 const Register swap_reg = r0;
757 const Register header_reg = c_rarg2; // Will contain the old oopMark
758 const Register obj_reg = c_rarg3; // Will contain the oop
759
760 save_bcp(); // Save in case of exception
761
762 // Convert from BasicObjectLock structure to object and BasicLock
763 // structure Store the BasicLock address into %r0
764 lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
765
766 // Load oop into obj_reg(%c_rarg3)
767 ldr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
768
// Shenandoah oop-address invariant check — semantics defined elsewhere;
// confirm in shenandoah support code.
769 shenandoah_store_addr_check(obj_reg);
770
771 // Free entry
772 str(zr, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
773
774 if (UseBiasedLocking) {
775 biased_locking_exit(obj_reg, header_reg, done);
776 }
777
778 // Load the old header from BasicLock structure
779 ldr(header_reg, Address(swap_reg,
780 BasicLock::displaced_header_offset_in_bytes()));
781
782 // Test for recursion
// A null displaced header marks a recursive stack-lock: nothing to restore.
783 cbz(header_reg, done);
784
785 // Atomic swap back the old header
// CAS the object header back from the BasicLock address to the saved mark;
// on failure fall through to the runtime slow path below.
786 cmpxchgptr(swap_reg, header_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
787
788 // Call the runtime routine for slow case.
789 str(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes())); // restore obj
// NOTE(review): excerpt ends here; the runtime call and bind(done) epilogue
// are outside this view.
|