528 // get receiver (assume this is frequent case)
529 __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
// NOTE(review): movptr does not alter EFLAGS, so the jcc below consumes
// condition codes set by a test above this excerpt (presumably
// access_flags & JVM_ACC_STATIC -- confirm against the full function).
// Zero => non-static method: rax already holds the receiver (local 0).
530 __ jcc(Assembler::zero, done);
// Static method: synchronize on the class mirror instead of a receiver.
// Walk Method* -> ConstMethod* -> ConstantPool* -> pool_holder (Klass*),
// then load the java.lang.Class mirror at mirror_offset.
531 __ movptr(rax, Address(rbx, Method::const_offset()));
532 __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
533 __ movptr(rax, Address(rax,
534 ConstantPool::pool_holder_offset_in_bytes()));
535 __ movptr(rax, Address(rax, mirror_offset));
536
537 #ifdef ASSERT
538 {
539 Label L;
// Debug builds only: the synchronization object must never be NULL here.
540 __ testptr(rax, rax);
541 __ jcc(Assembler::notZero, L);
542 __ stop("synchronization object is NULL");
543 __ bind(L);
544 }
545 #endif // ASSERT
546
547 __ bind(done);
548 }
549
// rax now holds the oop to lock (receiver or class mirror). Carve a
// BasicObjectLock out of the interpreter stack and lock it.
550 // add space for monitor & lock
551 __ subptr(rsp, entry_size); // add space for a monitor entry
552 __ movptr(monitor_block_top, rsp); // set new monitor block top
553 // store object
554 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
555 __ movptr(c_rarg1, rsp); // object address
556 __ lock_object(c_rarg1);
557 }
558
559 // Generate a fixed interpreter frame. This is identical setup for
560 // interpreted methods and for native methods hence the shared code.
561 //
562 // Args:
563 // rax: return address
564 // rbx: Method*
565 // r14: pointer to locals
566 // r13: sender sp
567 // rdx: cp cache
620 // update buffer.
621 // If the code for the getfield template is modified so that the
622 // G1 pre-barrier code is executed when the current method is
623 // Reference.get() then going through the normal method entry
624 // will be fine.
625 // * The G1 code can, however, check the receiver object (the instance
626 // of java.lang.Reference) and jump to the slow path if null. If the
627 // Reference object is null then we obviously cannot fetch the referent
628 // and so we don't need to call the G1 pre-barrier. Thus we can use the
629 // regular method entry code to generate the NPE.
630 //
631 // rbx: Method*
632
633 // r13: senderSP must preserve for slow path, set SP to it on fast path
634
635 address entry = __ pc();
636
637 const int referent_offset = java_lang_ref_Reference::referent_offset;
638 guarantee(referent_offset > 0, "referent offset not initialized");
639
// Only G1 needs the SATB pre-barrier on Reference.get(); other collectors
// fall through to the ordinary entry (not visible in this excerpt).
640 if (UseG1GC) {
641 Label slow_path;
642 // rbx: method
643
644 // Check if local 0 != NULL
645 // If the receiver is null then it is OK to jump to the slow path.
// NOTE(review): local 0 (the receiver) is read at rsp + wordSize here,
// i.e. one word above the return address -- confirm the stack layout
// assumed for this frameless entry in the full file.
646 __ movptr(rax, Address(rsp, wordSize));
647
648 __ testptr(rax, rax);
649 __ jcc(Assembler::zero, slow_path);
650
651 // rax: local 0
652 // rbx: method (but can be used as scratch now)
653 // rdx: scratch
654 // rdi: scratch
655
656 // Fast path: load the referent and record it in the SATB buffer so
657 // G1's concurrent marking does not lose a reachable referent.
658
659 // Load the value of the referent field.
660 const Address field_address(rax, referent_offset);
661 __ load_heap_oop(rax, field_address);
662
663 // Generate the G1 pre-barrier code to log the value of
664 // the referent field in an SATB buffer.
665 __ g1_write_barrier_pre(noreg /* obj */,
666 rax /* pre_val */,
667 r15_thread /* thread */,
668 rbx /* tmp */,
669 true /* tosca_live */,
670 true /* expand_call */);
// NOTE(review): the compare that sets EFLAGS for this jcc is above this
// excerpt (presumably a safepoint-state check guarding the frameless
// fast path) -- confirm against the full function.
761 __ jcc(Assembler::notEqual, slow_path);
762
763 // We don't generate local frame and don't align stack because
764 // we call stub code and there is no safepoint on this path.
765
766 // Load parameters
767 const Register crc = c_rarg0; // crc
768 const Register buf = c_rarg1; // source java byte array address
769 const Register len = c_rarg2; // length
770 const Register off = len; // offset (never overlaps with 'len')
771
772 // Arguments are reversed on java expression stack
773 // Calculate address of start element
// updateByteBuffer passes a raw 'long' address; the initial CRC sits one
// slot higher (5*wordSize) than in the byte[] variant below -- presumably
// because the long occupies two expression-stack slots (TODO confirm).
774 if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
775 __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
776 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
777 __ addq(buf, off); // + offset
778 __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC
779 } else {
// byte[] variant: skip the array header to reach element 0, then add
// the caller-supplied offset.
780 __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
781 __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
782 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
783 __ addq(buf, off); // + offset
784 __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC
785 }
786 // Can now load 'len' since we're finished with 'off'
787 __ movl(len, Address(rsp, wordSize)); // Length
788
// Leaf call into the CRC32 stub: no frame, no oop map, no safepoint.
789 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
790 // result in rax
791
792 // _areturn
793 __ pop(rdi); // get return address
794 __ mov(rsp, r13); // set sp to sender sp
795 __ jmp(rdi);
796
797 // generate a vanilla native entry as the slow path
798 __ bind(slow_path);
799
800 (void) generate_native_entry(false);
|
528 // get receiver (assume this is frequent case)
529 __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
// NOTE(review): movptr does not alter EFLAGS, so the jcc below consumes
// condition codes set by a test above this excerpt (presumably
// access_flags & JVM_ACC_STATIC -- confirm against the full function).
// Zero => non-static method: rax already holds the receiver (local 0).
530 __ jcc(Assembler::zero, done);
// Static method: synchronize on the class mirror instead of a receiver.
// Walk Method* -> ConstMethod* -> ConstantPool* -> pool_holder (Klass*),
// then load the java.lang.Class mirror at mirror_offset.
531 __ movptr(rax, Address(rbx, Method::const_offset()));
532 __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
533 __ movptr(rax, Address(rax,
534 ConstantPool::pool_holder_offset_in_bytes()));
535 __ movptr(rax, Address(rax, mirror_offset));
536
537 #ifdef ASSERT
538 {
539 Label L;
// Debug builds only: the synchronization object must never be NULL here.
540 __ testptr(rax, rax);
541 __ jcc(Assembler::notZero, L);
542 __ stop("synchronization object is NULL");
543 __ bind(L);
544 }
545 #endif // ASSERT
546
547 __ bind(done);
// Shenandoah addition: run the barrier set's interpreter write barrier on
// the oop in rax before it is stored into the monitor below -- presumably
// to resolve a possibly-forwarded object for the upcoming write/lock
// (NOTE(review): confirm barrier semantics against the barrier-set impl).
548 oopDesc::bs()->interpreter_write_barrier(_masm, rax);
549 }
550
// rax now holds the oop to lock (receiver or class mirror). Carve a
// BasicObjectLock out of the interpreter stack and lock it.
551 // add space for monitor & lock
552 __ subptr(rsp, entry_size); // add space for a monitor entry
553 __ movptr(monitor_block_top, rsp); // set new monitor block top
554 // store object
555 __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
556 __ movptr(c_rarg1, rsp); // object address
557 __ lock_object(c_rarg1);
558 }
559
560 // Generate a fixed interpreter frame. This is identical setup for
561 // interpreted methods and for native methods hence the shared code.
562 //
563 // Args:
564 // rax: return address
565 // rbx: Method*
566 // r14: pointer to locals
567 // r13: sender sp
568 // rdx: cp cache
621 // update buffer.
622 // If the code for the getfield template is modified so that the
623 // G1 pre-barrier code is executed when the current method is
624 // Reference.get() then going through the normal method entry
625 // will be fine.
626 // * The G1 code can, however, check the receiver object (the instance
627 // of java.lang.Reference) and jump to the slow path if null. If the
628 // Reference object is null then we obviously cannot fetch the referent
629 // and so we don't need to call the G1 pre-barrier. Thus we can use the
630 // regular method entry code to generate the NPE.
631 //
632 // rbx: Method*
633
634 // r13: senderSP must preserve for slow path, set SP to it on fast path
635
636 address entry = __ pc();
637
638 const int referent_offset = java_lang_ref_Reference::referent_offset;
639 guarantee(referent_offset > 0, "referent offset not initialized");
640
// Shenandoah addition: Shenandoah takes this pre-barrier path as well as
// G1 -- presumably because it also uses SATB-style concurrent marking
// (NOTE(review): confirm against the Shenandoah barrier design docs).
641 if (UseG1GC || UseShenandoahGC) {
642 Label slow_path;
643 // rbx: method
644
645 // Check if local 0 != NULL
646 // If the receiver is null then it is OK to jump to the slow path.
// NOTE(review): local 0 (the receiver) is read at rsp + wordSize here,
// i.e. one word above the return address -- confirm the stack layout
// assumed for this frameless entry in the full file.
647 __ movptr(rax, Address(rsp, wordSize));
648
649 __ testptr(rax, rax);
650 __ jcc(Assembler::zero, slow_path);
651
// Shenandoah addition: resolve the (known non-null) receiver through the
// barrier set before dereferencing its referent field below -- presumably
// the object may be forwarded during concurrent GC (TODO confirm).
652 oopDesc::bs()->interpreter_read_barrier_not_null(_masm, rax);
653
654 // rax: local 0
655 // rbx: method (but can be used as scratch now)
656 // rdx: scratch
657 // rdi: scratch
658
659 // Fast path: load the referent and record it in the SATB buffer so
660 // concurrent marking does not lose a reachable referent.
661
662 // Load the value of the referent field.
663 const Address field_address(rax, referent_offset);
664 __ load_heap_oop(rax, field_address);
665
666 // Generate the G1 pre-barrier code to log the value of
667 // the referent field in an SATB buffer.
668 __ g1_write_barrier_pre(noreg /* obj */,
669 rax /* pre_val */,
670 r15_thread /* thread */,
671 rbx /* tmp */,
672 true /* tosca_live */,
673 true /* expand_call */);
// NOTE(review): the compare that sets EFLAGS for this jcc is above this
// excerpt (presumably a safepoint-state check guarding the frameless
// fast path) -- confirm against the full function.
764 __ jcc(Assembler::notEqual, slow_path);
765
766 // We don't generate local frame and don't align stack because
767 // we call stub code and there is no safepoint on this path.
768
769 // Load parameters
770 const Register crc = c_rarg0; // crc
771 const Register buf = c_rarg1; // source java byte array address
772 const Register len = c_rarg2; // length
773 const Register off = len; // offset (never overlaps with 'len')
774
775 // Arguments are reversed on java expression stack
776 // Calculate address of start element
// updateByteBuffer passes a raw 'long' address; no read barrier is
// applied in that branch (it is not an oop), and the initial CRC sits
// one slot higher (5*wordSize) than in the byte[] variant below --
// presumably because the long occupies two stack slots (TODO confirm).
777 if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
778 __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
779 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
780 __ addq(buf, off); // + offset
781 __ movl(crc, Address(rsp, 5*wordSize)); // Initial CRC
782 } else {
783 __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
// Shenandoah addition: resolve the (non-null) array oop through the
// barrier set before computing element addresses from it -- presumably
// it may be forwarded during concurrent GC (TODO confirm).
784 oopDesc::bs()->interpreter_read_barrier_not_null(_masm, buf);
785 __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
786 __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
787 __ addq(buf, off); // + offset
788 __ movl(crc, Address(rsp, 4*wordSize)); // Initial CRC
789 }
790 // Can now load 'len' since we're finished with 'off'
791 __ movl(len, Address(rsp, wordSize)); // Length
792
// Leaf call into the CRC32 stub: no frame, no oop map, no safepoint.
793 __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
794 // result in rax
795
796 // _areturn
797 __ pop(rdi); // get return address
798 __ mov(rsp, r13); // set sp to sender sp
799 __ jmp(rdi);
800
801 // generate a vanilla native entry as the slow path
802 __ bind(slow_path);
803
804 (void) generate_native_entry(false);
|