
src/hotspot/cpu/aarch64/interp_masm_aarch64.cpp

rev 54670 : Port of valuetypes to aarch64


  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #include "precompiled.hpp"
  27 #include "asm/macroAssembler.inline.hpp"
  28 #include "gc/shared/barrierSet.hpp"
  29 #include "gc/shared/barrierSetAssembler.hpp"
  30 #include "interp_masm_aarch64.hpp"
  31 #include "interpreter/interpreter.hpp"
  32 #include "interpreter/interpreterRuntime.hpp"
  33 #include "logging/log.hpp"
  34 #include "oops/arrayOop.hpp"
  35 #include "oops/markOop.hpp"
  36 #include "oops/method.hpp"
  37 #include "oops/methodData.hpp"
  38 #include "oops/valueKlass.hpp"
  39 #include "prims/jvmtiExport.hpp"
  40 #include "prims/jvmtiThreadState.hpp"
  41 #include "runtime/basicLock.hpp"
  42 #include "runtime/biasedLocking.hpp"
  43 #include "runtime/frame.inline.hpp"
  44 #include "runtime/safepointMechanism.hpp"
  45 #include "runtime/sharedRuntime.hpp"
  46 #include "runtime/thread.inline.hpp"
  47 
  48 
  49 void InterpreterMacroAssembler::narrow(Register result) {
  50 
  51   // Get method->_constMethod->_result_type
  52   ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
  53   ldr(rscratch1, Address(rscratch1, Method::const_offset()));
  54   ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
  55 
  56   Label done, notBool, notByte, notChar;
  57 
  58   // common case first
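  // Annotation (illustration, not the source elided from this hunk): a
  // result-type dispatch of this shape typically continues by comparing
  // the tag left in rscratch1 and sign- or zero-extending the 32-bit
  // result, reusing the labels declared above, e.g.:
  cmpw(rscratch1, T_INT);
  br(Assembler::EQ, done);            // T_INT needs no narrowing
  cmpw(rscratch1, T_BOOLEAN);
  br(Assembler::NE, notBool);
  andw(result, result, 0x1);          // boolean: keep only the low bit
  b(done);
  bind(notBool);
  cmpw(rscratch1, T_BYTE);
  br(Assembler::NE, notByte);
  sbfx(result, result, 0, 8);         // byte: sign-extend the low 8 bits
  b(done);
  bind(notByte);
  cmpw(rscratch1, T_CHAR);
  br(Assembler::NE, notChar);
  ubfx(result, result, 0, 16);        // char: zero-extend the low 16 bits
  b(done);
  bind(notChar);
  sbfx(result, result, 0, 16);        // short: sign-extend the low 16 bits
  bind(done);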


 640 
 641     add(c_rarg1, c_rarg1, entry_size); // otherwise advance to next entry
 642     bind(entry);
 643     cmp(c_rarg1, r19); // check if bottom reached
 644     br(Assembler::NE, loop); // if not at bottom then check this entry
 645   }
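  // Annotation: this loop is the tail of the monitor cleanup in
  // remove_activation -- it steps through the BasicObjectLock entries
  // until the bottom of the monitor block (kept in r19) is reached, so
  // that any monitor still locked at method exit is detected and handled
  // before the frame is torn down.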
 646 
 647   bind(no_unlock);
 648 
 649   // jvmti support
 650   if (notify_jvmdi) {
 651     notify_method_exit(state, NotifyJVMTI);    // preserve TOSCA
 652   } else {
 653     notify_method_exit(state, SkipNotifyJVMTI); // preserve TOSCA
 654   }
 655 
 656   // remove activation
 657   // get sender esp
 658   ldr(esp,
 659       Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
 660 
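  // Annotation: with StackReservedPages > 0 the reserved stack area of
  // JEP 270 is in effect. If the sender's esp lies above the thread's
  // reserved_stack_activation watermark, this activation used the
  // reserved zone: re-arm it via SharedRuntime::enable_stack_reserved_zone,
  // then throw the delayed StackOverflowError.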
 661   if (StackReservedPages > 0) {
 662     // testing if reserved zone needs to be re-enabled
 663     Label no_reserved_zone_enabling;
 664 
 665     ldr(rscratch1, Address(rthread, JavaThread::reserved_stack_activation_offset()));
 666     cmp(esp, rscratch1);
 667     br(Assembler::LS, no_reserved_zone_enabling);
 668 
 669     call_VM_leaf(
 670       CAST_FROM_FN_PTR(address, SharedRuntime::enable_stack_reserved_zone), rthread);
 671     call_VM(noreg, CAST_FROM_FN_PTR(address,
 672                    InterpreterRuntime::throw_delayed_StackOverflowError));
 673     should_not_reach_here();
 674 
 675     bind(no_reserved_zone_enabling);
 676   }
 677 
 678   if (state == atos && ValueTypeReturnedAsFields) {
 679     Label skip;
 680 
 681     // Test if the return type is a value type
 682     ldr(rscratch1, Address(rfp, frame::interpreter_frame_method_offset * wordSize));
 683     ldr(rscratch1, Address(rscratch1, Method::const_offset()));
 684     ldrb(rscratch1, Address(rscratch1, ConstMethod::result_type_offset()));
 685     cmpw(rscratch1, (u1) T_VALUETYPE);
 686     br(Assembler::NE, skip);
 687    
 688     // We are returning a value type: load its fields into the return
 689     // registers via the value-class-specific unpack handler, if any.
 690 
 691     load_klass(rscratch1 /*dst*/, r0 /*src*/);
 692     ldr(rscratch1, Address(rscratch1, InstanceKlass::adr_valueklass_fixed_block_offset()));
 693     ldr(rscratch1, Address(rscratch1, ValueKlass::unpack_handler_offset()));
 694     cbz(rscratch1, skip); 
 695    
 696     blrt(rscratch1, 1, 0, 0);
 697 
 698     bind(skip);
 699   }
 700 
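  // Annotation (hypothetical sketch, not generated HotSpot code): for a
  // value class with fields {int x; long y}, an unpack handler conceptually
  // performs loads like the following, with field offsets and target
  // registers dictated by the class's scalarized calling convention;
  // x_off and y_off are invented names for illustration.
  //   ldrw(j_rarg0, Address(r0, x_off));   // int field into its return register
  //   ldr(j_rarg1, Address(r0, y_off));    // long field into the next register
  //   ret(lr);                             // r0 still holds the buffered oop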
 701   // remove frame anchor
 702   leave();
 703   // If we're returning to interpreted code we will shortly be
 704   // adjusting SP to allow some space for ESP.  If we're returning to
 705   // compiled code the saved sender SP was saved in sender_sp, so this
 706   // restores it.
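  // Annotation: AArch64 requires sp to be 16-byte aligned whenever it is
  // used to access memory, hence the mask below instead of a plain move.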
 707   andr(sp, esp, -16);
 708 }
 709 
 710 // Lock object
 711 //
 712 // Args:
 713 //      c_rarg1: BasicObjectLock to be used for locking
 714 //
 715 // Kills:
 716 //      r0
 717 //      c_rarg0, c_rarg1, c_rarg2, c_rarg3, .. (param regs)
 718 //      rscratch1, rscratch2 (scratch regs)
 719 void InterpreterMacroAssembler::lock_object(Register lock_reg)
 720 {


 733     const int obj_offset = BasicObjectLock::obj_offset_in_bytes();
 734     const int lock_offset = BasicObjectLock::lock_offset_in_bytes ();
 735     const int mark_offset = lock_offset +
 736                             BasicLock::displaced_header_offset_in_bytes();
 737 
 738     Label slow_case;
 739 
 740     // Load object pointer into obj_reg %c_rarg3
 741     ldr(obj_reg, Address(lock_reg, obj_offset));
 742 
 743     if (UseBiasedLocking) {
 744       biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp, false, done, &slow_case);
 745     }
 746 
 747     // Load (object->mark() | 1) into swap_reg
 748     ldr(rscratch1, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
 749     orr(swap_reg, rscratch1, 1);
 750 
 751     // Save (object->mark() | 1) into BasicLock's displaced header
 752     str(swap_reg, Address(lock_reg, mark_offset));
 753 
 754     if (EnableValhalla && !UseBiasedLocking) {
 755       // Buffered values are "always locked": their mark word carries the biased-lock bit. Clearing it from the expected value makes the CAS below fail for them and take slow_case (the bit is never legitimately set when !UseBiasedLocking).
 756       andr(swap_reg, swap_reg, ~markOopDesc::biased_lock_bit_in_place);
 757     }
 758 
 759     assert(lock_offset == 0,
 760            "displaced header must be first word in BasicObjectLock");
 761 
 762     Label fail;
 763     if (PrintBiasedLockingStatistics) {
 764       Label fast;
 765       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, fast, &fail);
 766       bind(fast);
 767       atomic_incw(Address((address)BiasedLocking::fast_path_entry_count_addr()),
 768                   rscratch2, rscratch1, tmp);
 769       b(done);
 770       bind(fail);
 771     } else {
 772       cmpxchg_obj_header(swap_reg, lock_reg, obj_reg, rscratch1, done, /*fallthrough*/NULL);
 773     }
 774 
 775     // Test if the oopMark is an obvious stack pointer, i.e.,
 776     //  1) (mark & 7) == 0, and
 777     //  2) rsp <= mark < mark + os::pagesize()
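  // Annotation (sketch of the continuation elided from this hunk, assuming
  // the failed compare-and-exchange left the mark in swap_reg and that sp
  // and the page size both have their low 3 bits clear): the two conditions
  // collapse into a single masked subtraction.
  mov(rscratch1, sp);                   // sub cannot take sp as an operand directly
  sub(swap_reg, swap_reg, rscratch1);   // mark - sp
  ands(swap_reg, swap_reg, (uint64_t)(7 - os::vm_page_size()));
  str(swap_reg, Address(lock_reg, mark_offset)); // zero marks the recursive case
  br(Assembler::EQ, done);              // recursive stack lock: success
  b(slow_case);                         // otherwise call into the runtime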

