
src/hotspot/cpu/aarch64/sharedRuntime_aarch64.cpp

rev 58823 : [mq]: aarch64-jdk-nmethod-barriers-3.patch


The first hunk is in the c2i adapter generator (new file, around line 715). A pre-existing class-initialization barrier protects static methods:

  // Class initialization barrier for static methods
  address c2i_no_clinit_check_entry = NULL;
  if (VM_Version::supports_fast_class_init_checks()) {
    Label L_skip_barrier;

    { // Bypass the barrier for non-static methods
      __ ldrw(rscratch1, Address(rmethod, Method::access_flags_offset()));
      __ andsw(zr, rscratch1, JVM_ACC_STATIC);
      __ br(Assembler::EQ, L_skip_barrier); // non-static
    }

    __ load_method_holder(rscratch2, rmethod);
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
    c2i_no_clinit_check_entry = __ pc();
  }
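For context, the fast path that clinit_barrier emits amounts to checking the holder's initialization state. A minimal C++ sketch of that logic, using illustrative accessor names (init_state() and init_thread() are stand-ins, not the exact HotSpot API):

  // Sketch only: the emitted check lets execution proceed if the holder
  // is fully initialized, or is being initialized by the current thread
  // (so <clinit> and anything it calls are not blocked).
  bool can_skip_clinit_barrier(InstanceKlass* holder, Thread* current) {
    return holder->init_state() == InstanceKlass::fully_initialized // stand-in
        || holder->init_thread() == current;                        // stand-in
  }

Everything else falls through to the handle_wrong_method stub, whose runtime half re-resolves the call and lets class initialization run as needed.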
Immediately after that barrier, this patch adds the c2i entry barrier:

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->c2i_entry_barrier(masm);
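Why the c2i path needs a barrier at all: with concurrent class unloading, compiled code can reach the adapter while the callee's metadata is in the middle of being unloaded. A hedged sketch of the guard's intent (is_concurrently_unloading and jump_to are illustrative names, not the HotSpot API):

  // Sketch only: refuse to dispatch into a method that may be
  // concurrently unloading; re-resolve through the runtime instead.
  void c2i_entry_barrier_logic(Method* m) {
    if (m == NULL || is_concurrently_unloading(m)) {          // stand-in
      jump_to(SharedRuntime::get_handle_wrong_method_stub()); // re-resolve
    }
  }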
  gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);

  __ flush();
  return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry, c2i_no_clinit_check_entry);
}
The same hunk also shows the start of the C calling convention (the excerpt is cut off mid-array in the original page):

int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
                                        VMRegPair *regs,
                                        VMRegPair *regs2,
                                        int total_args_passed) {
  assert(regs2 == NULL, "not needed on AArch64");

  // We return the amount of VMRegImpl stack slots we need to reserve for
  // all the arguments NOT counting out_preserve_stack_slots.

  static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
    c_rarg0, c_rarg1, c_rarg2, c_rarg3, c_rarg4, c_rarg5, c_rarg6, c_rarg7
  };
  static const FloatRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {
    c_farg0, c_farg1, c_farg2, c_farg3, ...
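To make the convention concrete, here is a hedged sketch of how such a routine walks a signature under the AArch64 C ABI: the first eight integer/pointer arguments get c_rarg0..c_rarg7, the first eight floating-point arguments get the FP argument registers, and everything else spills to stack slots. The loop is illustrative, not the exact HotSpot code:

  // Illustrative only: AAPCS64-style assignment over the signature.
  int int_args = 0, fp_args = 0, stk_slots = 0;
  for (int i = 0; i < total_args_passed; i++) {
    switch (sig_bt[i]) {
      case T_VOID:
        break; // second half of a long/double takes no new location
      case T_FLOAT: case T_DOUBLE:
        if (fp_args < Argument::n_float_register_parameters_c) {
          // regs[i] = FP_ArgReg[fp_args++], as a register pair
        } else {
          stk_slots += 2; // spill to the native out-arg area
        }
        break;
      default: // integral, pointer and oop-like kinds
        if (int_args < Argument::n_int_register_parameters_c) {
          // regs[i] = INT_ArgReg[int_args++], as a register pair
        } else {
          stk_slots += 2;
        }
        break;
    }
  }
  // the function's result is the stack-slot count, stk_slots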


The second hunk is in the native wrapper generator (new file, around line 1489). First, the pre-existing class-initialization barrier for static native methods:

  if (VM_Version::supports_fast_class_init_checks() && method->needs_clinit_barrier()) {
    Label L_skip_barrier;
    __ mov_metadata(rscratch2, method->method_holder()); // InstanceKlass*
    __ clinit_barrier(rscratch2, rscratch1, &L_skip_barrier);
    __ far_jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

    __ bind(L_skip_barrier);
  }
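Unlike the adapter case above, the wrapper knows its Method* at generation time, so the runtime access-flags test is replaced by method->needs_clinit_barrier(). That predicate is presumably along these lines (a sketch, not verified against this exact revision):

  // Presumed shape: emitting a barrier only pays off for static methods
  // whose holder has not finished initialization yet.
  bool Method::needs_clinit_barrier() const {
    return is_static() && !method_holder()->is_initialized();
  }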

  // Generate stack overflow check
  if (UseStackBanging) {
    __ bang_stack_with_offset(JavaThread::stack_shadow_zone_size());
  } else {
    Unimplemented();
  }
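Stack banging touches the far end of the shadow zone up front, so an overflow faults on the guard page here, at a well-defined point, rather than somewhere inside the native frame. On AArch64 the macro presumably boils down to a store of zero below sp, roughly (a sketch, not the verbatim macro):

  // Sketch of bang_stack_with_offset(offset): probe sp - offset so a
  // stack overflow traps now rather than mid-call.
  __ sub(rscratch2, sp, offset);
  __ str(zr, Address(rscratch2));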

  // Generate a new frame for the wrapper.
  __ enter();
  // -2 because return address is already present and so is saved rfp
  __ sub(sp, sp, stack_size - 2*wordSize);
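A quick check of the arithmetic: after enter(), the return address and the saved rfp already sit on the stack, which together account for 2 words of the precomputed frame size, so only stack_size - 2*wordSize remains to be allocated. With an illustrative stack_size of 96 bytes and an 8-byte word, the sub would drop sp by 80 bytes.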

The patch then adds the nmethod entry barrier, emitted as soon as the frame exists:

  BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();
  bs->nmethod_entry_barrier(masm);
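Conceptually, an nmethod entry barrier compares a guard value embedded in the nmethod with a globally maintained expected value: the GC arms all nmethods by changing the expectation, so the first entry into an unprocessed nmethod fails the comparison and diverts to a slow-path stub that fixes the nmethod's oops and disarms it. A hedged sketch of that fast-path invariant (guard_value and current_arm_value are illustrative names):

  // Illustrative: the emitted fast path reduces to one comparison.
  bool entry_barrier_fast_path(nmethod* nm) {
    return guard_value(nm) == current_arm_value(); // stand-ins
  }

Emitting it only after frame setup presumably lets the slow-path call run with a regular, walkable frame.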
  // Frame is now completed as far as size and linkage.
  int frame_complete = ((intptr_t)__ pc()) - start;

  // We use r20 as the oop handle for the receiver/klass.
  // It is callee-saved, so it survives the call to native code.

  const Register oop_handle_reg = r20;
  if (is_critical_native) {
    check_needs_gc_for_critical_native(masm, stack_slots, total_c_args, total_in_args,
                                       oop_handle_offset, oop_maps, in_regs, in_sig_bt);
  }

  // We immediately shuffle the arguments so that, for any VM call we
  // have to make from here on out (sync slow path, jvmti, etc.), we
  // will already have captured the oops from our caller and have a
  // valid oopMap for them.
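The oopMap mentioned here records, for a given PC, which stack slots and registers of the frame hold oops, so the GC can find and update them at a safepoint. A hedged sketch of registering one shuffled oop argument (the offset is illustrative):

  // Illustrative: record that an incoming oop now lives in a known
  // stack slot of this frame.
  OopMap* map = new OopMap(stack_slots * 2, 0 /* arg slots */);
  map->set_oop(VMRegImpl::stack2reg(oop_handle_offset)); // offset is a stand-in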

