< prev index next >

src/hotspot/cpu/x86/sharedRuntime_x86_64.cpp

Print this page




 953   Register temp = rbx;
 954 
 955   {
 956     __ load_klass(temp, receiver);
 957     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
 958     __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
 959     __ jcc(Assembler::equal, ok);
 960     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 961 
 962     __ bind(ok);
 963     // Method might have been compiled since the call site was patched to
 964     // interpreted; if that is the case, treat it as a miss so we can get
 965     // the call site corrected.
 966     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 967     __ jcc(Assembler::equal, skip_fixup);
 968     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 969   }
 970 
 971   address c2i_entry = __ pc();
 972 





















 973   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 974 
 975   __ flush();
 976   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 977 }
 978 
 979 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
 980                                          VMRegPair *regs,
 981                                          VMRegPair *regs2,
 982                                          int total_args_passed) {
 983   assert(regs2 == NULL, "not needed on x86");
 984 // We return the amount of VMRegImpl stack slots we need to reserve for all
 985 // the arguments NOT counting out_preserve_stack_slots.
 986 
 987 // NOTE: These arrays will have to change when c1 is ported
 988 #ifdef _WIN64
 989     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
 990       c_rarg0, c_rarg1, c_rarg2, c_rarg3
 991     };
 992     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {


2120   const Register receiver = j_rarg0;
2121 
2122   Label hit;
2123   Label exception_pending;
2124 
2125   assert_different_registers(ic_reg, receiver, rscratch1);
2126   __ verify_oop(receiver);
2127   __ load_klass(rscratch1, receiver);
2128   __ cmpq(ic_reg, rscratch1);
2129   __ jcc(Assembler::equal, hit);
2130 
2131   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2132 
2133   // Verified entry point must be aligned
2134   __ align(8);
2135 
2136   __ bind(hit);
2137 
2138   int vep_offset = ((intptr_t)__ pc()) - start;
2139 











2140 #ifdef COMPILER1
2141   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
2142   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2143     inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
2144   }
2145 #endif // COMPILER1
2146 
2147   // The instruction at the verified entry point must be 5 bytes or longer
2148   // because it can be patched on the fly by make_non_entrant. The stack bang
2149   // instruction fits that requirement.
2150 
2151   // Generate stack overflow check
2152 
2153   if (UseStackBanging) {
2154     __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
2155   } else {
2156     // need a 5 byte instruction to allow MT safe patching to non-entrant
2157     __ fat_nop();
2158   }
2159 




 953   Register temp = rbx;
 954 
 955   {
 956     __ load_klass(temp, receiver);
 957     __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
 958     __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
 959     __ jcc(Assembler::equal, ok);
 960     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 961 
 962     __ bind(ok);
 963     // Method might have been compiled since the call site was patched to
 964     // interpreted; if that is the case, treat it as a miss so we can get
 965     // the call site corrected.
 966     __ cmpptr(Address(rbx, in_bytes(Method::code_offset())), (int32_t)NULL_WORD);
 967     __ jcc(Assembler::equal, skip_fixup);
 968     __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
 969   }
 970 
 971   address c2i_entry = __ pc();
 972 
 973   // Class initialization barrier for static methods
 974   if (UseFastClassInitChecks) {
 975     Label L_skip_barrier;
 976     Register method = rbx;
 977 
 978     { // Bypass the barrier for non-static methods
 979       Register flags  = rscratch1;
 980       __ movl(flags, Address(method, Method::access_flags_offset()));
 981       __ testl(flags, JVM_ACC_STATIC);
 982       __ jcc(Assembler::zero, L_skip_barrier); // non-static
 983     }
 984 
 985     Register klass = rscratch1;
 986     __ load_method_holder(klass, method);
 987     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
 988 
 989     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
 990 
 991     __ bind(L_skip_barrier);
 992   }
 993 
 994   gen_c2i_adapter(masm, total_args_passed, comp_args_on_stack, sig_bt, regs, skip_fixup);
 995 
 996   __ flush();
 997   return AdapterHandlerLibrary::new_entry(fingerprint, i2c_entry, c2i_entry, c2i_unverified_entry);
 998 }
 999 
1000 int SharedRuntime::c_calling_convention(const BasicType *sig_bt,
1001                                          VMRegPair *regs,
1002                                          VMRegPair *regs2,
1003                                          int total_args_passed) {
1004   assert(regs2 == NULL, "not needed on x86");
1005 // We return the amount of VMRegImpl stack slots we need to reserve for all
1006 // the arguments NOT counting out_preserve_stack_slots.
1007 
1008 // NOTE: These arrays will have to change when c1 is ported
1009 #ifdef _WIN64
1010     static const Register INT_ArgReg[Argument::n_int_register_parameters_c] = {
1011       c_rarg0, c_rarg1, c_rarg2, c_rarg3
1012     };
1013     static const XMMRegister FP_ArgReg[Argument::n_float_register_parameters_c] = {


2141   const Register receiver = j_rarg0;
2142 
2143   Label hit;
2144   Label exception_pending;
2145 
2146   assert_different_registers(ic_reg, receiver, rscratch1);
2147   __ verify_oop(receiver);
2148   __ load_klass(rscratch1, receiver);
2149   __ cmpq(ic_reg, rscratch1);
2150   __ jcc(Assembler::equal, hit);
2151 
2152   __ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
2153 
2154   // Verified entry point must be aligned
2155   __ align(8);
2156 
2157   __ bind(hit);
2158 
2159   int vep_offset = ((intptr_t)__ pc()) - start;
2160 
2161   if (UseFastClassInitChecks && method->needs_clinit_barrier()) {
2162     Label L_skip_barrier;
2163     Register klass = r10;
2164     __ mov_metadata(klass, method->method_holder()); // InstanceKlass*
2165     __ clinit_barrier(klass, r15_thread, &L_skip_barrier /*L_fast_path*/);
2166 
2167     __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub())); // slow path
2168 
2169     __ bind(L_skip_barrier);
2170   }
2171 
2172 #ifdef COMPILER1
2173   // For Object.hashCode, System.identityHashCode try to pull hashCode from object header if available.
2174   if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
2175     inline_check_hashcode_from_object_header(masm, method, j_rarg0 /*obj_reg*/, rax /*result*/);
2176   }
2177 #endif // COMPILER1
2178 
2179   // The instruction at the verified entry point must be 5 bytes or longer
2180   // because it can be patched on the fly by make_non_entrant. The stack bang
2181   // instruction fits that requirement.
2182 
2183   // Generate stack overflow check
2184 
2185   if (UseStackBanging) {
2186     __ bang_stack_with_offset((int)JavaThread::stack_shadow_zone_size());
2187   } else {
2188     // need a 5 byte instruction to allow MT safe patching to non-entrant
2189     __ fat_nop();
2190   }
2191 


< prev index next >