< prev index next >

src/cpu/sparc/vm/sharedRuntime_sparc.cpp

Print this page




 299   __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
 300   __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
 301 #endif /* _LP64 */
 302 
 303   __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
 304 
 305   __ restore();
 306 
 307 #if !defined(_LP64)
 308   // Now reload the 64bit Oregs after we've restored the window.
 309   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
 310   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
 311 #endif /* _LP64 */
 312 
 313 }
 314 
 315 // Is vector's size (in bytes) bigger than a size saved by default?
 316 // 8 bytes FP registers are saved by default on SPARC.
 317 bool SharedRuntime::is_wide_vector(int size) {
 318   // Note, MaxVectorSize == 8 on SPARC.
 319   assert(size <= 8, err_msg_res("%d bytes vectors are not supported", size));
       // Since the assert guarantees size <= 8, this is always false here:
       // SPARC never has a vector wider than the default 8-byte save area.
 320   return size > 8;
 321 }
 322 
 323 // The java_calling_convention describes stack locations as ideal slots on
 324 // a frame with no ABI restrictions. Since we must observe ABI restrictions
 325 // (like the placement of the register window) the slots must be biased by
 326 // the following value.
       // Converts a stack-slot VMReg into a byte offset within the frame,
       // skipping past the out_preserve_stack_slots() reserved at the bottom.
 327 static int reg2offset(VMReg r) {
 328   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 329 }
 330 
       // Describe a 64-bit value held in integer register r as a VMRegPair:
       // on 64-bit VMs (wordSize == 8) the single register covers both halves;
       // on 32-bit VMs the value spans r and its successor register.
       // NOTE(review): assumes set_pair(second_half, first_half) argument order
       // with the successor holding the other word — verify against VMRegPair.
 331 static VMRegPair reg64_to_VMRegPair(Register r) {
 332   VMRegPair ret;
 333   if (wordSize == 8) {
 334     ret.set2(r->as_VMReg());
 335   } else {
 336     ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
 337   }
 338   return ret;
 339 }


 447 
 448     case T_DOUBLE:
 449       assert(sig_bt[i+1] == T_VOID, "expecting half");
 450       if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
 451         flt_reg = round_to(flt_reg, 2);  // align
 452         FloatRegister r = as_FloatRegister(flt_reg);
 453         regs[i].set2(r->as_VMReg());
 454         flt_reg += 2;
 455       } else {
 456         slot = round_to(slot, 2);  // align
 457         regs[i].set2(VMRegImpl::stack2reg(slot));
 458         slot += 2;
 459       }
 460       break;
 461 
 462     case T_VOID:
 463       regs[i].set_bad();   // Halves of longs & doubles
 464       break;
 465 
 466     default:
 467       fatal(err_msg_res("unknown basic type %d", sig_bt[i]));
 468       break;
 469     }
 470   }
 471 
 472   // return the amount of stack space these arguments will need.
 473   return slot;
 474 }
 475 
 476 // Helper class mostly to avoid passing masm everywhere, and handle
 477 // store displacement overflow logic.
 478 class AdapterGenerator {
 479   MacroAssembler *masm;
 480   Register Rdisp;
 481   void set_Rdisp(Register r)  { Rdisp = r; }
 482 
 483   void patch_callers_callsite();
 484 
 485   // base+st_off points to top of argument
 486   int arg_offset(const int st_off) { return st_off; }
 487   int next_arg_offset(const int st_off) {


1842 static void gen_special_dispatch(MacroAssembler* masm,
1843                                  methodHandle method,
1844                                  const BasicType* sig_bt,
1845                                  const VMRegPair* regs) {
1846   verify_oop_args(masm, method, sig_bt, regs);
1847   vmIntrinsics::ID iid = method->intrinsic_id();
1848 
1849   // Now write the args into the outgoing interpreter space
1850   bool     has_receiver   = false;
1851   Register receiver_reg   = noreg;
1852   int      member_arg_pos = -1;
1853   Register member_reg     = noreg;
1854   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1855   if (ref_kind != 0) {
1856     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1857     member_reg = G5_method;  // known to be free at this point
1858     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1859   } else if (iid == vmIntrinsics::_invokeBasic) {
1860     has_receiver = true;
1861   } else {
1862     fatal(err_msg_res("unexpected intrinsic id %d", iid));
1863   }
1864 
1865   if (member_reg != noreg) {
1866     // Load the member_arg into register, if necessary.
1867     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1868     VMReg r = regs[member_arg_pos].first();
1869     if (r->is_stack()) {
1870       RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1871       ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1872       __ ld_ptr(SP, ld_off, member_reg);
1873     } else {
1874       // no data motion is needed
1875       member_reg = r->as_Register();
1876     }
1877   }
1878 
1879   if (has_receiver) {
1880     // Make sure the receiver is loaded into a register.
1881     assert(method->size_of_parameters() > 0, "oob");
1882     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");




 299   __ stx(O0, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8);
 300   __ stx(O1, G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8);
 301 #endif /* _LP64 */
 302 
 303   __ ldf(FloatRegisterImpl::D, SP, d00_offset+STACK_BIAS, as_FloatRegister(0));
 304 
 305   __ restore();
 306 
 307 #if !defined(_LP64)
 308   // Now reload the 64bit Oregs after we've restored the window.
 309   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+0*8, O0);
 310   __ ldx(G2_thread, JavaThread::o_reg_temps_offset_in_bytes()+1*8, O1);
 311 #endif /* _LP64 */
 312 
 313 }
 314 
 315 // Is vector's size (in bytes) bigger than a size saved by default?
 316 // 8 bytes FP registers are saved by default on SPARC.
 317 bool SharedRuntime::is_wide_vector(int size) {
 318   // Note, MaxVectorSize == 8 on SPARC.
 319   assert(size <= 8, "%d bytes vectors are not supported", size);
       // Since the assert guarantees size <= 8, this is always false here:
       // SPARC never has a vector wider than the default 8-byte save area.
 320   return size > 8;
 321 }
 322 
 323 // The java_calling_convention describes stack locations as ideal slots on
 324 // a frame with no ABI restrictions. Since we must observe ABI restrictions
 325 // (like the placement of the register window) the slots must be biased by
 326 // the following value.
       // Converts a stack-slot VMReg into a byte offset within the frame,
       // skipping past the out_preserve_stack_slots() reserved at the bottom.
 327 static int reg2offset(VMReg r) {
 328   return (r->reg2stack() + SharedRuntime::out_preserve_stack_slots()) * VMRegImpl::stack_slot_size;
 329 }
 330 
       // Describe a 64-bit value held in integer register r as a VMRegPair:
       // on 64-bit VMs (wordSize == 8) the single register covers both halves;
       // on 32-bit VMs the value spans r and its successor register.
       // NOTE(review): assumes set_pair(second_half, first_half) argument order
       // with the successor holding the other word — verify against VMRegPair.
 331 static VMRegPair reg64_to_VMRegPair(Register r) {
 332   VMRegPair ret;
 333   if (wordSize == 8) {
 334     ret.set2(r->as_VMReg());
 335   } else {
 336     ret.set_pair(r->successor()->as_VMReg(), r->as_VMReg());
 337   }
 338   return ret;
 339 }


 447 
 448     case T_DOUBLE:
 449       assert(sig_bt[i+1] == T_VOID, "expecting half");
 450       if (round_to(flt_reg, 2) + 1 < flt_reg_max) {
 451         flt_reg = round_to(flt_reg, 2);  // align
 452         FloatRegister r = as_FloatRegister(flt_reg);
 453         regs[i].set2(r->as_VMReg());
 454         flt_reg += 2;
 455       } else {
 456         slot = round_to(slot, 2);  // align
 457         regs[i].set2(VMRegImpl::stack2reg(slot));
 458         slot += 2;
 459       }
 460       break;
 461 
 462     case T_VOID:
 463       regs[i].set_bad();   // Halves of longs & doubles
 464       break;
 465 
 466     default:
 467       fatal("unknown basic type %d", sig_bt[i]);
 468       break;
 469     }
 470   }
 471 
 472   // return the amount of stack space these arguments will need.
 473   return slot;
 474 }
 475 
 476 // Helper class mostly to avoid passing masm everywhere, and handle
 477 // store displacement overflow logic.
 478 class AdapterGenerator {
 479   MacroAssembler *masm;
 480   Register Rdisp;
 481   void set_Rdisp(Register r)  { Rdisp = r; }
 482 
 483   void patch_callers_callsite();
 484 
 485   // base+st_off points to top of argument
 486   int arg_offset(const int st_off) { return st_off; }
 487   int next_arg_offset(const int st_off) {


1842 static void gen_special_dispatch(MacroAssembler* masm,
1843                                  methodHandle method,
1844                                  const BasicType* sig_bt,
1845                                  const VMRegPair* regs) {
1846   verify_oop_args(masm, method, sig_bt, regs);
1847   vmIntrinsics::ID iid = method->intrinsic_id();
1848 
1849   // Now write the args into the outgoing interpreter space
1850   bool     has_receiver   = false;
1851   Register receiver_reg   = noreg;
1852   int      member_arg_pos = -1;
1853   Register member_reg     = noreg;
1854   int      ref_kind       = MethodHandles::signature_polymorphic_intrinsic_ref_kind(iid);
1855   if (ref_kind != 0) {
1856     member_arg_pos = method->size_of_parameters() - 1;  // trailing MemberName argument
1857     member_reg = G5_method;  // known to be free at this point
1858     has_receiver = MethodHandles::ref_kind_has_receiver(ref_kind);
1859   } else if (iid == vmIntrinsics::_invokeBasic) {
1860     has_receiver = true;
1861   } else {
1862     fatal("unexpected intrinsic id %d", iid);
1863   }
1864 
1865   if (member_reg != noreg) {
1866     // Load the member_arg into register, if necessary.
1867     SharedRuntime::check_member_name_argument_is_last_argument(method, sig_bt, regs);
1868     VMReg r = regs[member_arg_pos].first();
1869     if (r->is_stack()) {
1870       RegisterOrConstant ld_off = reg2offset(r) + STACK_BIAS;
1871       ld_off = __ ensure_simm13_or_reg(ld_off, member_reg);
1872       __ ld_ptr(SP, ld_off, member_reg);
1873     } else {
1874       // no data motion is needed
1875       member_reg = r->as_Register();
1876     }
1877   }
1878 
1879   if (has_receiver) {
1880     // Make sure the receiver is loaded into a register.
1881     assert(method->size_of_parameters() > 0, "oob");
1882     assert(sig_bt[0] == T_OBJECT, "receiver argument must be an object");


< prev index next >