src/cpu/sparc/vm/sharedRuntime_sparc.cpp
6879902

 523       assert(sig_bt[i+1] == T_VOID, "expecting half");
 524       if (flt_reg_pairs + 1 < flt_reg_max) {
 525         regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
 526         flt_reg_pairs += 2;
 527       } else {
 528         regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
 529         stk_reg_pairs += 2;
 530       }
 531       break;
 532     case T_VOID: regs[i].set_bad();  break; // Halves of longs & doubles
 533     default:
 534       ShouldNotReachHere();
 535     }
 536   }
 537 
 538   // return the amount of stack space these arguments will need.
 539   return stk_reg_pairs;
 540 
 541 }
 542 
 543 // Helper class mostly to avoid passing masm everywhere, and to handle store
 544 // displacement overflow logic for LP64
 545 class AdapterGenerator {
 546   MacroAssembler *masm;
 547 #ifdef _LP64
 548   Register Rdisp;
 549   void set_Rdisp(Register r)  { Rdisp = r; }
 550 #endif // _LP64
 551 
 552   void patch_callers_callsite();
 553   void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
 554 
 555   // base+st_off points to top of argument
 556   int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
 557   int next_arg_offset(const int st_off) {
 558     return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
 559   }
 560 
 561 #ifdef _LP64
 562   // On _LP64 argument slot values are loaded first into a register
 563   // because they might not fit into the displacement field.
 564   Register arg_slot(const int st_off);
 565   Register next_arg_slot(const int st_off);
 566 #else
 567   int arg_slot(const int st_off)      { return arg_offset(st_off); }
 568   int next_arg_slot(const int st_off) { return next_arg_offset(st_off); }
 567   int arg_slot(const int st_off)      { return arg_offset(st_off); }
 568   int next_arg_slot(const int st_off) { return next_arg_offset(st_off); }
 569 #endif // _LP64
 570 
 571   // Stores long into offset pointed to by base
 572   void store_c2i_long(Register r, Register base,
 573                       const int st_off, bool is_stack);
 574   void store_c2i_object(Register r, Register base,
 575                         const int st_off);
 576   void store_c2i_int(Register r, Register base,
 577                      const int st_off);
 578   void store_c2i_double(VMReg r_2,
 579                         VMReg r_1, Register base, const int st_off);
 580   void store_c2i_float(FloatRegister f, Register base,
 581                        const int st_off);
 582 
 583  public:
 584   void gen_c2i_adapter(int total_args_passed,
 585                               // VMReg max_arg,
 586                               int comp_args_on_stack, // VMRegStackSlots
 587                               const BasicType *sig_bt,
 588                               const VMRegPair *regs,
 589                               Label& skip_fixup);
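
The LP64-only Rdisp field above exists because SPARC load/store instructions encode their immediate displacement in a signed 13-bit field (roughly -4096..4095); interpreter argument offsets can exceed that range for methods with many arguments, so on LP64 the offset is first materialized into Rdisp. A minimal stand-alone sketch of that range check, using an illustrative element size and a helper name (fits_in_simm13) of our own rather than HotSpot's:

#include <cstdint>
#include <cstdio>

// Hypothetical helper, ours for illustration: does an offset fit in
// SPARC's signed 13-bit load/store displacement field?
static bool fits_in_simm13(int64_t off) {
  return off >= -4096 && off <= 4095;
}

int main() {
  // Interpreter argument offsets grow with the argument index; the element
  // size here is illustrative, not HotSpot's actual constant.
  const int64_t stack_element_size = 16;
  for (int arg = 0; arg <= 300; arg += 100) {
    int64_t off = arg * stack_element_size;
    std::printf("arg %3d: offset %5lld -> %s\n", arg, (long long)off,
                fits_in_simm13(off) ? "immediate displacement"
                                    : "materialize into Rdisp first");
  }
  return 0;
}

Offsets of 0, 1600 and 3200 stay immediates in this sketch, while 4800 would have to go through the scratch register, which is exactly the case the adapter has to handle.
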


 639   __ mov(G5_method, L5);
 640   __ mov(G5_method, O0);         // VM needs target method
 641   __ mov(I7, O1);                // VM needs caller's callsite
 642   // Must be a leaf call...
 643   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
 644   __ delayed()->mov(G2_thread, L7_thread_cache);
 645   __ mov(L7_thread_cache, G2_thread);
 646   __ ldx(FP, -8 + STACK_BIAS, G1);
 647   __ ldx(FP, -16 + STACK_BIAS, G4);
 648   __ mov(L5, G5_method);
 649   __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
 650 #endif /* _LP64 */
 651 
 652   __ restore();      // Restore args
 653   __ bind(L);
 654 }
 655 
 656 void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
 657                  Register scratch) {
 658   if (TaggedStackInterpreter) {
 659     int tag_off = st_off + Interpreter::tag_offset_in_bytes();
 660 #ifdef _LP64
 661     Register tag_slot = Rdisp;
 662     __ set(tag_off, tag_slot);
 663 #else
 664     int tag_slot = tag_off;
 665 #endif // _LP64
 666     // have to store zero because local slots can be reused (rats!)
 667     if (t == frame::TagValue) {
 668       __ st_ptr(G0, base, tag_slot);
 669     } else if (t == frame::TagCategory2) {
 670       __ st_ptr(G0, base, tag_slot);
 671       int next_tag_off  = st_off - Interpreter::stackElementSize() +
 672                                    Interpreter::tag_offset_in_bytes();
 673 #ifdef _LP64
 674       __ set(next_tag_off, tag_slot);
 675 #else
 676       tag_slot = next_tag_off;
 677 #endif // _LP64
 678       __ st_ptr(G0, base, tag_slot);
 679     } else {
 680       __ mov(t, scratch);
 681       __ st_ptr(scratch, base, tag_slot);
 682     }
 683   }
 684 }
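
tag_c2i_arg above writes the interpreter's per-slot tag word next to each stored argument: plain values and both halves of a category-2 value (long/double) get zero tags, because a reused local slot may still carry a stale tag, while other kinds (for example references) get the tag value itself via the scratch register. A stand-alone model of that per-slot layout and the three cases; the struct and the tag numbering are illustrative, not the interpreter's real encoding:

#include <cstdint>
#include <cstdio>

// Illustrative model of one tagged interpreter stack element:
// a value word plus a tag word (not HotSpot's real layout).
struct TaggedSlot { intptr_t value; intptr_t tag; };

enum Tag { TagValue = 0, TagReference = 1, TagCategory2 = 2 };  // assumed numbering

// Mirrors the branch structure of tag_c2i_arg.
static void tag_arg(TaggedSlot* slot, TaggedSlot* next_slot, Tag t) {
  if (t == TagValue) {
    slot->tag = 0;              // must store zero: the slot may hold a stale tag
  } else if (t == TagCategory2) {
    slot->tag = 0;              // both halves of a long/double are zero-tagged
    next_slot->tag = 0;
  } else {
    slot->tag = t;              // e.g. a reference keeps its reference tag
  }
}

int main() {
  TaggedSlot slots[2] = { {42, 7}, {0, 7} };   // 7 stands in for a stale tag
  tag_arg(&slots[0], &slots[1], TagCategory2);
  std::printf("tags after: %ld %ld\n", (long)slots[0].tag, (long)slots[1].tag);
  return 0;
}
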
 685 
 686 #ifdef _LP64
 687 Register AdapterGenerator::arg_slot(const int st_off) {
 688   __ set( arg_offset(st_off), Rdisp);
 689   return Rdisp;
 690 }
 691 
 692 Register AdapterGenerator::next_arg_slot(const int st_off){
 693   __ set( next_arg_offset(st_off), Rdisp);
 694   return Rdisp;
 695 }
 696 #endif // _LP64
 697 
 698 // Stores long into offset pointed to by base
 699 void AdapterGenerator::store_c2i_long(Register r, Register base,
 700                                       const int st_off, bool is_stack) {
 701 #ifdef _LP64
 702   // In V9, longs are given 2 64-bit slots in the interpreter, but the
 703   // data is passed in only 1 slot.
 704   __ stx(r, base, next_arg_slot(st_off));
 705 #else
 706 #ifdef COMPILER2
 707   // Misaligned store of 64-bit data
 708   __ stw(r, base, arg_slot(st_off));    // lo bits
 709   __ srlx(r, 32, r);
 710   __ stw(r, base, next_arg_slot(st_off));  // hi bits
 711 #else
 712   if (is_stack) {
 713     // Misaligned store of 64-bit data
 714     __ stw(r, base, arg_slot(st_off));    // lo bits
 715     __ srlx(r, 32, r);
 716     __ stw(r, base, next_arg_slot(st_off));  // hi bits


1035   // Will jump to the compiled code just as if compiled code was doing it.
1036   // Pre-load the register-jump target early, to schedule it better.
1037   __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
1038 
1039   // Now generate the shuffle code.  Pick up all register args and move the
1040   // rest through G1_scratch.
1041   for (int i=0; i<total_args_passed; i++) {
1042     if (sig_bt[i] == T_VOID) {
1043       // Longs and doubles are passed in native word order, but misaligned
1044       // in the 32-bit build.
1045       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
1046       continue;
1047     }
1048 
1049     // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
1050     // 32-bit build and aligned in the 64-bit build.  Look for the obvious
1051     // ldx/lddf optimizations.
1052 
1053     // Load in argument order going down.
1054     const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
1055 #ifdef _LP64
1056     set_Rdisp(G1_scratch);
1057 #endif // _LP64
1058 
1059     VMReg r_1 = regs[i].first();
1060     VMReg r_2 = regs[i].second();
1061     if (!r_1->is_valid()) {
1062       assert(!r_2->is_valid(), "");
1063       continue;
1064     }
1065     if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
1066       r_1 = F8->as_VMReg();        // as part of the load/store shuffle
1067       if (r_2->is_valid()) r_2 = r_1->next();
1068     }
1069     if (r_1->is_Register()) {  // Register argument
1070       Register r = r_1->as_Register()->after_restore();
1071       if (!r_2->is_valid()) {
1072         __ ld(Gargs, arg_slot(ld_off), r);
1073       } else {
1074 #ifdef _LP64
1075         // In V9, longs are given 2 64-bit slots in the interpreter, but the
1076         // data is passed in only 1 slot.
1077         Register slot = (sig_bt[i]==T_LONG) ?
1078               next_arg_slot(ld_off) : arg_slot(ld_off);
1079         __ ldx(Gargs, slot, r);
1080 #else
1081         // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
1082         // stack shuffle.  Load the first 2 longs into G1/G4 later.
1083 #endif
1084       }
1085     } else {
1086       assert(r_1->is_FloatRegister(), "");
1087       if (!r_2->is_valid()) {
1088         __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
1089       } else {
1090 #ifdef _LP64
1091         // In V9, doubles are given 2 64-bit slots in the interpreter, but the
1092         // data is passed in only 1 slot.  This code also handles longs that
1093         // are passed on the stack, but need a stack-to-stack move through a
1094         // spare float register.
1095         Register slot = (sig_bt[i]==T_LONG || sig_bt[i] == T_DOUBLE) ?
1096               next_arg_slot(ld_off) : arg_slot(ld_off);
1097         __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
1098 #else
1099         // Need to marshal 64-bit value from misaligned Lesp loads
1100         __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
1101         __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
1102 #endif
1103       }
1104     }
1105     // Was the argument really intended to be on the stack, but was loaded
1106     // into F8/F9?
1107     if (regs[i].first()->is_stack()) {
1108       assert(r_1->as_FloatRegister() == F8, "fix this code");
1109       // Convert stack slot to an SP offset
1110       int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
1111       // Store down the shuffled stack word.  Target address _is_ aligned.
1112       if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, st_off);
1113       else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, st_off);

1114     }
1115   }
1116   bool made_space = false;
1117 #ifndef _LP64
1118   // May need to pick up a few long args in G1/G4
1119   bool g4_crushed = false;
1120   bool g3_crushed = false;
1121   for (int i=0; i<total_args_passed; i++) {
1122     if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
1123       // Load in argument order going down
1124       int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
1125       // Need to marshal 64-bit value from misaligned Lesp loads
1126       Register r = regs[i].first()->as_Register()->after_restore();
1127       if (r == G1 || r == G4) {
1128         assert(!g4_crushed, "ordering problem");
1129         if (r == G4){
1130           g4_crushed = true;
1131           __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
1132           __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
1133         } else {


New version of the file follows (change 6879902); the old version is shown above.

 523       assert(sig_bt[i+1] == T_VOID, "expecting half");
 524       if (flt_reg_pairs + 1 < flt_reg_max) {
 525         regs[i].set2(as_FloatRegister(flt_reg_pairs)->as_VMReg());
 526         flt_reg_pairs += 2;
 527       } else {
 528         regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
 529         stk_reg_pairs += 2;
 530       }
 531       break;
 532     case T_VOID: regs[i].set_bad();  break; // Halves of longs & doubles
 533     default:
 534       ShouldNotReachHere();
 535     }
 536   }
 537 
 538   // return the amount of stack space these arguments will need.
 539   return stk_reg_pairs;
 540 
 541 }
 542 
 543 // Helper class mostly to avoid passing masm everywhere, and to handle
 544 // store displacement overflow logic.
 545 class AdapterGenerator {
 546   MacroAssembler *masm;

 547   Register Rdisp;
 548   void set_Rdisp(Register r)  { Rdisp = r; }

 549 
 550   void patch_callers_callsite();
 551   void tag_c2i_arg(frame::Tag t, Register base, int st_off, Register scratch);
 552 
 553   // base+st_off points to top of argument
 554   int arg_offset(const int st_off) { return st_off + Interpreter::value_offset_in_bytes(); }
 555   int next_arg_offset(const int st_off) {
 556     return st_off - Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
 557   }
 558 
 559   int tag_offset(const int st_off) { return st_off + Interpreter::tag_offset_in_bytes(); }
 560   int next_tag_offset(const int st_off) {
 561     return st_off - Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes();
 562   }
 563 
 564   // Argument slot values may be loaded first into a register because
 565   // they might not fit into the displacement field.
 566   RegisterOrConstant arg_slot(const int st_off);
 567   RegisterOrConstant next_arg_slot(const int st_off);
 568 
 569   RegisterOrConstant tag_slot(const int st_off);
 570   RegisterOrConstant next_tag_slot(const int st_off);
 571 
 572   // Stores long into offset pointed to by base
 573   void store_c2i_long(Register r, Register base,
 574                       const int st_off, bool is_stack);
 575   void store_c2i_object(Register r, Register base,
 576                         const int st_off);
 577   void store_c2i_int(Register r, Register base,
 578                      const int st_off);
 579   void store_c2i_double(VMReg r_2,
 580                         VMReg r_1, Register base, const int st_off);
 581   void store_c2i_float(FloatRegister f, Register base,
 582                        const int st_off);
 583 
 584  public:
 585   void gen_c2i_adapter(int total_args_passed,
 586                               // VMReg max_arg,
 587                               int comp_args_on_stack, // VMRegStackSlots
 588                               const BasicType *sig_bt,
 589                               const VMRegPair *regs,
 590                               Label& skip_fixup);


 640   __ mov(G5_method, L5);
 641   __ mov(G5_method, O0);         // VM needs target method
 642   __ mov(I7, O1);                // VM needs caller's callsite
 643   // Must be a leaf call...
 644   __ call(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite), relocInfo::runtime_call_type);
 645   __ delayed()->mov(G2_thread, L7_thread_cache);
 646   __ mov(L7_thread_cache, G2_thread);
 647   __ ldx(FP, -8 + STACK_BIAS, G1);
 648   __ ldx(FP, -16 + STACK_BIAS, G4);
 649   __ mov(L5, G5_method);
 650   __ ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), G3_scratch);
 651 #endif /* _LP64 */
 652 
 653   __ restore();      // Restore args
 654   __ bind(L);
 655 }
 656 
 657 void AdapterGenerator::tag_c2i_arg(frame::Tag t, Register base, int st_off,
 658                  Register scratch) {
 659   if (TaggedStackInterpreter) {
 660     RegisterOrConstant slot = tag_slot(st_off);
 661     // have to store zero because local slots can be reused (rats!)
 662     if (t == frame::TagValue) {
 663       __ st_ptr(G0, base, slot);
 664     } else if (t == frame::TagCategory2) {
 665       __ st_ptr(G0, base, slot);
 666       __ st_ptr(G0, base, next_tag_slot(st_off));
 667     } else {
 668       __ mov(t, scratch);
 669       __ st_ptr(scratch, base, slot);
 670     }
 671   }
 672 }
 673 
 674 
 675 RegisterOrConstant AdapterGenerator::arg_slot(const int st_off) {
 676   RegisterOrConstant roc(arg_offset(st_off));
 677   return __ ensure_simm13_or_reg(roc, Rdisp);
 678 }
 679 
 680 RegisterOrConstant AdapterGenerator::next_arg_slot(const int st_off) {
 681   RegisterOrConstant roc(next_arg_offset(st_off));
 682   return __ ensure_simm13_or_reg(roc, Rdisp);
 683 }
 684 
 685 
 686 RegisterOrConstant AdapterGenerator::tag_slot(const int st_off) {
 687   RegisterOrConstant roc(tag_offset(st_off));
 688   return __ ensure_simm13_or_reg(roc, Rdisp);
 689 }
 690 
 691 RegisterOrConstant AdapterGenerator::next_tag_slot(const int st_off) {
 692   RegisterOrConstant roc(next_tag_offset(st_off));
 693   return __ ensure_simm13_or_reg(roc, Rdisp);
 694 }
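
The rewritten helpers return a RegisterOrConstant: when the computed offset fits the simm13 displacement field it stays an immediate, and only otherwise does ensure_simm13_or_reg load it into Rdisp and hand back the register as the operand. A minimal stand-alone sketch of that either-immediate-or-register pattern; the struct and function names are ours, modeled on the idea rather than on HotSpot's actual types:

#include <cstdint>
#include <cstdio>

static bool fits_in_simm13(int64_t v) { return v >= -4096 && v <= 4095; }

// Toy stand-in for a "register or constant" operand.
struct RegOrConst {
  bool    is_register;
  int     reg;   // scratch register id when is_register
  int64_t imm;   // immediate displacement otherwise
};

// Keep the offset as an immediate if it fits; otherwise pretend to emit
// "set off, scratch" and return the register instead.
static RegOrConst ensure_simm13_or_reg_sketch(int64_t off, int scratch_reg) {
  if (fits_in_simm13(off)) return RegOrConst{false, 0, off};
  std::printf("  (emit) set %lld, scratch r%d\n", (long long)off, scratch_reg);
  return RegOrConst{true, scratch_reg, 0};
}

int main() {
  for (int64_t off : {40LL, 4095LL, 4096LL, 70000LL}) {
    RegOrConst slot = ensure_simm13_or_reg_sketch(off, /* Rdisp stand-in */ 1);
    std::printf("offset %6lld -> %s\n", (long long)off,
                slot.is_register ? "register operand" : "immediate displacement");
  }
  return 0;
}

Compared with the old code, small offsets (the common case) no longer pay for a set into Rdisp, and the same helpers serve both the 32-bit and 64-bit builds, which is why the surrounding #ifdef _LP64 blocks disappear in this version.
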
 695 
 696 
 697 // Stores long into offset pointed to by base
 698 void AdapterGenerator::store_c2i_long(Register r, Register base,
 699                                       const int st_off, bool is_stack) {
 700 #ifdef _LP64
 701   // In V9, longs are given 2 64-bit slots in the interpreter, but the
 702   // data is passed in only 1 slot.
 703   __ stx(r, base, next_arg_slot(st_off));
 704 #else
 705 #ifdef COMPILER2
 706   // Misaligned store of 64-bit data
 707   __ stw(r, base, arg_slot(st_off));    // lo bits
 708   __ srlx(r, 32, r);
 709   __ stw(r, base, next_arg_slot(st_off));  // hi bits
 710 #else
 711   if (is_stack) {
 712     // Misaligned store of 64-bit data
 713     __ stw(r, base, arg_slot(st_off));    // lo bits
 714     __ srlx(r, 32, r);
 715     __ stw(r, base, next_arg_slot(st_off));  // hi bits


1034   // Will jump to the compiled code just as if compiled code was doing it.
1035   // Pre-load the register-jump target early, to schedule it better.
1036   __ ld_ptr(G5_method, in_bytes(methodOopDesc::from_compiled_offset()), G3);
1037 
1038   // Now generate the shuffle code.  Pick up all register args and move the
1039   // rest through G1_scratch.
1040   for (int i=0; i<total_args_passed; i++) {
1041     if (sig_bt[i] == T_VOID) {
1042       // Longs and doubles are passed in native word order, but misaligned
1043       // in the 32-bit build.
1044       assert(i > 0 && (sig_bt[i-1] == T_LONG || sig_bt[i-1] == T_DOUBLE), "missing half");
1045       continue;
1046     }
1047 
1048     // Pick up 0, 1 or 2 words from Lesp+offset.  Assume mis-aligned in the
1049     // 32-bit build and aligned in the 64-bit build.  Look for the obvious
1050     // ldx/lddf optimizations.
1051 
1052     // Load in argument order going down.
1053     const int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();

1054     set_Rdisp(G1_scratch);

1055 
1056     VMReg r_1 = regs[i].first();
1057     VMReg r_2 = regs[i].second();
1058     if (!r_1->is_valid()) {
1059       assert(!r_2->is_valid(), "");
1060       continue;
1061     }
1062     if (r_1->is_stack()) {        // Pretend stack targets are loaded into F8/F9
1063       r_1 = F8->as_VMReg();        // as part of the load/store shuffle
1064       if (r_2->is_valid()) r_2 = r_1->next();
1065     }
1066     if (r_1->is_Register()) {  // Register argument
1067       Register r = r_1->as_Register()->after_restore();
1068       if (!r_2->is_valid()) {
1069         __ ld(Gargs, arg_slot(ld_off), r);
1070       } else {
1071 #ifdef _LP64
1072         // In V9, longs are given 2 64-bit slots in the interpreter, but the
1073         // data is passed in only 1 slot.
1074         RegisterOrConstant slot = (sig_bt[i] == T_LONG) ?
1075               next_arg_slot(ld_off) : arg_slot(ld_off);
1076         __ ldx(Gargs, slot, r);
1077 #else
1078         // Need to load a 64-bit value into G1/G4, but G1/G4 is being used in the
1079         // stack shuffle.  Load the first 2 longs into G1/G4 later.
1080 #endif
1081       }
1082     } else {
1083       assert(r_1->is_FloatRegister(), "");
1084       if (!r_2->is_valid()) {
1085         __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_1->as_FloatRegister());
1086       } else {
1087 #ifdef _LP64
1088         // In V9, doubles are given 2 64-bit slots in the interpreter, but the
1089         // data is passed in only 1 slot.  This code also handles longs that
1090         // are passed on the stack, but need a stack-to-stack move through a
1091         // spare float register.
1092         RegisterOrConstant slot = (sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) ?
1093               next_arg_slot(ld_off) : arg_slot(ld_off);
1094         __ ldf(FloatRegisterImpl::D, Gargs, slot, r_1->as_FloatRegister());
1095 #else
1096         // Need to marshal 64-bit value from misaligned Lesp loads
1097         __ ldf(FloatRegisterImpl::S, Gargs, next_arg_slot(ld_off), r_1->as_FloatRegister());
1098         __ ldf(FloatRegisterImpl::S, Gargs, arg_slot(ld_off), r_2->as_FloatRegister());
1099 #endif
1100       }
1101     }
1102     // Was the argument really intended to be on the stack, but was loaded
1103     // into F8/F9?
1104     if (regs[i].first()->is_stack()) {
1105       assert(r_1->as_FloatRegister() == F8, "fix this code");
1106       // Convert stack slot to an SP offset
1107       int st_off = reg2offset(regs[i].first()) + STACK_BIAS;
1108       // Store down the shuffled stack word.  Target address _is_ aligned.
1109       RegisterOrConstant slot = __ ensure_simm13_or_reg(st_off, Rdisp);
1110       if (!r_2->is_valid()) __ stf(FloatRegisterImpl::S, r_1->as_FloatRegister(), SP, slot);
1111       else                  __ stf(FloatRegisterImpl::D, r_1->as_FloatRegister(), SP, slot);
1112     }
1113   }
1114   bool made_space = false;
1115 #ifndef _LP64
1116   // May need to pick up a few long args in G1/G4
1117   bool g4_crushed = false;
1118   bool g3_crushed = false;
1119   for (int i=0; i<total_args_passed; i++) {
1120     if (regs[i].first()->is_Register() && regs[i].second()->is_valid()) {
1121       // Load in argument order going down
1122       int ld_off = (total_args_passed-i)*Interpreter::stackElementSize();
1123       // Need to marshal 64-bit value from misaligned Lesp loads
1124       Register r = regs[i].first()->as_Register()->after_restore();
1125       if (r == G1 || r == G4) {
1126         assert(!g4_crushed, "ordering problem");
1127         if (r == G4){
1128           g4_crushed = true;
1129           __ lduw(Gargs, arg_slot(ld_off)     , G3_scratch); // Load lo bits
1130           __ ld  (Gargs, next_arg_slot(ld_off), r);          // Load hi bits
1131         } else {
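
The shuffle loop walks the signature from argument 0 downward and loads each value from Lesp + ld_off, with ld_off = (total_args_passed - i) * Interpreter::stackElementSize(): argument 0 sits at the highest offset and later arguments at progressively lower ones, while T_VOID entries (the second halves of longs and doubles) carry no data and are skipped. A small worked example of that offset arithmetic; the signature and the element size are illustrative:

#include <cstdio>

enum BasicType { T_INT, T_LONG, T_DOUBLE, T_OBJECT, T_VOID };

int main() {
  // Illustrative signature (int, long, double): longs and doubles occupy
  // two interpreter slots, with the second slot marked T_VOID.
  const BasicType sig_bt[]    = { T_INT, T_LONG, T_VOID, T_DOUBLE, T_VOID };
  const int total_args_passed = 5;
  const int element_size      = 8;   // assumed element size, not HotSpot's constant

  for (int i = 0; i < total_args_passed; i++) {
    if (sig_bt[i] == T_VOID) continue;           // halves of longs & doubles
    int ld_off = (total_args_passed - i) * element_size;
    std::printf("arg %d: load from Lesp + %d\n", i, ld_off);
  }
  return 0;
}

With these numbers the int is picked up at Lesp+40, the long at Lesp+32 and the double at Lesp+16, matching the "load in argument order going down" comment.
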

